Diffstat (limited to 'include/linux/sched.h')

 -rw-r--r--   include/linux/sched.h   57
 1 file changed, 33 insertions(+), 24 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b7b9501b41af..c115d617739d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -599,33 +599,42 @@ struct task_cputime_atomic {
 	.sum_exec_runtime = ATOMIC64_INIT(0),		\
 }
 
-#ifdef CONFIG_PREEMPT_COUNT
-#define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
-#else
-#define PREEMPT_DISABLED	PREEMPT_ENABLED
-#endif
+#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
 
 /*
- * Disable preemption until the scheduler is running.
- * Reset by start_kernel()->sched_init()->init_idle().
+ * Disable preemption until the scheduler is running -- use an unconditional
+ * value so that it also works on !PREEMPT_COUNT kernels.
  *
- * We include PREEMPT_ACTIVE to avoid cond_resched() from working
- * before the scheduler is active -- see should_resched().
+ * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
  */
-#define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)
+#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET
+
+/*
+ * Initial preempt_count value; reflects the preempt_count schedule invariant
+ * which states that during context switches:
+ *
+ *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
+ *
+ * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
+ * Note: See finish_task_switch().
+ */
+#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
 
 /**
  * struct thread_group_cputimer - thread group interval timer counts
  * @cputime_atomic:	atomic thread group interval timers.
- * @running:		non-zero when there are timers running and
- *			@cputime receives updates.
+ * @running:		true when there are timers running and
+ *			@cputime_atomic receives updates.
+ * @checking_timer:	true when a thread in the group is in the
+ *			process of checking for thread group timers.
  *
  * This structure contains the version of task_cputime, above, that is
  * used for thread group CPU timer calculations.
  */
 struct thread_group_cputimer {
 	struct task_cputime_atomic cputime_atomic;
-	int running;
+	bool running;
+	bool checking_timer;
 };
 
 #include <linux/rwsem.h>
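A note on the invariant encoded by FORK_PREEMPT_COUNT above: preemption is held off twice across a context switch, so a newly forked task first runs in finish_task_switch() with preempt_count() == 2*PREEMPT_DISABLE_OFFSET, and because PREEMPT_DISABLE_OFFSET collapses to 0 on !PREEMPT_COUNT kernels the same expression is valid in both configurations. A minimal standalone sketch of the macro arithmetic (values mirror the conventions of <linux/preempt.h> but are redefined here purely for illustration, not kernel code):

	#include <stdio.h>

	#define PREEMPT_ENABLED		0
	#define PREEMPT_OFFSET		1

	#ifdef CONFIG_PREEMPT_COUNT	/* build with -DCONFIG_PREEMPT_COUNT to compare */
	# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET	/* each preempt_disable() adds one */
	#else
	# define PREEMPT_DISABLE_OFFSET	0	/* preempt_disable() leaves the count alone */
	#endif

	#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
	#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET
	#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

	int main(void)
	{
		/* PREEMPT_COUNT kernels: INIT=1, FORK=2 (two disable levels held
		 * across a switch); !PREEMPT_COUNT kernels: INIT=1, FORK=0. */
		printf("INIT=%d FORK=%d\n", INIT_PREEMPT_COUNT, FORK_PREEMPT_COUNT);
		return 0;
	}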
@@ -840,7 +849,7 @@ struct user_struct {
 	struct hlist_node uidhash_node;
 	kuid_t uid;
 
-#ifdef CONFIG_PERF_EVENTS
+#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
 	atomic_long_t locked_vm;
 #endif
 };
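Widening this #ifdef makes the per-user locked_vm counter, previously compiled in only for perf, available to the BPF syscall as well, so both can charge locked pages against one shared per-user total. A hypothetical sketch of the charge/uncharge pattern such a counter supports (user_accounting, charge_locked_vm and the explicit limit parameter are illustrative stand-ins, not the kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct user_accounting {	/* stand-in for the relevant part of user_struct */
		atomic_long locked_vm;	/* pages this user has locked */
	};

	/* Atomically charge 'pages'; roll back and fail if 'limit' would be exceeded. */
	bool charge_locked_vm(struct user_accounting *u, long pages, long limit)
	{
		long new_total = atomic_fetch_add(&u->locked_vm, pages) + pages;

		if (new_total > limit) {
			atomic_fetch_sub(&u->locked_vm, pages);	/* undo the charge */
			return false;
		}
		return true;
	}

	int main(void)
	{
		struct user_accounting u = { .locked_vm = 0 };

		printf("within limit: %d\n", charge_locked_vm(&u, 16, 64));	/* 1 */
		printf("over limit:   %d\n", charge_locked_vm(&u, 64, 64));	/* 0 */
		return 0;
	}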
@@ -1139,8 +1148,6 @@ struct sched_domain_topology_level {
 #endif
 };
 
-extern struct sched_domain_topology_level *sched_domain_topology;
-
 extern void set_sched_topology(struct sched_domain_topology_level *tl);
 extern void wake_up_if_idle(int cpu);
 
@@ -1189,10 +1196,10 @@ struct load_weight {
 
 /*
  * The load_avg/util_avg accumulates an infinite geometric series.
- * 1) load_avg factors the amount of time that a sched_entity is
- * runnable on a rq into its weight. For cfs_rq, it is the aggregated
- * such weights of all runnable and blocked sched_entities.
- * 2) util_avg factors frequency scaling into the amount of time
+ * 1) load_avg factors frequency scaling into the amount of time that a
+ * sched_entity is runnable on a rq into its weight. For cfs_rq, it is the
+ * aggregated such weights of all runnable and blocked sched_entities.
+ * 2) util_avg factors frequency and cpu scaling into the amount of time
  * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE].
  * For cfs_rq, it is the aggregated such times of all runnable and
  * blocked sched_entities.
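For concreteness, the geometric series referred to above decays each ~1ms period's contribution by a constant factor y per elapsed period, conventionally chosen so that y^32 = 0.5, i.e. load from 32ms ago counts half as much as fresh load. A toy floating-point model of the accumulation (the kernel uses precomputed fixed-point tables; only the y^32 = 0.5 convention below is taken from the kernel, the rest is illustrative):

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		double y = pow(0.5, 1.0 / 32.0);	/* per-period decay, ~0.97857 */
		double sum = 0.0;

		/* An entity runnable in every period converges on the series
		 * limit; a few hundred periods is ample to fully ramp up. */
		for (int period = 0; period < 350; period++)
			sum = sum * y + 1.0;	/* decay history, add this period */

		printf("sum=%.2f  limit 1/(1-y)=%.2f\n", sum, 1.0 / (1.0 - y));
		return 0;
	}

Scaling that per-period contribution by the entity's weight yields load_avg; scaling it by the frequency and cpu factors the updated comment describes yields util_avg in [0..SCHED_LOAD_SCALE].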
@@ -1342,10 +1349,12 @@ struct sched_dl_entity {
 
 union rcu_special {
 	struct {
-		bool blocked;
-		bool need_qs;
-	} b;
-	short s;
+		u8 blocked;
+		u8 need_qs;
+		u8 exp_need_qs;
+		u8 pad;	/* Otherwise the compiler can store garbage here. */
+	} b; /* Bits. */
+	u32 s; /* Set of bits. */
 };
 struct rcu_node;
 
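The rcu_special rework replaces two bools plus a short with four u8s overlaid by a u32: giving the padding byte a defined value is what makes the overlay reliable, since the compiler may otherwise leave garbage in that byte and defeat whole-word tests through .s. A standalone sketch of the pattern using standard fixed-width types (u8/u32 are kernel typedefs; the printf is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	union rcu_special_sketch {
		struct {
			uint8_t blocked;
			uint8_t need_qs;
			uint8_t exp_need_qs;
			uint8_t pad;	/* defined value, so .s never sees garbage */
		} b;			/* Bits. */
		uint32_t s;		/* Set of bits. */
	};

	int main(void)
	{
		union rcu_special_sketch rs = { .s = 0 };	/* clear every flag at once */

		rs.b.need_qs = 1;	/* set one flag individually... */
		if (rs.s != 0)		/* ...then test them all with a single load */
			printf("RCU work pending (s=%#x)\n", (unsigned)rs.s);
		return 0;
	}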