Diffstat (limited to 'include/linux/sched.h')

-rw-r--r--	include/linux/sched.h | 36 ++++++++++++++++++++++++++++--------
1 file changed, 28 insertions(+), 8 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 68daf4f27e2c..ad93e1ec8c65 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -220,7 +220,7 @@ extern char ___assert_task_state[1 - 2*!!(
 		((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task)	\
 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
-				 (task->flags & PF_FREEZING) == 0)
+				 (task->flags & PF_FROZEN) == 0)
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
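
With this change, task_contributes_to_load() excludes tasks that are already frozen (PF_FROZEN) rather than tasks for which a freeze is merely in progress (PF_FREEZING, which this patch removes further down). A minimal, hypothetical sketch of how loadavg-style accounting might use the macro; the helper name and the task array are illustrative only, not part of this patch:

#include <linux/sched.h>

/* Hypothetical sketch: count tasks that still contribute to the load average. */
static unsigned long example_count_loadavg_tasks(struct task_struct *tasks[], int n)
{
	unsigned long nr = 0;
	int i;

	/* A frozen TASK_UNINTERRUPTIBLE task is skipped after this change. */
	for (i = 0; i < n; i++)
		if (task_contributes_to_load(tasks[i]))
			nr++;

	return nr;
}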
@@ -273,9 +273,11 @@ extern int runqueue_is_locked(int cpu);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern void select_nohz_load_balancer(int stop_tick);
+extern void set_cpu_sd_state_idle(void);
 extern int get_nohz_timer_target(void);
 #else
 static inline void select_nohz_load_balancer(int stop_tick) { }
+static inline void set_cpu_sd_state_idle(void) { }
 #endif
 
 /*
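
The empty inline stub in the #else branch keeps call sites free of config guards: when CONFIG_SMP or CONFIG_NO_HZ is off, the call compiles to nothing. A sketch of a hypothetical caller on the idle-tick path (the function name is illustrative, not from this patch):

#include <linux/sched.h>

/* Hypothetical sketch: entering nohz idle without any #ifdef at the call site. */
static void example_enter_nohz_idle(void)
{
	set_cpu_sd_state_idle();	/* mark this CPU's sched domains idle */
	select_nohz_load_balancer(1);	/* stop_tick = 1 */
}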
@@ -483,8 +485,8 @@ struct task_cputime {
 
 #define INIT_CPUTIME	\
 	(struct task_cputime) {					\
-		.utime = cputime_zero,				\
-		.stime = cputime_zero,				\
+		.utime = 0,					\
+		.stime = 0,					\
 		.sum_exec_runtime = 0,				\
 	}
 
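
INIT_CPUTIME now initializes .utime and .stime from plain 0 instead of the old cputime_zero constant; its use is unchanged. A trivial sketch, with a hypothetical helper name:

#include <linux/sched.h>

/* Hypothetical sketch: resetting a task_cputime accumulator. */
static void example_reset_cputime(struct task_cputime *times)
{
	*times = INIT_CPUTIME;
}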
@@ -901,6 +903,10 @@ struct sched_group_power {
 	 * single CPU.
 	 */
 	unsigned int power, power_orig;
+	/*
+	 * Number of busy cpus in this group.
+	 */
+	atomic_t nr_busy_cpus;
 };
 
 struct sched_group {
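
nr_busy_cpus is an atomic_t so it can be read and updated without holding a lock. A sketch of how idle entry/exit code might keep it current; the helper names are hypothetical and not part of this patch:

#include <linux/atomic.h>
#include <linux/sched.h>

/* Hypothetical sketch: track busy CPUs in a group as they enter and leave idle. */
static void example_mark_cpu_busy(struct sched_group_power *sgp)
{
	atomic_inc(&sgp->nr_busy_cpus);
}

static void example_mark_cpu_idle(struct sched_group_power *sgp)
{
	atomic_dec(&sgp->nr_busy_cpus);
}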
@@ -925,6 +931,15 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
 	return to_cpumask(sg->cpumask);
 }
 
+/**
+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+ * @group: The group whose first cpu is to be returned.
+ */
+static inline unsigned int group_first_cpu(struct sched_group *group)
+{
+	return cpumask_first(sched_group_cpus(group));
+}
+
 struct sched_domain_attr {
 	int relax_domain_level;
 };
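
A common use of a "first CPU of the group" helper is electing one CPU to do work on behalf of the whole group. A hypothetical sketch (the function name is illustrative; smp_processor_id() assumes preemption is disabled at the call site):

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/smp.h>

/* Hypothetical sketch: is the current CPU the designated first CPU of its group? */
static bool example_am_first_in_group(struct sched_group *group)
{
	return smp_processor_id() == group_first_cpu(group);
}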
@@ -1315,8 +1330,8 @@ struct task_struct {
 	 * older sibling, respectively.  (p->father can be replaced with
 	 * p->real_parent->pid)
 	 */
-	struct task_struct *real_parent; /* real parent process */
-	struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
+	struct task_struct __rcu *real_parent; /* real parent process */
+	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
 	/*
 	 * children/sibling forms the list of my natural children
 	 */
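
The __rcu annotation marks these pointers as RCU-protected so sparse can flag dereferences that bypass rcu_dereference(). A minimal sketch of a lockless reader; the helper name is illustrative only, not part of this patch:

#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Hypothetical sketch: read the RCU-protected parent pointer without tasklist_lock. */
static pid_t example_parent_pid(struct task_struct *p)
{
	struct task_struct *parent;
	pid_t pid;

	rcu_read_lock();
	parent = rcu_dereference(p->real_parent);
	pid = parent->pid;
	rcu_read_unlock();

	return pid;
}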
@@ -1521,7 +1536,6 @@ struct task_struct {
 #ifdef CONFIG_FAULT_INJECTION
 	int make_it_fail;
 #endif
-	struct prop_local_single dirties;
 	/*
 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
 	 * balance_dirty_pages() for some dirty throttling pause
@@ -1773,7 +1787,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
-#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
@@ -1789,7 +1802,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
-#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
@@ -2071,6 +2083,14 @@ extern int sched_setscheduler(struct task_struct *, int,
 extern int sched_setscheduler_nocheck(struct task_struct *, int,
 			      const struct sched_param *);
 extern struct task_struct *idle_task(int cpu);
+/**
+ * is_idle_task - is the specified task an idle task?
+ * @tsk: the task in question.
+ */
+static inline bool is_idle_task(struct task_struct *p)
+{
+	return p->pid == 0;
+}
 extern struct task_struct *curr_task(int cpu);
 extern void set_curr_task(int cpu, struct task_struct *p);
 
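
is_idle_task() identifies the per-CPU idle task by its PID of 0. A hypothetical usage sketch (the helper name is illustrative, not part of this patch):

#include <linux/types.h>
#include <linux/sched.h>

/* Hypothetical sketch: skip the idle task when deciding whether to account runtime. */
static bool example_should_account(struct task_struct *p)
{
	return !is_idle_task(p);
}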