Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 68 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 55 insertions(+), 13 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 55e30d114477..4240f6bfa812 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -96,6 +96,7 @@ struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
 struct bio;
+struct bts_tracer;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -249,7 +250,7 @@ extern void init_idle_bootup_task(struct task_struct *idle);
 extern int runqueue_is_locked(void);
 extern void task_rq_unlock_wait(struct task_struct *p);
 
-extern cpumask_t nohz_cpu_mask;
+extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 #else
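
This hunk is part of the large-NR_CPUS cleanup: a cpumask_t embeds NR_CPUS bits directly (512 bytes at NR_CPUS=4096), so globals like nohz_cpu_mask move to cpumask_var_t, which with CONFIG_CPUMASK_OFFSTACK becomes a pointer that must be allocated with alloc_cpumask_var() and otherwise stays a one-element array with no allocation cost. A compilable userspace sketch of that trick, with made-up names (mask_model, mask_alloc, MODEL_OFFSTACK are illustrative, not kernel symbols):

    /*
     * Simplified model of the cpumask_var_t idea: when masks live
     * "offstack" the variable is a pointer that must be allocated;
     * otherwise it is a one-element array and allocation is a no-op.
     */
    #include <stdlib.h>
    #include <string.h>

    struct mask_model {
            unsigned long bits[16];         /* room for 1024 CPUs on 64-bit */
    };

    #ifdef MODEL_OFFSTACK
    typedef struct mask_model *mask_var_t;

    static int mask_alloc(mask_var_t *mask)
    {
            *mask = calloc(1, sizeof(**mask));
            return *mask != NULL;
    }
    static void mask_free(mask_var_t mask)
    {
            free(mask);
    }
    #else
    typedef struct mask_model mask_var_t[1];

    static int mask_alloc(mask_var_t *mask)
    {
            memset(*mask, 0, sizeof(**mask));       /* nothing to allocate */
            return 1;
    }
    static void mask_free(mask_var_t mask)
    {
            (void)mask;                             /* nothing to free */
    }
    #endif

    int main(void)
    {
            mask_var_t mask;

            if (!mask_alloc(&mask))
                    return 1;
            mask->bits[0] |= 1UL;                   /* mark "CPU 0" */
            mask_free(mask);
            return 0;
    }

Either way, call sites write the same code; only the typedef and the allocator change.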
@@ -259,8 +260,6 @@ static inline int select_nohz_load_balancer(int cpu)
 }
 #endif
 
-extern unsigned long rt_needs_cpu(int cpu);
-
 /*
  * Only dump TASK_* tasks. (0 for all tasks)
  */
@@ -777,7 +776,6 @@ enum cpu_idle_type {
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
-	cpumask_t cpumask;
 
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
@@ -790,8 +788,15 @@ struct sched_group {
 	 * (see include/linux/reciprocal_div.h)
 	 */
 	u32 reciprocal_cpu_power;
+
+	unsigned long cpumask[];
 };
 
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+	return to_cpumask(sg->cpumask);
+}
+
 enum sched_domain_level {
 	SD_LV_NONE = 0,
 	SD_LV_SIBLING,
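
With the fixed cpumask_t member gone, the mask becomes a flexible array member at the tail of the struct, so each allocation can be sized for the machine's real CPU count instead of NR_CPUS, and to_cpumask() converts the raw unsigned long[] back into a struct cpumask * for the sched_group_cpus() accessor. The same pattern reappears below as span[] in struct sched_domain. A compilable sketch of the pattern under made-up names (group_model and group_mask are illustrative, not kernel symbols):

    /*
     * Trailing-flexible-array pattern: the mask storage is tacked onto
     * the end of the struct and sized at allocation time rather than
     * fixed at compile time.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct mask_model {
            unsigned long bits[1];
    };

    struct group_model {
            struct group_model *next;
            unsigned int power;
            unsigned long cpumask[];        /* flexible array member */
    };

    /* plays the role of to_cpumask() behind sched_group_cpus() */
    static inline struct mask_model *group_mask(struct group_model *g)
    {
            return (struct mask_model *)g->cpumask;
    }

    int main(void)
    {
            size_t mask_bytes = 2 * sizeof(unsigned long);  /* enough for 128 CPUs on 64-bit */
            struct group_model *g = calloc(1, sizeof(*g) + mask_bytes);

            if (!g)
                    return 1;
            group_mask(g)->bits[0] |= 1UL << 3;     /* set "CPU 3" in the trailing mask */
            printf("bits[0] = %#lx\n", group_mask(g)->bits[0]);
            free(g);
            return 0;
    }

Because callers now go through the accessor rather than touching the field, the representation could change again without touching any call site.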
@@ -815,7 +820,6 @@ struct sched_domain {
 	struct sched_domain *parent;	/* top domain must be null terminated */
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
-	cpumask_t span;			/* span of all CPUs in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
@@ -870,9 +874,17 @@ struct sched_domain {
 #ifdef CONFIG_SCHED_DEBUG
 	char *name;
 #endif
+
+	/* span of all CPUs in this domain */
+	unsigned long span[];
 };
 
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+	return to_cpumask(sd->span);
+}
+
+extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
@@ -881,7 +893,7 @@ extern int arch_reinit_sched_domains(void);
 struct sched_domain_attr;
 
 static inline void
-partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 			struct sched_domain_attr *dattr_new)
 {
 }
@@ -963,7 +975,7 @@ struct sched_class {
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
-				 const cpumask_t *newmask);
+				 const struct cpumask *newmask);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
@@ -1165,6 +1177,18 @@ struct task_struct {
 	struct list_head ptraced;
 	struct list_head ptrace_entry;
 
+#ifdef CONFIG_X86_PTRACE_BTS
+	/*
+	 * This is the tracer handle for the ptrace BTS extension.
+	 * This field actually belongs to the ptracer task.
+	 */
+	struct bts_tracer *bts;
+	/*
+	 * The buffer to hold the BTS data.
+	 */
+	void *bts_buffer;
+#endif /* CONFIG_X86_PTRACE_BTS */
+
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
 	struct list_head thread_group;
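
For background: BTS is the x86 Branch Trace Store, a hardware feature that logs every taken branch into a memory buffer; bts_buffer is that buffer and bts is the handle the ptracer uses to drive it. A rough model of what one buffer entry holds, offered as an assumption about the hardware record rather than the actual ds.h interface:

    /*
     * Rough model of a BTS buffer: one fixed-size record per taken
     * branch, carrying the from and to addresses.  Layout here is a
     * simplification, not the kernel's bts_tracer API.
     */
    #include <stdio.h>

    struct bts_record_model {
            unsigned long from;     /* address the branch was taken at */
            unsigned long to;       /* address the branch landed on */
            unsigned long flags;    /* e.g. whether the branch was predicted */
    };

    int main(void)
    {
            /* a tracer would size this buffer and hand it to the hardware */
            struct bts_record_model buffer[64];

            buffer[0] = (struct bts_record_model){ 0x400123, 0x400456, 0 };
            printf("branch %#lx -> %#lx\n", buffer[0].from, buffer[0].to);
            return 0;
    }

The comment in the hunk is the important part: although the fields sit in the traced task, they logically belong to the ptracer, which allocates and frees them.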
@@ -1356,6 +1380,23 @@ struct task_struct {
 	unsigned long default_timer_slack_ns;
 
 	struct list_head *scm_work_list;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* Index of current stored address in ret_stack */
+	int curr_ret_stack;
+	/* Stack of return addresses for return function tracing */
+	struct ftrace_ret_stack *ret_stack;
+	/*
+	 * Number of functions that haven't been traced
+	 * because of depth overrun.
+	 */
+	atomic_t trace_overrun;
+	/* Pause for the tracing */
+	atomic_t tracing_graph_pause;
+#endif
+#ifdef CONFIG_TRACING
+	/* state flags for use by tracers */
+	unsigned long trace;
+#endif
 };
 
 /*
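
These fields give each task a shadow stack for the function-graph tracer: the entry hook saves the real return address in ret_stack, indexed by curr_ret_stack, so the exit hook can time the function and then return to the right place; pushes past the fixed depth only bump trace_overrun. A minimal userspace sketch of the mechanism (names, depth, and the single global stack are illustrative, not the ftrace implementation, which keeps one stack per task):

    /*
     * Shadow return stack: push on function entry, pop on exit,
     * count what was too deep to trace.
     */
    #include <stdio.h>

    #define RET_STACK_DEPTH 8

    struct ret_entry {
            unsigned long ret;      /* saved return address */
            unsigned long func;     /* function being entered */
    };

    static struct ret_entry ret_stack[RET_STACK_DEPTH];
    static int curr_ret_stack = -1; /* index of current stored address */
    static int trace_overrun;       /* entries dropped for depth overrun */

    static int push_return_trace(unsigned long ret, unsigned long func)
    {
            if (curr_ret_stack == RET_STACK_DEPTH - 1) {
                    trace_overrun++;        /* too deep: count, don't trace */
                    return -1;
            }
            ret_stack[++curr_ret_stack] = (struct ret_entry){ ret, func };
            return 0;
    }

    static unsigned long pop_return_trace(void)
    {
            return ret_stack[curr_ret_stack--].ret;
    }

    int main(void)
    {
            push_return_trace(0x1000, 0x2000);      /* outer call */
            push_return_trace(0x1004, 0x3000);      /* nested call */
            printf("popped ret %#lx\n", pop_return_trace());
            printf("popped ret %#lx\n", pop_return_trace());
            printf("overruns: %d\n", trace_overrun);
            return 0;
    }

tracing_graph_pause serves as a reentrancy guard so the tracer can suspend itself while it runs its own code.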
@@ -1594,12 +1635,12 @@ extern cputime_t task_gtime(struct task_struct *p);
 
 #ifdef CONFIG_SMP
 extern int set_cpus_allowed_ptr(struct task_struct *p,
-				const cpumask_t *new_mask);
+				const struct cpumask *new_mask);
 #else
 static inline int set_cpus_allowed_ptr(struct task_struct *p,
-				       const cpumask_t *new_mask)
+				       const struct cpumask *new_mask)
 {
-	if (!cpu_isset(0, *new_mask))
+	if (!cpumask_test_cpu(0, new_mask))
 		return -EINVAL;
 	return 0;
 }
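
The UP stub shows the API shift in miniature: cpu_isset(0, *new_mask) had to dereference the mask, while cpumask_test_cpu(0, new_mask) works on the pointer directly. Underneath, the test is a plain bit lookup; an illustrative model of it (mask_test_cpu is a made-up name, not the kernel macro):

    #include <stdio.h>
    #include <limits.h>

    #define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

    /* test one CPU bit in a mask addressed through a pointer */
    static int mask_test_cpu(int cpu, const unsigned long *mask)
    {
            return (mask[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1;
    }

    int main(void)
    {
            unsigned long mask[2] = { 1UL << 5, 0 };        /* only "CPU 5" set */

            printf("cpu5: %d, cpu6: %d\n",
                   mask_test_cpu(5, mask), mask_test_cpu(6, mask));
            return 0;
    }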
@@ -2212,8 +2253,8 @@ __trace_special(void *__tr, void *__data,
 }
 #endif
 
-extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
-extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
+extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 extern int sched_mc_power_savings, sched_smt_power_savings;
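
Note these are the in-kernel prototypes; userspace reaches the same functionality through the glibc wrappers, which take an explicit mask size instead of a struct cpumask pointer. A quick usage example:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            cpu_set_t set;

            CPU_ZERO(&set);
            CPU_SET(0, &set);                       /* pin ourselves to CPU 0 */
            if (sched_setaffinity(0, sizeof(set), &set) != 0) {
                    perror("sched_setaffinity");
                    return 1;
            }
            if (sched_getaffinity(0, sizeof(set), &set) != 0) {
                    perror("sched_getaffinity");
                    return 1;
            }
            printf("allowed on %d CPU(s)\n", CPU_COUNT(&set));
            return 0;
    }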
@@ -2224,6 +2265,7 @@ extern void normalize_rt_tasks(void);
 extern struct task_group init_task_group;
 #ifdef CONFIG_USER_SCHED
 extern struct task_group root_task_group;
+extern void set_tg_uid(struct user_struct *user);
 #endif
 
 extern struct task_group *sched_create_group(struct task_group *parent);