aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/sched.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--include/linux/sched.h65
1 files changed, 54 insertions, 11 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8cccd6dc5d66..4240f6bfa812 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -96,6 +96,7 @@ struct exec_domain;
96struct futex_pi_state; 96struct futex_pi_state;
97struct robust_list_head; 97struct robust_list_head;
98struct bio; 98struct bio;
99struct bts_tracer;
99 100
100/* 101/*
101 * List of flags we want to share for kernel threads, 102 * List of flags we want to share for kernel threads,
@@ -249,7 +250,7 @@ extern void init_idle_bootup_task(struct task_struct *idle);
249extern int runqueue_is_locked(void); 250extern int runqueue_is_locked(void);
250extern void task_rq_unlock_wait(struct task_struct *p); 251extern void task_rq_unlock_wait(struct task_struct *p);
251 252
252extern cpumask_t nohz_cpu_mask; 253extern cpumask_var_t nohz_cpu_mask;
253#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) 254#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
254extern int select_nohz_load_balancer(int cpu); 255extern int select_nohz_load_balancer(int cpu);
255#else 256#else
@@ -775,7 +776,6 @@ enum cpu_idle_type {
775 776
776struct sched_group { 777struct sched_group {
777 struct sched_group *next; /* Must be a circular list */ 778 struct sched_group *next; /* Must be a circular list */
778 cpumask_t cpumask;
779 779
780 /* 780 /*
781 * CPU power of this group, SCHED_LOAD_SCALE being max power for a 781 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
@@ -788,8 +788,15 @@ struct sched_group {
788 * (see include/linux/reciprocal_div.h) 788 * (see include/linux/reciprocal_div.h)
789 */ 789 */
790 u32 reciprocal_cpu_power; 790 u32 reciprocal_cpu_power;
791
792 unsigned long cpumask[];
791}; 793};
792 794
795static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
796{
797 return to_cpumask(sg->cpumask);
798}
799
793enum sched_domain_level { 800enum sched_domain_level {
794 SD_LV_NONE = 0, 801 SD_LV_NONE = 0,
795 SD_LV_SIBLING, 802 SD_LV_SIBLING,
@@ -813,7 +820,6 @@ struct sched_domain {
813 struct sched_domain *parent; /* top domain must be null terminated */ 820 struct sched_domain *parent; /* top domain must be null terminated */
814 struct sched_domain *child; /* bottom domain must be null terminated */ 821 struct sched_domain *child; /* bottom domain must be null terminated */
815 struct sched_group *groups; /* the balancing groups of the domain */ 822 struct sched_group *groups; /* the balancing groups of the domain */
816 cpumask_t span; /* span of all CPUs in this domain */
817 unsigned long min_interval; /* Minimum balance interval ms */ 823 unsigned long min_interval; /* Minimum balance interval ms */
818 unsigned long max_interval; /* Maximum balance interval ms */ 824 unsigned long max_interval; /* Maximum balance interval ms */
819 unsigned int busy_factor; /* less balancing by factor if busy */ 825 unsigned int busy_factor; /* less balancing by factor if busy */
@@ -868,9 +874,17 @@ struct sched_domain {
868#ifdef CONFIG_SCHED_DEBUG 874#ifdef CONFIG_SCHED_DEBUG
869 char *name; 875 char *name;
870#endif 876#endif
877
878 /* span of all CPUs in this domain */
879 unsigned long span[];
871}; 880};
872 881
873extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, 882static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
883{
884 return to_cpumask(sd->span);
885}
886
887extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
874 struct sched_domain_attr *dattr_new); 888 struct sched_domain_attr *dattr_new);
875extern int arch_reinit_sched_domains(void); 889extern int arch_reinit_sched_domains(void);
876 890
@@ -879,7 +893,7 @@ extern int arch_reinit_sched_domains(void);
879struct sched_domain_attr; 893struct sched_domain_attr;
880 894
881static inline void 895static inline void
882partition_sched_domains(int ndoms_new, cpumask_t *doms_new, 896partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
883 struct sched_domain_attr *dattr_new) 897 struct sched_domain_attr *dattr_new)
884{ 898{
885} 899}
@@ -961,7 +975,7 @@ struct sched_class {
961 void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); 975 void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
962 976
963 void (*set_cpus_allowed)(struct task_struct *p, 977 void (*set_cpus_allowed)(struct task_struct *p,
964 const cpumask_t *newmask); 978 const struct cpumask *newmask);
965 979
966 void (*rq_online)(struct rq *rq); 980 void (*rq_online)(struct rq *rq);
967 void (*rq_offline)(struct rq *rq); 981 void (*rq_offline)(struct rq *rq);
@@ -1163,6 +1177,18 @@ struct task_struct {
1163 struct list_head ptraced; 1177 struct list_head ptraced;
1164 struct list_head ptrace_entry; 1178 struct list_head ptrace_entry;
1165 1179
1180#ifdef CONFIG_X86_PTRACE_BTS
1181 /*
1182 * This is the tracer handle for the ptrace BTS extension.
1183 * This field actually belongs to the ptracer task.
1184 */
1185 struct bts_tracer *bts;
1186 /*
1187 * The buffer to hold the BTS data.
1188 */
1189 void *bts_buffer;
1190#endif /* CONFIG_X86_PTRACE_BTS */
1191
1166 /* PID/PID hash table linkage. */ 1192 /* PID/PID hash table linkage. */
1167 struct pid_link pids[PIDTYPE_MAX]; 1193 struct pid_link pids[PIDTYPE_MAX];
1168 struct list_head thread_group; 1194 struct list_head thread_group;
@@ -1354,6 +1380,23 @@ struct task_struct {
1354 unsigned long default_timer_slack_ns; 1380 unsigned long default_timer_slack_ns;
1355 1381
1356 struct list_head *scm_work_list; 1382 struct list_head *scm_work_list;
1383#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1384 /* Index of current stored address in ret_stack */
1385 int curr_ret_stack;
1386 /* Stack of return addresses for return function tracing */
1387 struct ftrace_ret_stack *ret_stack;
1388 /*
1389 * Number of functions that haven't been traced
1390 * because of depth overrun.
1391 */
1392 atomic_t trace_overrun;
1393 /* Pause for the tracing */
1394 atomic_t tracing_graph_pause;
1395#endif
1396#ifdef CONFIG_TRACING
1397 /* state flags for use by tracers */
1398 unsigned long trace;
1399#endif
1357}; 1400};
1358 1401
1359/* 1402/*
@@ -1592,12 +1635,12 @@ extern cputime_t task_gtime(struct task_struct *p);
1592 1635
1593#ifdef CONFIG_SMP 1636#ifdef CONFIG_SMP
1594extern int set_cpus_allowed_ptr(struct task_struct *p, 1637extern int set_cpus_allowed_ptr(struct task_struct *p,
1595 const cpumask_t *new_mask); 1638 const struct cpumask *new_mask);
1596#else 1639#else
1597static inline int set_cpus_allowed_ptr(struct task_struct *p, 1640static inline int set_cpus_allowed_ptr(struct task_struct *p,
1598 const cpumask_t *new_mask) 1641 const struct cpumask *new_mask)
1599{ 1642{
1600 if (!cpu_isset(0, *new_mask)) 1643 if (!cpumask_test_cpu(0, new_mask))
1601 return -EINVAL; 1644 return -EINVAL;
1602 return 0; 1645 return 0;
1603} 1646}
@@ -2210,8 +2253,8 @@ __trace_special(void *__tr, void *__data,
2210} 2253}
2211#endif 2254#endif
2212 2255
2213extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask); 2256extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2214extern long sched_getaffinity(pid_t pid, cpumask_t *mask); 2257extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2215 2258
2216extern int sched_mc_power_savings, sched_smt_power_savings; 2259extern int sched_mc_power_savings, sched_smt_power_savings;
2217 2260