author		Steven Rostedt <srostedt@redhat.com>	2010-05-14 09:29:52 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2010-05-14 09:29:52 -0400
commit		23e117fa44429cc054cb27d5621d64e4ced91e52 (patch)
tree		a4b9d0902b9c6f009b2c297515221c1b9bed3af8 /include/linux/sched.h
parent		668eb65f092902eb7dd526af73d4a7f025a94612 (diff)
parent		a93d2f1744206827ccf416e2cdc5018aa503314e (diff)
Merge branch 'sched/core' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip into trace/tip/tracing/core-4
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	70
1 file changed, 37 insertions(+), 33 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index dad7f668ebf7..dfea40574b2a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -275,11 +275,17 @@ extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 extern int get_nohz_load_balancer(void);
+extern int nohz_ratelimit(int cpu);
 #else
 static inline int select_nohz_load_balancer(int cpu)
 {
 	return 0;
 }
+
+static inline int nohz_ratelimit(int cpu)
+{
+	return 0;
+}
 #endif
 
 /*
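
Note: the new nohz_ratelimit() hook lets the scheduler rate-limit how often a CPU may stop its periodic tick; the !NO_HZ stub above always returns 0. A minimal sketch of a call site, assuming a hypothetical helper named maybe_stop_tick() (the real caller lives in the tick code and is not part of this diff):

	static void maybe_stop_tick(int cpu)
	{
		/* Scheduler says this cpu tried to stop its tick too recently. */
		if (nohz_ratelimit(cpu))
			return;

		/* ... otherwise proceed to stop the periodic tick ... */
	}
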
@@ -954,6 +960,7 @@ struct sched_domain {
 	char *name;
 #endif
 
+	unsigned int span_weight;
 	/*
 	 * Span of all CPUs in this domain.
 	 *
@@ -1026,12 +1033,17 @@ struct sched_domain;
 #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
 #define WF_FORK		0x02		/* child wakeup after fork */
 
+#define ENQUEUE_WAKEUP		1
+#define ENQUEUE_WAKING		2
+#define ENQUEUE_HEAD		4
+
+#define DEQUEUE_SLEEP		1
+
 struct sched_class {
 	const struct sched_class *next;
 
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
-			      bool head);
-	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
+	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
 	void (*yield_task) (struct rq *rq);
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
@@ -1040,7 +1052,8 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
+	int (*select_task_rq)(struct rq *rq, struct task_struct *p,
+			      int sd_flag, int flags);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
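
Note: the separate wakeup/head/sleep arguments of enqueue_task/dequeue_task are folded into a single flags word, using the ENQUEUE_*/DEQUEUE_* bits defined just above sched_class. A sketch of how a scheduling class might test the new bits (illustrative shape only, not the actual CFS implementation):

	static void enqueue_task_example(struct rq *rq, struct task_struct *p,
					 int flags)
	{
		if (flags & ENQUEUE_WAKEUP) {
			/* task is waking: the old wakeup=1 path */
		}
		if (flags & ENQUEUE_HEAD) {
			/* queue at the head of its list: the old head=true path */
		}
		/* ... place p on the runqueue ... */
	}
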
@@ -1077,36 +1090,8 @@ struct load_weight {
 	unsigned long weight, inv_weight;
 };
 
-/*
- * CFS stats for a schedulable entity (task, task-group etc)
- *
- * Current field usage histogram:
- *
- *     4 se->block_start
- *     4 se->run_node
- *     4 se->sleep_start
- *     6 se->load.weight
- */
-struct sched_entity {
-	struct load_weight	load;		/* for load-balancing */
-	struct rb_node		run_node;
-	struct list_head	group_node;
-	unsigned int		on_rq;
-
-	u64			exec_start;
-	u64			sum_exec_runtime;
-	u64			vruntime;
-	u64			prev_sum_exec_runtime;
-
-	u64			last_wakeup;
-	u64			avg_overlap;
-
-	u64			nr_migrations;
-
-	u64			start_runtime;
-	u64			avg_wakeup;
-
 #ifdef CONFIG_SCHEDSTATS
+struct sched_statistics {
 	u64			wait_start;
 	u64			wait_max;
 	u64			wait_count;
@@ -1138,6 +1123,24 @@ struct sched_entity {
 	u64			nr_wakeups_affine_attempts;
 	u64			nr_wakeups_passive;
 	u64			nr_wakeups_idle;
+};
+#endif
+
+struct sched_entity {
+	struct load_weight	load;		/* for load-balancing */
+	struct rb_node		run_node;
+	struct list_head	group_node;
+	unsigned int		on_rq;
+
+	u64			exec_start;
+	u64			sum_exec_runtime;
+	u64			vruntime;
+	u64			prev_sum_exec_runtime;
+
+	u64			nr_migrations;
+
+#ifdef CONFIG_SCHEDSTATS
+	struct sched_statistics statistics;
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
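
Note: the schedstat counters move out of sched_entity into a separate struct sched_statistics, embedded as se.statistics only under CONFIG_SCHEDSTATS (the old last_wakeup/avg_overlap/start_runtime/avg_wakeup fields are dropped entirely). Readers of these counters now go one level deeper; a sketch, with the accessor name being hypothetical:

	static u64 task_wait_max(struct task_struct *p)
	{
	#ifdef CONFIG_SCHEDSTATS
		return p->se.statistics.wait_max;	/* was p->se.wait_max */
	#else
		return 0;				/* stats not compiled in */
	#endif
	}
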
@@ -1847,6 +1850,7 @@ extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 
 #ifdef CONFIG_HOTPLUG_CPU
+extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
 extern void idle_task_exit(void);
 #else
 static inline void idle_task_exit(void) {}
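
Note: move_task_off_dead_cpu() gains an extern declaration under CONFIG_HOTPLUG_CPU, so code outside kernel/sched.c can re-home tasks from a CPU that is going offline. A hedged sketch of the kind of loop such a caller might run (the helper name and locking shown are illustrative, not taken from this commit):

	static void migrate_stragglers(int dead_cpu)
	{
		struct task_struct *g, *p;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (task_cpu(p) == dead_cpu)
				move_task_off_dead_cpu(dead_cpu, p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}
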