author	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-18 11:27:54 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-18 11:27:54 -0400
commit	b8ae30ee26d379db436b0b8c8c3ff1b52f69e5d1 (patch)
tree	506aa0b4bdbf90f61e7e9261c7db90aa1452dcce /include
parent	4d7b4ac22fbec1a03206c6cde353f2fd6942f828 (diff)
parent	9c6f7e43b4e02c161b53e97ba913855246876c61 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (49 commits)
stop_machine: Move local variable closer to the usage site in cpu_stop_cpu_callback()
sched, wait: Use wrapper functions
sched: Remove a stale comment
ondemand: Make the iowait-is-busy time a sysfs tunable
ondemand: Solve a big performance issue by counting IOWAIT time as busy
sched: Introduce get_cpu_iowait_time_us()
sched: Eliminate the ts->idle_lastupdate field
sched: Fold updating of the last_update_time_info into update_ts_time_stats()
sched: Update the idle statistics in get_cpu_idle_time_us()
sched: Introduce a function to update the idle statistics
sched: Add a comment to get_cpu_idle_time_us()
cpu_stop: add dummy implementation for UP
sched: Remove rq argument to the tracepoints
rcu: need barrier() in UP synchronize_sched_expedited()
sched: correctly place paranoia memory barriers in synchronize_sched_expedited()
sched: kill paranoia check in synchronize_sched_expedited()
sched: replace migration_thread with cpu_stop
stop_machine: reimplement using cpu_stop
cpu_stop: implement stop_cpu[s]()
sched: Fix select_idle_sibling() logic in select_task_rq_fair()
...
Diffstat (limited to 'include')
-rw-r--r--	include/linux/cpuset.h	16
-rw-r--r--	include/linux/rcutiny.h	2
-rw-r--r--	include/linux/rcutree.h	1
-rw-r--r--	include/linux/sched.h	70
-rw-r--r--	include/linux/stop_machine.h	122
-rw-r--r--	include/linux/tick.h	5
-rw-r--r--	include/linux/wait.h	35
-rw-r--r--	include/trace/events/sched.h	32
8 files changed, 166 insertions, 117 deletions
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index a5740fc4d04b..a73454aec333 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -21,8 +21,7 @@ extern int number_of_cpusets;	/* How many cpusets are defined in system? */
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p,
-				       struct cpumask *mask);
+extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -69,9 +68,6 @@ struct seq_file;
 extern void cpuset_task_status_allowed(struct seq_file *m,
 					struct task_struct *task);
 
-extern void cpuset_lock(void);
-extern void cpuset_unlock(void);
-
 extern int cpuset_mem_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
@@ -105,10 +101,11 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 {
 	cpumask_copy(mask, cpu_possible_mask);
 }
-static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
-					      struct cpumask *mask)
+
+static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
-	cpumask_copy(mask, cpu_possible_mask);
+	cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+	return cpumask_any(cpu_active_mask);
 }
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -157,9 +154,6 @@ static inline void cpuset_task_status_allowed(struct seq_file *m,
 {
 }
 
-static inline void cpuset_lock(void) {}
-static inline void cpuset_unlock(void) {}
-
 static inline int cpuset_mem_spread_node(void)
 {
 	return 0;
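
The new cpuset_cpus_allowed_fallback() widens a task's affinity mask and returns a CPU it can run on, for the case where every CPU the task was allowed to use has gone offline. A minimal caller sketch (the function below is hypothetical; the in-tree caller added by this series is select_fallback_rq() in kernel/sched.c):

	static int pick_fallback_cpu(struct task_struct *p)
	{
		int dest_cpu;

		/* prefer a still-active CPU from the task's current mask */
		dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
		if (dest_cpu < nr_cpu_ids)
			return dest_cpu;

		/* none left: let the cpuset code widen the mask and pick one */
		return cpuset_cpus_allowed_fallback(p);
	}
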
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 14e5a76b2c06..e2e893144a84 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -64,8 +64,6 @@ static inline long rcu_batches_completed_bh(void)
 	return 0;
 }
 
-extern int rcu_expedited_torture_stats(char *page);
-
 static inline void rcu_force_quiescent_state(void)
 {
 }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 48282055e83d..c0ed1c056f29 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -36,7 +36,6 @@ extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
 extern void rcu_note_context_switch(int cpu);
 extern int rcu_needs_cpu(int cpu);
-extern int rcu_expedited_torture_stats(char *page);
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 28b71ee133f0..b55e988988b5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -274,11 +274,17 @@ extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 extern int get_nohz_load_balancer(void);
+extern int nohz_ratelimit(int cpu);
 #else
 static inline int select_nohz_load_balancer(int cpu)
 {
 	return 0;
 }
+
+static inline int nohz_ratelimit(int cpu)
+{
+	return 0;
+}
 #endif
 
 /*
@@ -953,6 +959,7 @@ struct sched_domain {
 	char *name;
 #endif
 
+	unsigned int span_weight;
 	/*
 	 * Span of all CPUs in this domain.
 	 *
@@ -1025,12 +1032,17 @@ struct sched_domain;
 #define WF_SYNC		0x01		/* waker goes to sleep after wakup */
 #define WF_FORK		0x02		/* child wakeup after fork */
 
+#define ENQUEUE_WAKEUP		1
+#define ENQUEUE_WAKING		2
+#define ENQUEUE_HEAD		4
+
+#define DEQUEUE_SLEEP		1
+
 struct sched_class {
 	const struct sched_class *next;
 
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
-			      bool head);
-	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
+	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
 	void (*yield_task) (struct rq *rq);
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
@@ -1039,7 +1051,8 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
+	int  (*select_task_rq)(struct rq *rq, struct task_struct *p,
+			       int sd_flag, int flags);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
@@ -1076,36 +1089,8 @@ struct load_weight {
 	unsigned long weight, inv_weight;
 };
 
-/*
- * CFS stats for a schedulable entity (task, task-group etc)
- *
- * Current field usage histogram:
- *
- *     4 se->block_start
- *     4 se->run_node
- *     4 se->sleep_start
- *     6 se->load.weight
- */
-struct sched_entity {
-	struct load_weight	load;		/* for load-balancing */
-	struct rb_node		run_node;
-	struct list_head	group_node;
-	unsigned int		on_rq;
-
-	u64			exec_start;
-	u64			sum_exec_runtime;
-	u64			vruntime;
-	u64			prev_sum_exec_runtime;
-
-	u64			last_wakeup;
-	u64			avg_overlap;
-
-	u64			nr_migrations;
-
-	u64			start_runtime;
-	u64			avg_wakeup;
-
 #ifdef CONFIG_SCHEDSTATS
+struct sched_statistics {
 	u64			wait_start;
 	u64			wait_max;
 	u64			wait_count;
@@ -1137,6 +1122,24 @@ struct sched_entity {
 	u64			nr_wakeups_affine_attempts;
 	u64			nr_wakeups_passive;
 	u64			nr_wakeups_idle;
+};
+#endif
+
+struct sched_entity {
+	struct load_weight	load;		/* for load-balancing */
+	struct rb_node		run_node;
+	struct list_head	group_node;
+	unsigned int		on_rq;
+
+	u64			exec_start;
+	u64			sum_exec_runtime;
+	u64			vruntime;
+	u64			prev_sum_exec_runtime;
+
+	u64			nr_migrations;
+
+#ifdef CONFIG_SCHEDSTATS
+	struct sched_statistics statistics;
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1839,6 +1842,7 @@ extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 
 #ifdef CONFIG_HOTPLUG_CPU
+extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
 extern void idle_task_exit(void);
 #else
 static inline void idle_task_exit(void) {}
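
The enqueue_task()/dequeue_task() hooks now take a single flags word instead of the old wakeup/head booleans. A rough illustration of how a scheduling class might decode the new ENQUEUE_* flags (this function and its helpers are invented for illustration, not taken from the tree):

	static void example_enqueue_task(struct rq *rq, struct task_struct *p, int flags)
	{
		int wakeup = flags & ENQUEUE_WAKEUP;	/* task is waking up, not being migrated */
		int head   = flags & ENQUEUE_HEAD;	/* queue at the head of the run list */

		if (wakeup)
			account_wakeup(p);		/* hypothetical helper */
		add_to_runqueue(rq, p, head);		/* hypothetical helper */
	}
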
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index baba3a23a814..6b524a0d02e4 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -1,13 +1,101 @@
 #ifndef _LINUX_STOP_MACHINE
 #define _LINUX_STOP_MACHINE
-/* "Bogolock": stop the entire machine, disable interrupts.  This is a
-   very heavy lock, which is equivalent to grabbing every spinlock
-   (and more).  So the "read" side to such a lock is anything which
-   disables preeempt. */
+
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/list.h>
 #include <asm/system.h>
 
+/*
+ * stop_cpu[s]() is simplistic per-cpu maximum priority cpu
+ * monopolization mechanism.  The caller can specify a non-sleeping
+ * function to be executed on a single or multiple cpus preempting all
+ * other processes and monopolizing those cpus until it finishes.
+ *
+ * Resources for this mechanism are preallocated when a cpu is brought
+ * up and requests are guaranteed to be served as long as the target
+ * cpus are online.
+ */
+typedef int (*cpu_stop_fn_t)(void *arg);
+
+#ifdef CONFIG_SMP
+
+struct cpu_stop_work {
+	struct list_head	list;		/* cpu_stopper->works */
+	cpu_stop_fn_t		fn;
+	void			*arg;
+	struct cpu_stop_done	*done;
+};
+
+int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
+void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+			 struct cpu_stop_work *work_buf);
+int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+
+#else	/* CONFIG_SMP */
+
+#include <linux/workqueue.h>
+
+struct cpu_stop_work {
+	struct work_struct	work;
+	cpu_stop_fn_t		fn;
+	void			*arg;
+};
+
+static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
+{
+	int ret = -ENOENT;
+	preempt_disable();
+	if (cpu == smp_processor_id())
+		ret = fn(arg);
+	preempt_enable();
+	return ret;
+}
+
+static void stop_one_cpu_nowait_workfn(struct work_struct *work)
+{
+	struct cpu_stop_work *stwork =
+		container_of(work, struct cpu_stop_work, work);
+	preempt_disable();
+	stwork->fn(stwork->arg);
+	preempt_enable();
+}
+
+static inline void stop_one_cpu_nowait(unsigned int cpu,
+				       cpu_stop_fn_t fn, void *arg,
+				       struct cpu_stop_work *work_buf)
+{
+	if (cpu == smp_processor_id()) {
+		INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);
+		work_buf->fn = fn;
+		work_buf->arg = arg;
+		schedule_work(&work_buf->work);
+	}
+}
+
+static inline int stop_cpus(const struct cpumask *cpumask,
+			    cpu_stop_fn_t fn, void *arg)
+{
+	if (cpumask_test_cpu(raw_smp_processor_id(), cpumask))
+		return stop_one_cpu(raw_smp_processor_id(), fn, arg);
+	return -ENOENT;
+}
+
+static inline int try_stop_cpus(const struct cpumask *cpumask,
+				cpu_stop_fn_t fn, void *arg)
+{
+	return stop_cpus(cpumask, fn, arg);
+}
+
+#endif	/* CONFIG_SMP */
+
+/*
+ * stop_machine "Bogolock": stop the entire machine, disable
+ * interrupts.  This is a very heavy lock, which is equivalent to
+ * grabbing every spinlock (and more).  So the "read" side to such a
+ * lock is anything which disables preeempt.
+ */
 #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
 
 /**
@@ -36,24 +124,7 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
  */
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
 
-/**
- * stop_machine_create: create all stop_machine threads
- *
- * Description: This causes all stop_machine threads to be created before
- * stop_machine actually gets called. This can be used by subsystems that
- * need a non failing stop_machine infrastructure.
- */
-int stop_machine_create(void);
-
-/**
- * stop_machine_destroy: destroy all stop_machine threads
- *
- * Description: This causes all stop_machine threads which were created with
- * stop_machine_create to be destroyed again.
- */
-void stop_machine_destroy(void);
-
-#else
+#else	 /* CONFIG_STOP_MACHINE && CONFIG_SMP */
 
 static inline int stop_machine(int (*fn)(void *), void *data,
 			       const struct cpumask *cpus)
@@ -65,8 +136,5 @@ static inline int stop_machine(int (*fn)(void *), void *data,
 	return ret;
 }
 
-static inline int stop_machine_create(void) { return 0; }
-static inline void stop_machine_destroy(void) { }
-
-#endif /* CONFIG_SMP */
-#endif /* _LINUX_STOP_MACHINE */
+#endif	/* CONFIG_STOP_MACHINE && CONFIG_SMP */
+#endif	/* _LINUX_STOP_MACHINE */
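
The header comment above describes the new cpu_stop interface; a minimal usage sketch follows (the callback, its argument and the surrounding code are hypothetical; only stop_one_cpu() comes from this patch). The callback runs on the target CPU at maximum priority, preempting everything else, and must not sleep:

	static int drain_percpu_state(void *arg)
	{
		struct my_state *st = arg;	/* hypothetical per-cpu state */

		/* runs with exclusive use of the CPU; keep it short and non-blocking */
		st->drained = 1;
		return 0;
	}

	/* blocks until drain_percpu_state() has completed on @cpu */
	err = stop_one_cpu(cpu, drain_percpu_state, &state);
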
diff --git a/include/linux/tick.h b/include/linux/tick.h
index d2ae79e21be3..b232ccc0ee29 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -42,6 +42,7 @@ enum tick_nohz_mode {
  * @idle_waketime:	Time when the idle was interrupted
  * @idle_exittime:	Time when the idle state was left
  * @idle_sleeptime:	Sum of the time slept in idle with sched tick stopped
+ * @iowait_sleeptime:	Sum of the time slept in idle with sched tick stopped, with IO outstanding
  * @sleep_length:	Duration of the current idle sleep
  * @do_timer_lst:	CPU was the last one doing do_timer before going idle
  */
@@ -60,7 +61,7 @@ struct tick_sched {
 	ktime_t				idle_waketime;
 	ktime_t				idle_exittime;
 	ktime_t				idle_sleeptime;
-	ktime_t				idle_lastupdate;
+	ktime_t				iowait_sleeptime;
 	ktime_t				sleep_length;
 	unsigned long			last_jiffies;
 	unsigned long			next_jiffies;
@@ -124,6 +125,7 @@ extern void tick_nohz_stop_sched_tick(int inidle);
 extern void tick_nohz_restart_sched_tick(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
+extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
 # else
 static inline void tick_nohz_stop_sched_tick(int inidle) { }
 static inline void tick_nohz_restart_sched_tick(void) { }
@@ -134,6 +136,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void)
 	return len;
 }
 static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
+static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
 # endif /* !NO_HZ */
 
 #endif
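
get_cpu_iowait_time_us() exposes the new iowait_sleeptime accounting so that a cpufreq governor such as ondemand can treat time spent waiting on IO as busy time. A rough sketch of that use, assuming io_is_busy is the governor's tunable (variable names are illustrative):

	u64 wall, idle_us, iowait_us;

	idle_us   = get_cpu_idle_time_us(cpu, &wall);
	iowait_us = get_cpu_iowait_time_us(cpu, &wall);

	/* counting iowait as busy keeps the CPU looking loaded, so the
	   frequency is not dropped while IO is outstanding */
	if (io_is_busy && iowait_us < idle_us)
		idle_us -= iowait_us;
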
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a48e16b77d5e..76d96d035ea0 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -127,12 +127,26 @@ static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
 /*
  * Used for wake-one threads:
  */
+static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
+					      wait_queue_t *wait)
+{
+	wait->flags |= WQ_FLAG_EXCLUSIVE;
+	__add_wait_queue(q, wait);
+}
+
 static inline void __add_wait_queue_tail(wait_queue_head_t *head,
 					 wait_queue_t *new)
 {
 	list_add_tail(&new->task_list, &head->task_list);
 }
 
+static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
+					      wait_queue_t *wait)
+{
+	wait->flags |= WQ_FLAG_EXCLUSIVE;
+	__add_wait_queue_tail(q, wait);
+}
+
 static inline void __remove_wait_queue(wait_queue_head_t *head,
 					wait_queue_t *old)
 {
@@ -404,25 +418,6 @@ do { \
 })
 
 /*
- * Must be called with the spinlock in the wait_queue_head_t held.
- */
-static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
-						   wait_queue_t * wait)
-{
-	wait->flags |= WQ_FLAG_EXCLUSIVE;
-	__add_wait_queue_tail(q,  wait);
-}
-
-/*
- * Must be called with the spinlock in the wait_queue_head_t held.
- */
-static inline void remove_wait_queue_locked(wait_queue_head_t *q,
-					    wait_queue_t * wait)
-{
-	__remove_wait_queue(q,  wait);
-}
-
-/*
  * These are the old interfaces to sleep waiting for an event.
  * They are racy.  DO NOT use them, use the wait_event* interfaces above.
  * We plan to remove these interfaces.
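
__add_wait_queue_tail_exclusive() takes over from the removed add_wait_queue_exclusive_locked(); like its predecessor it expects the caller to hold the waitqueue spinlock. A small sketch of a caller taking that lock itself (illustrative only; the wrapper function is not from the tree):

	static void queue_exclusive_waiter(wait_queue_head_t *q, wait_queue_t *wait)
	{
		unsigned long flags;

		spin_lock_irqsave(&q->lock, flags);
		__add_wait_queue_tail_exclusive(q, wait);	/* sets WQ_FLAG_EXCLUSIVE */
		spin_unlock_irqrestore(&q->lock, flags);
	}
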
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index cfceb0b73e20..4f733ecea46e 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -51,15 +51,12 @@ TRACE_EVENT(sched_kthread_stop_ret,
 
 /*
  * Tracepoint for waiting on task to unschedule:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- *        but used by the latency tracer plugin. )
  */
 TRACE_EVENT(sched_wait_task,
 
-	TP_PROTO(struct rq *rq, struct task_struct *p),
+	TP_PROTO(struct task_struct *p),
 
-	TP_ARGS(rq, p),
+	TP_ARGS(p),
 
 	TP_STRUCT__entry(
 		__array(	char,	comm,	TASK_COMM_LEN	)
@@ -79,15 +76,12 @@ TRACE_EVENT(sched_wait_task,
 
 /*
  * Tracepoint for waking up a task:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- *        but used by the latency tracer plugin. )
 */
 DECLARE_EVENT_CLASS(sched_wakeup_template,
 
-	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
+	TP_PROTO(struct task_struct *p, int success),
 
-	TP_ARGS(rq, p, success),
+	TP_ARGS(p, success),
 
 	TP_STRUCT__entry(
 		__array(	char,	comm,	TASK_COMM_LEN	)
@@ -111,31 +105,25 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
 );
 
 DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
-	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
-	     TP_ARGS(rq, p, success));
+	     TP_PROTO(struct task_struct *p, int success),
+	     TP_ARGS(p, success));
 
 /*
  * Tracepoint for waking up a new task:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- *        but used by the latency tracer plugin. )
 */
 DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
-	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
-	     TP_ARGS(rq, p, success));
+	     TP_PROTO(struct task_struct *p, int success),
+	     TP_ARGS(p, success));
 
 /*
  * Tracepoint for task switches, performed by the scheduler:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- *        but used by the latency tracer plugin. )
 */
 TRACE_EVENT(sched_switch,
 
-	TP_PROTO(struct rq *rq, struct task_struct *prev,
+	TP_PROTO(struct task_struct *prev,
 		 struct task_struct *next),
 
-	TP_ARGS(rq, prev, next),
+	TP_ARGS(prev, next),
 
 	TP_STRUCT__entry(
 		__array(	char,	prev_comm,	TASK_COMM_LEN	)