author     Frederic Weisbecker <fweisbec@gmail.com>   2013-04-20 11:11:50 -0400
committer  Frederic Weisbecker <fweisbec@gmail.com>   2013-04-22 14:29:07 -0400
commit     99e5ada9407cc19d7c4c05ce2165f20dc46fc093
tree       6d39bf6095e18b82f78b3cc405d49fbb8836b299
parent     5811d9963e26146898a24b535b301f7654257f8a
nohz: Re-evaluate the tick for the new task after a context switch
When a task is scheduled in, it may have properties of its own that
make the CPU reconsider the need for the tick: posix cpu timers,
perf events, ...

So notify the full dynticks subsystem when a task gets scheduled in
and re-check the tick dependency at that point. This is done through
a self-IPI, so the re-evaluation runs from interrupt context rather
than under whatever locks are held across the context switch.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Gilad Ben Yossef <gilad@benyossef.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kevin Hilman <khilman@linaro.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
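
[Editor's note, not part of the commit message: a simplified, commented sketch of the call path this patch adds. It mirrors the hunks below and uses only the helpers they reference (tick_nohz_full_cpu(), tick_nohz_tick_stopped(), can_stop_full_tick(), tick_nohz_full_kick()), so it is an annotated restatement rather than the literal patched code.]

/* kernel/sched/core.c: finish_task_switch() runs right after every switch. */
static void finish_task_switch(struct rq *rq, struct task_struct *prev)
{
        ...
        /* Let full dynticks re-check whether the incoming task needs the tick. */
        tick_nohz_task_switch(current);
}

/* kernel/time/tick-sched.c */
void tick_nohz_task_switch(struct task_struct *tsk)
{
        unsigned long flags;

        /* Only full-dynticks CPUs ever stop their tick; others need no re-check. */
        if (!tick_nohz_full_cpu(smp_processor_id()))
                return;

        local_irq_save(flags);

        /*
         * If the tick is already stopped but the incoming task pins it
         * (posix cpu timers, perf events, ...), send a self-IPI: the tick
         * is then restarted from the IPI handler, outside the scheduler's
         * locks.
         */
        if (tick_nohz_tick_stopped() && !can_stop_full_tick())
                tick_nohz_full_kick();

        local_irq_restore(flags);
}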
 include/linux/tick.h     |  2 ++
 kernel/sched/core.c      |  2 ++
 kernel/time/tick-sched.c | 20 ++++++++++++++++++++
 3 files changed, 24 insertions, 0 deletions
diff --git a/include/linux/tick.h b/include/linux/tick.h
index e31e67623ea1..9180f4b85e6d 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -163,12 +163,14 @@ extern int tick_nohz_full_cpu(int cpu);
 extern void tick_nohz_full_check(void);
 extern void tick_nohz_full_kick(void);
 extern void tick_nohz_full_kick_all(void);
+extern void tick_nohz_task_switch(struct task_struct *tsk);
 #else
 static inline void tick_nohz_init(void) { }
 static inline int tick_nohz_full_cpu(int cpu) { return 0; }
 static inline void tick_nohz_full_check(void) { }
 static inline void tick_nohz_full_kick(void) { }
 static inline void tick_nohz_full_kick_all(void) { }
+static inline void tick_nohz_task_switch(struct task_struct *tsk) { }
 #endif
 
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9ad35005f1cb..dd09def88567 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1896,6 +1896,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 		kprobe_flush_task(prev);
 		put_task_struct(prev);
 	}
+
+	tick_nohz_task_switch(current);
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index d0ed1905a85c..12a900dbb819 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -232,6 +232,26 @@ void tick_nohz_full_kick_all(void)
 	preempt_enable();
 }
 
+/*
+ * Re-evaluate the need for the tick as we switch the current task.
+ * It might need the tick due to per task/process properties:
+ * perf events, posix cpu timers, ...
+ */
+void tick_nohz_task_switch(struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	if (!tick_nohz_full_cpu(smp_processor_id()))
+		return;
+
+	local_irq_save(flags);
+
+	if (tick_nohz_tick_stopped() && !can_stop_full_tick())
+		tick_nohz_full_kick();
+
+	local_irq_restore(flags);
+}
+
 int tick_nohz_full_cpu(int cpu)
 {
 	if (!have_nohz_full_mask)