diff options
author | Frederic Weisbecker <fweisbec@gmail.com> | 2011-08-10 17:21:01 -0400 |
---|---|---|
committer | Frederic Weisbecker <fweisbec@gmail.com> | 2013-03-21 10:55:59 -0400 |
commit | 1c20091e77fc5a9b7d7d905176443b4822a23cdb (patch) | |
tree | ef3c2edc749ccfc87b5f9682973f54414de746c3 /kernel/sched/core.c | |
parent | a382bf934449ddeb625167537ae81daa0211b477 (diff) |
nohz: Wake up full dynticks CPUs when a timer gets enqueued
Wake up a CPU when a timer list timer is enqueued there and
the target is part of the full dynticks range. Sending an IPI
to it makes it reconsider the next timer to program on top
of recent updates.
This may later be improved by checking if the tick is really
stopped on the target. This would need some careful
synchronization though. So deal with such optimization later
and start simple.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Gilad Ben Yossef <gilad@benyossef.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kevin Hilman <khilman@linaro.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Namhyung Kim <namhyung.kim@lge.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r-- | kernel/sched/core.c | 20 |
1 file changed, 19 insertions, 1 deletion
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 849deb96e61e..e91ee589f793 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -587,7 +587,7 @@ unlock: | |||
587 | * account when the CPU goes back to idle and evaluates the timer | 587 | * account when the CPU goes back to idle and evaluates the timer |
588 | * wheel for the next timer event. | 588 | * wheel for the next timer event. |
589 | */ | 589 | */ |
590 | void wake_up_idle_cpu(int cpu) | 590 | static void wake_up_idle_cpu(int cpu) |
591 | { | 591 | { |
592 | struct rq *rq = cpu_rq(cpu); | 592 | struct rq *rq = cpu_rq(cpu); |
593 | 593 | ||
@@ -617,6 +617,24 @@ void wake_up_idle_cpu(int cpu) | |||
617 | smp_send_reschedule(cpu); | 617 | smp_send_reschedule(cpu); |
618 | } | 618 | } |
619 | 619 | ||
620 | static bool wake_up_extended_nohz_cpu(int cpu) | ||
621 | { | ||
622 | if (tick_nohz_extended_cpu(cpu)) { | ||
623 | if (cpu != smp_processor_id() || | ||
624 | tick_nohz_tick_stopped()) | ||
625 | smp_send_reschedule(cpu); | ||
626 | return true; | ||
627 | } | ||
628 | |||
629 | return false; | ||
630 | } | ||
631 | |||
632 | void wake_up_nohz_cpu(int cpu) | ||
633 | { | ||
634 | if (!wake_up_extended_nohz_cpu(cpu)) | ||
635 | wake_up_idle_cpu(cpu); | ||
636 | } | ||
637 | |||
620 | static inline bool got_nohz_idle_kick(void) | 638 | static inline bool got_nohz_idle_kick(void) |
621 | { | 639 | { |
622 | int cpu = smp_processor_id(); | 640 | int cpu = smp_processor_id(); |