author     Frederic Weisbecker <fweisbec@gmail.com>  2013-04-20 10:40:31 -0400
committer  Frederic Weisbecker <fweisbec@gmail.com>  2013-04-22 14:29:05 -0400
commit     5811d9963e26146898a24b535b301f7654257f8a
tree       1aba8d997214fc1979b049ab23d6430cbd774aee /kernel/time/tick-sched.c
parent     9014c45d9e2dbb935498a5f1d106e220e8624643
nohz: Prepare to stop the tick on irq exit
Interrupt exit is a natural place to stop the tick: it happens after all events that are liable to update the tick dependency, before and during the irq, have occurred. It also ensures that any check of the tick dependency is well ordered against dynticks kick IPIs.

Bring in the infrastructure that performs the tick dependency checks on irq exit and shuts the tick down if these checks show that we can do it safely.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Gilad Ben Yossef <gilad@benyossef.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kevin Hilman <khilman@linaro.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
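For illustration only, below is a minimal userspace model of the check sequence that the new tick_nohz_full_stop_tick() runs on irq exit. The model_* helpers and the two-field struct are simplified stand-ins assumed for this sketch, not the kernel's data structures or APIs; the real code is in the diff that follows.

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified stand-in for struct tick_sched: only the state that the
 * irq-exit decision looks at in this patch.
 */
struct model_tick_sched {
	bool tick_stopped;	/* tick was already stopped earlier */
	bool nohz_active;	/* stands in for ts->nohz_mode != NOHZ_MODE_INACTIVE */
};

/* Stand-ins for the kernel checks; hard-coded here for the example. */
static bool model_tick_nohz_full_cpu(void)  { return true;  }  /* tick_nohz_full_cpu(cpu) */
static bool model_is_idle_task(void)        { return false; }  /* is_idle_task(current) */
static bool model_can_stop_full_tick(void)  { return true;  }  /* can_stop_full_tick() */

/* Mirrors the order of checks in the new tick_nohz_full_stop_tick(). */
static void model_full_stop_tick(struct model_tick_sched *ts)
{
	/* Only full-dynticks CPUs running a non-idle task are eligible. */
	if (!model_tick_nohz_full_cpu() || model_is_idle_task())
		return;

	/* Nothing to do if nohz never got going and the tick is not stopped. */
	if (!ts->tick_stopped && !ts->nohz_active)
		return;

	/* Bail out if something still depends on the periodic tick. */
	if (!model_can_stop_full_tick())
		return;

	ts->tick_stopped = true;	/* tick_nohz_stop_sched_tick() in the kernel */
	printf("tick stopped on irq exit\n");
}

int main(void)
{
	struct model_tick_sched ts = { .tick_stopped = false, .nohz_active = true };

	/* Pretend we are returning from an interrupt on a full-dynticks CPU. */
	model_full_stop_tick(&ts);
	return 0;
}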
Diffstat (limited to 'kernel/time/tick-sched.c')
 kernel/time/tick-sched.c | 31 +++++++++++++++++++++++------
 1 file changed, 25 insertions(+), 6 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 95d79aeb3e27..d0ed1905a85c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -647,6 +647,24 @@ out:
 	return ret;
 }
 
+static void tick_nohz_full_stop_tick(struct tick_sched *ts)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	int cpu = smp_processor_id();
+
+	if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
+		return;
+
+	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
+		return;
+
+	if (!can_stop_full_tick())
+		return;
+
+	tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
+#endif
+}
+
 static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
 {
 	/*
@@ -773,12 +791,13 @@ void tick_nohz_irq_exit(void)
 {
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 
-	if (!ts->inidle)
-		return;
-
-	/* Cancel the timer because CPU already waken up from the C-states*/
-	menu_hrtimer_cancel();
-	__tick_nohz_idle_enter(ts);
+	if (ts->inidle) {
+		/* Cancel the timer because CPU already waken up from the C-states*/
+		menu_hrtimer_cancel();
+		__tick_nohz_idle_enter(ts);
+	} else {
+		tick_nohz_full_stop_tick(ts);
+	}
 }
 
 /**