author:    Frederic Weisbecker <fweisbec@gmail.com>  2011-10-07 12:22:06 -0400
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-12-11 13:31:35 -0500
commit:    280f06774afedf849f0b34248ed6aff57d0f6908
tree:      62ef683226d0569c0e6c3ba34ab2e6d85b2e047f /kernel/time/tick-sched.c
parent:    867f236bd12f5091df6dc7cc75f94d7fd982d78a
nohz: Separate out irq exit and idle loop dyntick logic
The tick_nohz_stop_sched_tick() function, which tries to delay the
next timer tick as long as possible, can be called from two places:

- From the idle loop, to start the dyntick idle mode.

- From irq exit, if we have interrupted the dyntick idle mode, so that
  we reprogram the next tick event in case the irq changed some
  internal state that requires this action.

There are only a few minor differences between the two cases handled
by that function, driven by the ts->inidle per-cpu variable and the
inidle parameter. Together these guarantee that we only update the
dyntick mode on irq exit if we actually interrupted the dyntick idle
mode, and that we enter the RCU extended quiescent state from idle
loop entry only.

Split this function into:

- tick_nohz_idle_enter(), which sets ts->inidle to 1, enters dynticks
  idle mode unconditionally if it can, and enters the RCU extended
  quiescent state.

- tick_nohz_irq_exit(), which only updates the dynticks idle mode when
  ts->inidle is set (i.e. if tick_nohz_idle_enter() has been called).

To maintain symmetry, tick_nohz_restart_sched_tick() has been renamed
to tick_nohz_idle_exit().

This simplifies the code and micro-optimizes the irq exit path (no
need for local_irq_save() there). It also prepares for the split
between the dynticks and RCU extended quiescent state logics. We'll
need this split to further fix illegal uses of RCU in extended
quiescent states in the idle loop.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mike Frysinger <vapier@gentoo.org>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: David Miller <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
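For context, the caller-side pairing that this split enables looks
roughly like the sketch below. This is illustrative only, not code
from this patch; cpu_idle_sleep() is a hypothetical stand-in for the
arch-specific low-power wait:

	/*
	 * Hypothetical arch idle loop after this patch (sketch only).
	 * tick_nohz_idle_enter()/tick_nohz_idle_exit() bracket the whole
	 * idle period, so nothing in between may use RCU read-side
	 * primitives: the CPU sits in an RCU extended quiescent state.
	 */
	void cpu_idle(void)
	{
		while (1) {
			tick_nohz_idle_enter();	/* stop tick, enter RCU extended QS */
			while (!need_resched())
				cpu_idle_sleep();	/* hypothetical low-power wait */
			tick_nohz_idle_exit();	/* exit RCU extended QS, restart tick */
			schedule();		/* run whatever woke us up */
		}
	}

Note that because rcu_idle_enter() now lives inside
tick_nohz_idle_enter(), any use of RCU between enter and exit is
illegal; that is exactly what the follow-up split between the dynticks
and RCU logics mentioned above is meant to fix.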
Diffstat (limited to 'kernel/time/tick-sched.c')
-rw-r--r--  kernel/time/tick-sched.c | 93
1 file changed, 57 insertions(+), 36 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 5d9d23665f12..266c242dc354 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -275,42 +275,17 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 }
 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 
-/**
- * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
- *
- * When the next event is more than a tick into the future, stop the idle tick
- * Called either from the idle loop or from irq_exit() when an idle period was
- * just interrupted by an interrupt which did not cause a reschedule.
- */
-void tick_nohz_stop_sched_tick(int inidle)
+static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
 {
-	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
-	struct tick_sched *ts;
+	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
 	ktime_t last_update, expires, now;
 	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
 	u64 time_delta;
 	int cpu;
 
-	local_irq_save(flags);
-
 	cpu = smp_processor_id();
 	ts = &per_cpu(tick_cpu_sched, cpu);
 
-	/*
-	 * Call to tick_nohz_start_idle stops the last_update_time from being
-	 * updated. Thus, it must not be called in the event we are called from
-	 * irq_exit() with the prior state different than idle.
-	 */
-	if (!inidle && !ts->inidle)
-		goto end;
-
-	/*
-	 * Set ts->inidle unconditionally. Even if the system did not
-	 * switch to NOHZ mode the cpu frequency governers rely on the
-	 * update of the idle time accounting in tick_nohz_start_idle().
-	 */
-	ts->inidle = 1;
-
 	now = tick_nohz_start_idle(cpu, ts);
 
 	/*
@@ -326,10 +301,10 @@ void tick_nohz_stop_sched_tick(int inidle)
 	}
 
 	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
-		goto end;
+		return;
 
 	if (need_resched())
-		goto end;
+		return;
 
 	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
 		static int ratelimit;
@@ -339,7 +314,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 			       (unsigned int) local_softirq_pending());
 			ratelimit++;
 		}
-		goto end;
+		return;
 	}
 
 	ts->idle_calls++;
@@ -471,10 +446,54 @@ out:
 	ts->next_jiffies = next_jiffies;
 	ts->last_jiffies = last_jiffies;
 	ts->sleep_length = ktime_sub(dev->next_event, now);
-end:
-	if (inidle)
-		rcu_idle_enter();
-	local_irq_restore(flags);
+}
+
+/**
+ * tick_nohz_idle_enter - stop the idle tick from the idle task
+ *
+ * When the next event is more than a tick into the future, stop the idle tick
+ * Called when we start the idle loop.
+ * This also enters into RCU extended quiescent state so that this CPU doesn't
+ * need anymore to be part of any global grace period completion. This way
+ * the tick can be stopped safely as we don't need to report quiescent states.
+ */
+void tick_nohz_idle_enter(void)
+{
+	struct tick_sched *ts;
+
+	WARN_ON_ONCE(irqs_disabled());
+
+	local_irq_disable();
+
+	ts = &__get_cpu_var(tick_cpu_sched);
+	/*
+	 * set ts->inidle unconditionally. even if the system did not
+	 * switch to nohz mode the cpu frequency governers rely on the
+	 * update of the idle time accounting in tick_nohz_start_idle().
+	 */
+	ts->inidle = 1;
+	tick_nohz_stop_sched_tick(ts);
+	rcu_idle_enter();
+
+	local_irq_enable();
+}
+
+/**
+ * tick_nohz_irq_exit - update next tick event from interrupt exit
+ *
+ * When an interrupt fires while we are idle and it doesn't cause
+ * a reschedule, it may still add, modify or delete a timer, enqueue
+ * an RCU callback, etc...
+ * So we need to re-calculate and reprogram the next tick event.
+ */
+void tick_nohz_irq_exit(void)
+{
+	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+
+	if (!ts->inidle)
+		return;
+
+	tick_nohz_stop_sched_tick(ts);
 }
 
 /**
@@ -516,11 +535,13 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 }
 
 /**
- * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
+ * tick_nohz_idle_exit - restart the idle tick from the idle task
  *
  * Restart the idle tick when the CPU is woken up from idle
+ * This also exit the RCU extended quiescent state. The CPU
+ * can use RCU again after this function is called.
  */
-void tick_nohz_restart_sched_tick(void)
+void tick_nohz_idle_exit(void)
 {
 	int cpu = smp_processor_id();
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
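On the interrupt-exit side, the irq-exit caller of the old
tick_nohz_stop_sched_tick() becomes a call to the new hook. Below is a
simplified sketch of where it sits, loosely modeled on the
contemporary irq_exit() in kernel/softirq.c (accounting, softirq and
RCU bookkeeping omitted):

	/* Simplified sketch of the irq exit path after this patch. */
	void irq_exit(void)
	{
	#ifdef CONFIG_NO_HZ
		/*
		 * If this interrupt broke into dyntick idle, recompute and
		 * reprogram the next tick event. tick_nohz_irq_exit() is a
		 * no-op unless tick_nohz_idle_enter() set ts->inidle first.
		 */
		if (idle_cpu(smp_processor_id()) && !in_interrupt() &&
		    !need_resched())
			tick_nohz_irq_exit();
	#endif
	}

Because interrupts are already disabled on this path,
tick_nohz_irq_exit() can call tick_nohz_stop_sched_tick() directly,
which is the micro-optimization the changelog mentions: no
local_irq_save()/local_irq_restore() pair is needed anymore.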