author:    Thomas Gleixner <tglx@linutronix.de>  2013-03-06 06:18:35 -0500
committer: Thomas Gleixner <tglx@linutronix.de>  2013-03-13 06:39:39 -0400
commit:    26517f3e99248668315aee9460dcea21628cdd7f
tree:      053bb79af301c58cd05c9e346d85f66918048d61 /kernel
parent:    f7dce82d532e911c41933776426785373fe13967
tick: Avoid programming the local cpu timer if broadcast pending
If the local cpu timer stops in deep idle, we arm the broadcast device
and get woken by an IPI. Now when we return from deep idle we reenable
the local cpu timer unconditionally before handling the IPI. But that's
a pointless exercise: the timer is already expired and the IPI is on
the way. And it's an expensive exercise as we use the forced
reprogramming mode so that we do not lose a timer event. This forced
reprogramming will loop at least once in the retry.

To avoid this reprogramming, we mark the cpu in a pending bit mask
before we send the IPI. Now when the IPI target cpu wakes up, it will
see the pending bit set and skip the reprogramming. The reprogramming
of the cpu local timer will happen in the IPI handler which runs the
cpu local timer interrupt function.

Reported-by: Jason Liu <liu.h.jason@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: LAK <linux-arm-kernel@lists.infradead.org>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Arjan van de Veen <arjan@infradead.org>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Link: http://lkml.kernel.org/r/20130306111537.431082074@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
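For clarity, a minimal user-space sketch of the handshake described
above, assuming a toy model where a plain bitmask stands in for the
kernel cpumask API; the names (pending_mask, handle_broadcast,
cpu_wakeup) are hypothetical and only illustrate the ordering, not the
kernel interfaces:

    #include <stdio.h>

    static unsigned long pending_mask;  /* stands in for tick_broadcast_pending_mask */

    /* Broadcast side: mark the target cpu pending, then "send" the IPI. */
    static void handle_broadcast(int cpu)
    {
            pending_mask |= 1UL << cpu;
            printf("broadcast: IPI sent to cpu %d\n", cpu);
    }

    /* Wakeup side: skip the forced reprogramming if the bit is set. */
    static void cpu_wakeup(int cpu)
    {
            if (pending_mask & (1UL << cpu)) {
                    pending_mask &= ~(1UL << cpu);  /* test-and-clear */
                    printf("cpu %d: event handled via IPI, skip reprogramming\n", cpu);
                    return;
            }
            printf("cpu %d: reprogram the local timer\n", cpu);
    }

    int main(void)
    {
            handle_broadcast(1);    /* broadcast cpu marks cpu 1 before the IPI */
            cpu_wakeup(1);          /* cpu 1 sees the pending bit, skips reprogramming */
            cpu_wakeup(0);          /* cpu 0 was not marked, reprograms as before */
            return 0;
    }

The essential point is the ordering: the pending bit is set before the
IPI is sent, so the woken cpu can never observe the IPI without the
bit. In the real code both sides run under tick_broadcast_lock.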
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/time/tick-broadcast.c | 28
1 file changed, 26 insertions(+), 2 deletions(-)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 380910db7157..005c0ca81a32 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -392,6 +392,7 @@ int tick_resume_broadcast(void)
 #ifdef CONFIG_TICK_ONESHOT
 
 static cpumask_var_t tick_broadcast_oneshot_mask;
+static cpumask_var_t tick_broadcast_pending_mask;
 
 /*
  * Exposed for debugging: see timer_list.c
@@ -470,6 +471,12 @@ again:
 		td = &per_cpu(tick_cpu_device, cpu);
 		if (td->evtdev->next_event.tv64 <= now.tv64) {
 			cpumask_set_cpu(cpu, tmpmask);
+			/*
+			 * Mark the remote cpu in the pending mask, so
+			 * it can avoid reprogramming the cpu local
+			 * timer in tick_broadcast_oneshot_control().
+			 */
+			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
 		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
 			next_event.tv64 = td->evtdev->next_event.tv64;
 			next_cpu = cpu;
@@ -535,6 +542,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
+		WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
 			if (dev->next_event.tv64 < bc->next_event.tv64)
@@ -543,10 +551,25 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 	} else {
 		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
-			if (dev->next_event.tv64 != KTIME_MAX)
-				tick_program_event(dev->next_event, 1);
+			if (dev->next_event.tv64 == KTIME_MAX)
+				goto out;
+			/*
+			 * The cpu which was handling the broadcast
+			 * timer marked this cpu in the broadcast
+			 * pending mask and fired the broadcast
+			 * IPI. So we are going to handle the expired
+			 * event anyway via the broadcast IPI
+			 * handler. No need to reprogram the timer
+			 * with an already expired event.
+			 */
+			if (cpumask_test_and_clear_cpu(cpu,
+					       tick_broadcast_pending_mask))
+				goto out;
+
+			tick_program_event(dev->next_event, 1);
 		}
 	}
+out:
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
@@ -683,5 +706,6 @@ void __init tick_broadcast_init(void)
 	alloc_cpumask_var(&tmpmask, GFP_NOWAIT);
 #ifdef CONFIG_TICK_ONESHOT
 	alloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
+	alloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
 #endif
 }