path: root/kernel
author	Daniel Lezcano <daniel.lezcano@linaro.org>	2013-03-02 05:10:11 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2013-03-07 10:13:26 -0500
commit	d2348fb6fdc6d671ad45b62db237f76c8c115603 (patch)
tree	30a05c397f91bd8ac32cc904231d9a72ce7b5c1e /kernel
parent	f9ae39d04ccdec8d8ecf532191b7056c279a22c0 (diff)
tick: Dynamically set broadcast irq affinity
When a cpu goes to a deep idle state where its local timer is shut
down, it notifies the time framework to use the broadcast timer
instead. Unfortunately, the broadcast device can wake up any CPU,
including an idle one which is not concerned by the wakeup at all. So
in the worst case an idle CPU will wake up just to send an IPI to the
CPU whose timer expired.

Provide an opt-in feature CLOCK_EVT_FEAT_DYNIRQ which tells the core
that it should set the interrupt affinity of the broadcast interrupt
to the cpu which has the earliest expiry time. This avoids unnecessary
spurious wakeups and IPIs.

[ tglx: Adapted to cpumask rework, silenced an uninitialized warning,
  massaged changelog ]

Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: viresh.kumar@linaro.org
Cc: jacob.jun.pan@linux.intel.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: santosh.shilimkar@ti.com
Cc: linaro-kernel@lists.linaro.org
Cc: patches@linaro.org
Cc: rickard.andersson@stericsson.com
Cc: vincent.guittot@linaro.org
Cc: linus.walleij@stericsson.com
Cc: john.stultz@linaro.org
Link: http://lkml.kernel.org/r/1362219013-18173-3-git-send-email-daniel.lezcano@linaro.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
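As background for the diff below, here is a minimal sketch of how a
broadcast-capable clockevent driver might opt in to the new feature. All
foo_* names and the numeric parameters are hypothetical; the only parts
taken from this patch are the CLOCK_EVT_FEAT_DYNIRQ flag and the
requirement that the device's irq field hold a valid interrupt number,
since the core passes it to irq_set_affinity().

/*
 * Hypothetical driver sketch: a broadcast clockevent opts in by setting
 * CLOCK_EVT_FEAT_DYNIRQ and supplying a valid irq so the tick core can
 * re-route the interrupt via irq_set_affinity(). The foo_* names and
 * numeric parameters below are made up for illustration.
 */
#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/init.h>

static void foo_timer_set_mode(enum clock_event_mode mode,
			       struct clock_event_device *evt)
{
	/* start/stop the (hypothetical) timer hardware here */
}

static int foo_timer_set_next_event(unsigned long delta,
				    struct clock_event_device *evt)
{
	/* arm the (hypothetical) timer to fire 'delta' cycles from now */
	return 0;
}

static struct clock_event_device foo_broadcast_evt = {
	.name		= "foo-broadcast",
	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ,
	.rating		= 300,
	.set_mode	= foo_timer_set_mode,
	.set_next_event	= foo_timer_set_next_event,
};

static void __init foo_timer_init(int irq, unsigned long rate)
{
	/* irq must be valid: the core hands it to irq_set_affinity() */
	foo_broadcast_evt.irq = irq;
	foo_broadcast_evt.cpumask = cpu_possible_mask;
	clockevents_config_and_register(&foo_broadcast_evt, rate, 0xf,
					0x7fffffff);
}

Since the feature is opt-in, a driver that does not set
CLOCK_EVT_FEAT_DYNIRQ sees no behavior change: the new
tick_broadcast_set_affinity() returns early and the broadcast
interrupt's affinity is left alone.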
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/time/tick-broadcast.c	39
1 file changed, 31 insertions(+), 8 deletions(-)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 70dd98ce18d7..380910db7157 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -401,13 +401,34 @@ struct cpumask *tick_get_broadcast_oneshot_mask(void)
 	return tick_broadcast_oneshot_mask;
 }
 
-static int tick_broadcast_set_event(struct clock_event_device *bc,
+/*
+ * Set broadcast interrupt affinity
+ */
+static void tick_broadcast_set_affinity(struct clock_event_device *bc,
+					const struct cpumask *cpumask)
+{
+	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
+		return;
+
+	if (cpumask_equal(bc->cpumask, cpumask))
+		return;
+
+	bc->cpumask = cpumask;
+	irq_set_affinity(bc->irq, bc->cpumask);
+}
+
+static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
 				    ktime_t expires, int force)
 {
+	int ret;
+
 	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
 		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
 
-	return clockevents_program_event(bc, expires, force);
+	ret = clockevents_program_event(bc, expires, force);
+	if (!ret)
+		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
+	return ret;
 }
 
 int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
@@ -436,7 +457,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
 {
 	struct tick_device *td;
 	ktime_t now, next_event;
-	int cpu;
+	int cpu, next_cpu = 0;
 
 	raw_spin_lock(&tick_broadcast_lock);
 again:
@@ -447,10 +468,12 @@ again:
 	/* Find all expired events */
 	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
 		td = &per_cpu(tick_cpu_device, cpu);
-		if (td->evtdev->next_event.tv64 <= now.tv64)
+		if (td->evtdev->next_event.tv64 <= now.tv64) {
 			cpumask_set_cpu(cpu, tmpmask);
-		else if (td->evtdev->next_event.tv64 < next_event.tv64)
+		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
 			next_event.tv64 = td->evtdev->next_event.tv64;
+			next_cpu = cpu;
+		}
 	}
 
 	/*
@@ -473,7 +496,7 @@ again:
 		 * Rearm the broadcast device. If event expired,
 		 * repeat the above
 		 */
-		if (tick_broadcast_set_event(dev, next_event, 0))
+		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
 			goto again;
 	}
 	raw_spin_unlock(&tick_broadcast_lock);
@@ -515,7 +538,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
 			if (dev->next_event.tv64 < bc->next_event.tv64)
-				tick_broadcast_set_event(bc, dev->next_event, 1);
+				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
 		}
 	} else {
 		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
@@ -581,7 +604,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
 			tick_broadcast_init_next_event(tmpmask,
 						       tick_next_period);
-			tick_broadcast_set_event(bc, tick_next_period, 1);
+			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
 		} else
 			bc->next_event.tv64 = KTIME_MAX;
 	} else {