author     Preeti U Murthy <preeti@linux.vnet.ibm.com>         2014-02-25 19:08:01 -0500
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2014-03-04 23:56:16 -0500
commit     1b7839559b3f1c7a09ff94904788a732063ce2de
tree       4fcce1fd990bd3afbd96b6c7e94340f98c6f480a
parent     1b67bee129a36c22c17186cc2a9981678e9323ee
powerpc: Split timer_interrupt() into timer handling and interrupt handling routines
Split timer_interrupt(), the local timer interrupt handler on ppc, into
the portion run during regular interrupt handling and __timer_interrupt(),
which takes care of running local timers and collecting time-related
stats. This enables callers interested only in running expired local
timers to call __timer_interrupt() directly. One use case for this is
tick broadcast IPI handling, in which the sleeping CPUs need to handle
the local timers that have expired.
Signed-off-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
 arch/powerpc/kernel/time.c | 81
 1 file changed, 46 insertions(+), 35 deletions(-)
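Schematically, the patch factors one monolithic handler into a core routine with two callers: the hardware decrementer interrupt and the tick broadcast IPI. Below is a minimal, compilable userspace sketch of that shape. It is a sketch, not the kernel code; the stubbed helpers (read_timebase(), run_expired_timers(), enter_irq_context(), exit_irq_context()) are invented stand-ins for get_tb_or_rtc(), the clockevent event_handler, irq_enter() and irq_exit().

#include <stdint.h>
#include <stdio.h>

static uint64_t next_tb = 100;   /* next programmed expiry, like decrementers_next_tb */

static uint64_t read_timebase(void)  { return 150; }          /* stand-in for get_tb_or_rtc() */
static void run_expired_timers(void) { puts("timers run"); }  /* stand-in for evt->event_handler(evt) */
static void enter_irq_context(void)  { puts("irq_enter"); }   /* stand-in for irq_enter() */
static void exit_irq_context(void)   { puts("irq_exit"); }    /* stand-in for irq_exit() */

/* Core timer work, callable without interrupt-entry bookkeeping. */
static void __timer_interrupt(void)
{
        uint64_t now = read_timebase();

        if (now >= next_tb) {
                next_tb = ~(uint64_t)0;   /* disarm until reprogrammed */
                run_expired_timers();
        }
}

/* Caller 1: the hardware decrementer interrupt wraps the core work. */
static void timer_interrupt(void)
{
        enter_irq_context();
        __timer_interrupt();
        exit_irq_context();
}

/* Caller 2: the broadcast IPI forces the core work to run immediately. */
static void tick_broadcast_ipi_handler(void)
{
        next_tb = read_timebase();        /* treat the local timer as expired */
        __timer_interrupt();
}

int main(void)
{
        timer_interrupt();                /* normal decrementer path */
        tick_broadcast_ipi_handler();     /* broadcast-IPI path */
        return 0;
}

In the diff below, timer_interrupt() keeps exactly that wrapper role, set_irq_regs()/irq_enter() on entry and irq_exit()/set_irq_regs() on exit, with everything else moved into __timer_interrupt().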
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 3ff97dbb35be..df2989b0d4c0 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -478,6 +478,47 @@ void arch_irq_work_raise(void)
 
 #endif /* CONFIG_IRQ_WORK */
 
+void __timer_interrupt(void)
+{
+        struct pt_regs *regs = get_irq_regs();
+        u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+        struct clock_event_device *evt = &__get_cpu_var(decrementers);
+        u64 now;
+
+        trace_timer_interrupt_entry(regs);
+
+        if (test_irq_work_pending()) {
+                clear_irq_work_pending();
+                irq_work_run();
+        }
+
+        now = get_tb_or_rtc();
+        if (now >= *next_tb) {
+                *next_tb = ~(u64)0;
+                if (evt->event_handler)
+                        evt->event_handler(evt);
+                __get_cpu_var(irq_stat).timer_irqs_event++;
+        } else {
+                now = *next_tb - now;
+                if (now <= DECREMENTER_MAX)
+                        set_dec((int)now);
+                /* We may have raced with new irq work */
+                if (test_irq_work_pending())
+                        set_dec(1);
+                __get_cpu_var(irq_stat).timer_irqs_others++;
+        }
+
+#ifdef CONFIG_PPC64
+        /* collect purr register values often, for accurate calculations */
+        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
+                cu->current_tb = mfspr(SPRN_PURR);
+        }
+#endif
+
+        trace_timer_interrupt_exit(regs);
+}
+
 /*
  * timer_interrupt - gets called when the decrementer overflows,
  * with interrupts disabled.
@@ -486,8 +527,6 @@ void timer_interrupt(struct pt_regs * regs)
 {
         struct pt_regs *old_regs;
         u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
-        struct clock_event_device *evt = &__get_cpu_var(decrementers);
-        u64 now;
 
         /* Ensure a positive value is written to the decrementer, or else
          * some CPUs will continue to take decrementer exceptions.
@@ -519,39 +558,7 @@ void timer_interrupt(struct pt_regs * regs)
         old_regs = set_irq_regs(regs);
         irq_enter();
 
-        trace_timer_interrupt_entry(regs);
-
-        if (test_irq_work_pending()) {
-                clear_irq_work_pending();
-                irq_work_run();
-        }
-
-        now = get_tb_or_rtc();
-        if (now >= *next_tb) {
-                *next_tb = ~(u64)0;
-                if (evt->event_handler)
-                        evt->event_handler(evt);
-                __get_cpu_var(irq_stat).timer_irqs_event++;
-        } else {
-                now = *next_tb - now;
-                if (now <= DECREMENTER_MAX)
-                        set_dec((int)now);
-                /* We may have raced with new irq work */
-                if (test_irq_work_pending())
-                        set_dec(1);
-                __get_cpu_var(irq_stat).timer_irqs_others++;
-        }
-
-#ifdef CONFIG_PPC64
-        /* collect purr register values often, for accurate calculations */
-        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
-                cu->current_tb = mfspr(SPRN_PURR);
-        }
-#endif
-
-        trace_timer_interrupt_exit(regs);
-
+        __timer_interrupt();
         irq_exit();
         set_irq_regs(old_regs);
 }
@@ -828,6 +835,10 @@ static void decrementer_set_mode(enum clock_event_mode mode,
 /* Interrupt handler for the timer broadcast IPI */
 void tick_broadcast_ipi_handler(void)
 {
+        u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+
+        *next_tb = get_tb_or_rtc();
+        __timer_interrupt();
 }
 
 static void register_decrementer_clockevent(int cpu)
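A detail worth noting in the last hunk: tick_broadcast_ipi_handler() sets *next_tb to the current timebase before calling __timer_interrupt(), so the now >= *next_tb test inside it is guaranteed to succeed and the clockevent handler runs. Without that, a CPU woken by the broadcast IPI could take the now < *next_tb branch and simply reprogram its decrementer, even though the broadcast was sent precisely because its local timer had expired.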