Diffstat (limited to 'arch/powerpc/kernel/time.c')
 arch/powerpc/kernel/time.c | 131 +++++++++++++++++++++++++++++++-----------------
 1 file changed, 88 insertions(+), 43 deletions(-)

diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index e71a0d8c597a..d20947cf1735 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -73,6 +73,7 @@
 
 /* powerpc clocksource/clockevent code */
 
+#include <linux/clockchips.h>
 #include <linux/clocksource.h>
 
 static cycle_t rtc_read(void);
@@ -97,6 +98,27 @@ static struct clocksource clocksource_timebase = {
 	.read         = timebase_read,
 };
 
+#define DECREMENTER_MAX	0x7fffffff
+
+static int decrementer_set_next_event(unsigned long evt,
+				      struct clock_event_device *dev);
+static void decrementer_set_mode(enum clock_event_mode mode,
+				 struct clock_event_device *dev);
+
+static struct clock_event_device decrementer_clockevent = {
+	.name           = "decrementer",
+	.rating         = 200,
+	.shift          = 32,
+	.mult           = 0,	/* To be filled in */
+	.irq            = 0,
+	.set_next_event = decrementer_set_next_event,
+	.set_mode       = decrementer_set_mode,
+	.features       = CLOCK_EVT_FEAT_ONESHOT,
+};
+
+static DEFINE_PER_CPU(struct clock_event_device, decrementers);
+void init_decrementer_clockevent(void);
+
 #ifdef CONFIG_PPC_ISERIES
 static unsigned long __initdata iSeries_recal_titan;
 static signed long __initdata iSeries_recal_tb;
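Note (not part of the patch): the mult/shift pair and DECREMENTER_MAX declared above are consumed by the generic clockevents core, which turns a requested nanosecond delta into decrementer ticks before calling ->set_next_event(). The sketch below only illustrates that conversion; the helper name example_delta_to_ticks is made up for this note.

static unsigned long example_delta_to_ticks(u64 delta_ns,
					    struct clock_event_device *dev)
{
	/* the real core also clamps to dev->min_delta_ns first */
	if (delta_ns > dev->max_delta_ns)
		delta_ns = dev->max_delta_ns;

	/* ticks = ns * (tb_freq * 2^shift / NSEC_PER_SEC) / 2^shift */
	return (unsigned long)((delta_ns * dev->mult) >> dev->shift);
}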
@@ -517,10 +539,12 @@ void __init iSeries_time_init_early(void)
 void timer_interrupt(struct pt_regs * regs)
 {
 	struct pt_regs *old_regs;
-	int next_dec;
 	int cpu = smp_processor_id();
-	unsigned long ticks;
-	u64 tb_next_jiffy;
+	struct clock_event_device *evt = &per_cpu(decrementers, cpu);
+
+	/* Ensure a positive value is written to the decrementer, or else
+	 * some CPUs will continue to take decrementer exceptions */
+	set_dec(DECREMENTER_MAX);
 
 #ifdef CONFIG_PPC32
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
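Note (not part of the patch): DECREMENTER_MAX is 0x7fffffff because the decrementer raises its exception when the count goes negative, i.e. when bit 31 becomes set, so 2^31 - 1 is the largest value that keeps it quiet for as long as possible; reloading it at interrupt entry is what stops CPUs from taking back-to-back decrementer exceptions while the event is handled. A hypothetical clamped wrapper around the existing set_dec() helper would look like this:

/* illustrative helper, not in the patch: program the decrementer with
 * an arbitrary tick count without letting it go negative early */
static inline void example_arm_decrementer(u64 ticks)
{
	if (ticks > DECREMENTER_MAX)
		ticks = DECREMENTER_MAX;
	set_dec((int)ticks);
}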
@@ -530,7 +554,6 @@ void timer_interrupt(struct pt_regs * regs)
 	old_regs = set_irq_regs(regs);
 	irq_enter();
 
-	profile_tick(CPU_PROFILING);
 	calculate_steal_time();
 
 #ifdef CONFIG_PPC_ISERIES
@@ -538,44 +561,20 @@ void timer_interrupt(struct pt_regs * regs)
 		get_lppaca()->int_dword.fields.decr_int = 0;
 #endif
 
-	while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
-	       >= tb_ticks_per_jiffy) {
-		/* Update last_jiffy */
-		per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
-		/* Handle RTCL overflow on 601 */
-		if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
-			per_cpu(last_jiffy, cpu) -= 1000000000;
-
-		/*
-		 * We cannot disable the decrementer, so in the period
-		 * between this cpu's being marked offline in cpu_online_map
-		 * and calling stop-self, it is taking timer interrupts.
-		 * Avoid calling into the scheduler rebalancing code if this
-		 * is the case.
-		 */
-		if (!cpu_is_offline(cpu))
-			account_process_time(regs);
-
-		/*
-		 * No need to check whether cpu is offline here; boot_cpuid
-		 * should have been fixed up by now.
-		 */
-		if (cpu != boot_cpuid)
-			continue;
-
-		write_seqlock(&xtime_lock);
-		tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
-		if (__USE_RTC() && tb_next_jiffy >= 1000000000)
-			tb_next_jiffy -= 1000000000;
-		if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
-			tb_last_jiffy = tb_next_jiffy;
-			do_timer(1);
-		}
-		write_sequnlock(&xtime_lock);
-	}
-
-	next_dec = tb_ticks_per_jiffy - ticks;
-	set_dec(next_dec);
+	/*
+	 * We cannot disable the decrementer, so in the period
+	 * between this cpu's being marked offline in cpu_online_map
+	 * and calling stop-self, it is taking timer interrupts.
+	 * Avoid calling into the scheduler rebalancing code if this
+	 * is the case.
+	 */
+	if (!cpu_is_offline(cpu))
+		account_process_time(regs);
+
+	if (evt->event_handler)
+		evt->event_handler(evt);
+	else
+		evt->set_next_event(DECREMENTER_MAX, evt);
 
 #ifdef CONFIG_PPC_ISERIES
 	if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
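Note (not part of the patch): nothing in this file assigns ->event_handler; the generic tick layer installs it when the device is registered (for example tick_handle_periodic in kernel/time/tick-common.c). The stand-in below only illustrates the contract the new timer_interrupt() relies on, namely that the handler does the tick work and re-arms the one-shot device, while the else branch covers the window before any handler has been installed.

/* illustrative stand-in for the handler installed by the tick core */
static void example_decrementer_handler(struct clock_event_device *dev)
{
	/* ... expire timers, update jiffies on the timekeeping CPU ... */

	/* re-arm roughly one tick out; the argument is in decrementer
	 * (timebase) ticks, which is what set_dec() ultimately takes */
	dev->set_next_event(tb_ticks_per_jiffy, dev);
}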
@@ -795,6 +794,53 @@ void __init clocksource_init(void)
 	       clock->name, clock->mult, clock->shift);
 }
 
+static int decrementer_set_next_event(unsigned long evt,
+				      struct clock_event_device *dev)
+{
+	set_dec(evt);
+	return 0;
+}
+
+static void decrementer_set_mode(enum clock_event_mode mode,
+				 struct clock_event_device *dev)
+{
+	if (mode != CLOCK_EVT_MODE_ONESHOT)
+		decrementer_set_next_event(DECREMENTER_MAX, dev);
+}
+
+static void register_decrementer_clockevent(int cpu)
+{
+	struct clock_event_device *dec = &per_cpu(decrementers, cpu);
+
+	*dec = decrementer_clockevent;
+	dec->cpumask = cpumask_of_cpu(cpu);
+
+	printk(KERN_ERR "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
+	       dec->name, dec->mult, dec->shift, cpu);
+
+	clockevents_register_device(dec);
+}
+
+void init_decrementer_clockevent(void)
+{
+	int cpu = smp_processor_id();
+
+	decrementer_clockevent.mult = div_sc(ppc_tb_freq, NSEC_PER_SEC,
+					     decrementer_clockevent.shift);
+	decrementer_clockevent.max_delta_ns =
+		clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
+	decrementer_clockevent.min_delta_ns = 1000;
+
+	register_decrementer_clockevent(cpu);
+}
+
+void secondary_cpu_time_init(void)
+{
+	/* FIXME: Should make an unrelated change to move the
+	 * snapshot_timebase call here! */
+	register_decrementer_clockevent(smp_processor_id());
+}
+
 /* This function is only called on the boot processor */
 void __init time_init(void)
 {
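Note (not part of the patch): init_decrementer_clockevent() sizes the device with div_sc() and clockevent_delta2ns(), i.e. mult = ppc_tb_freq * 2^shift / NSEC_PER_SEC, and max_delta_ns is roughly DECREMENTER_MAX * 2^shift / mult. The standalone program below just works that arithmetic for an assumed 512 MHz timebase (the real ppc_tb_freq comes from the firmware/device tree); with that assumption it prints a mult of about 0x83126e97 and a maximum programmable delta of about 4.19 seconds.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tb_freq = 512000000ULL;	/* assumed example frequency */
	uint64_t nsec_per_sec = 1000000000ULL;
	uint64_t decrementer_max = 0x7fffffffULL;
	int shift = 32;

	/* div_sc(): mult = tb_freq * 2^shift / NSEC_PER_SEC */
	uint64_t mult = (tb_freq << shift) / nsec_per_sec;

	/* clockevent_delta2ns(): ns = ticks * 2^shift / mult */
	uint64_t max_ns = (decrementer_max << shift) / mult;

	printf("mult = %#llx, max_delta_ns = %llu ns (~%.2f s)\n",
	       (unsigned long long)mult, (unsigned long long)max_ns,
	       (double)max_ns / 1e9);
	return 0;
}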
@@ -908,8 +954,7 @@ void __init time_init(void)
 	if (!firmware_has_feature(FW_FEATURE_ISERIES))
 		clocksource_init();
 
-	/* Not exact, but the timer interrupt takes care of this */
-	set_dec(tb_ticks_per_jiffy);
+	init_decrementer_clockevent();
 }
 
 