author		Tony Breeds <tony@bakeyournoodle.com>	2007-09-20 23:26:03 -0400
committer	Paul Mackerras <paulus@samba.org>	2007-10-03 01:44:34 -0400
commit		d831d0b83f205888f4be4dee0a074ad67ef809b3 (patch)
tree		74a62c45f88d8c060c487777d27cb55768009113
parent		ab3e975e7c8b5efb452bdb0d06c1cb7399f83979 (diff)
[POWERPC] Implement clockevents driver for powerpc
This registers a clock event structure for the decrementer and turns on
CONFIG_GENERIC_CLOCKEVENTS, which means that we now don't need most of
timer_interrupt(), since the work is done in generic code.

For secondary CPUs, their decrementer clockevent is registered when the
CPU comes up (the generic code automatically removes the clockevent
when the CPU goes down).

Signed-off-by: Tony Breeds <tony@bakeyournoodle.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
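In outline, the change gives each CPU its own clock_event_device for the decrementer and hands it to the generic clockevents core; timer_interrupt() then only rearms the decrementer and invokes whatever handler the core installed. The following is a condensed sketch of that flow, simplified from the full diff below (the set_mode hook, the mult/max_delta_ns setup and error handling are omitted); it is an illustration of the pattern, not a drop-in replacement for the patch:

#include <linux/clockchips.h>
#include <linux/percpu.h>
#include <asm/time.h>

#define DECREMENTER_MAX	0x7fffffff

static DEFINE_PER_CPU(struct clock_event_device, decrementers);

/* Program the hardware: the decrementer counts down in timebase ticks
 * and raises an exception when it goes negative. */
static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	set_dec(evt);
	return 0;
}

static struct clock_event_device decrementer_clockevent = {
	.name		= "decrementer",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.shift		= 32,
	.set_next_event	= decrementer_set_next_event,
};

/* Called once per CPU (boot CPU from time_init(), secondaries from
 * start_secondary()); the generic core takes over tick handling from here. */
static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of_cpu(cpu);
	clockevents_register_device(dec);
}

In timer_interrupt() the old jiffy-ticking loop correspondingly shrinks to set_dec(DECREMENTER_MAX) followed by a call to evt->event_handler(evt), as the time.c hunks below show.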
-rw-r--r--	arch/powerpc/Kconfig		3
-rw-r--r--	arch/powerpc/kernel/smp.c	2
-rw-r--r--	arch/powerpc/kernel/time.c	131
-rw-r--r--	include/asm-powerpc/time.h	1
4 files changed, 94 insertions(+), 43 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6819a94f2ca..a46f8116f47 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -35,6 +35,9 @@ config GENERIC_TIME
 config GENERIC_TIME_VSYSCALL
 	def_bool y
 
+config GENERIC_CLOCKEVENTS
+	def_bool y
+
 config GENERIC_HARDIRQS
 	bool
 	default y
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index b24dcbaeeca..d30f08fa029 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -569,6 +569,8 @@ int __devinit start_secondary(void *unused)
 	if (system_state > SYSTEM_BOOTING)
 		snapshot_timebase();
 
+	secondary_cpu_time_init();
+
 	spin_lock(&call_lock);
 	cpu_set(cpu, cpu_online_map);
 	spin_unlock(&call_lock);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index e71a0d8c597..d20947cf173 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -73,6 +73,7 @@
 
 /* powerpc clocksource/clockevent code */
 
+#include <linux/clockchips.h>
 #include <linux/clocksource.h>
 
 static cycle_t rtc_read(void);
@@ -97,6 +98,27 @@ static struct clocksource clocksource_timebase = {
 	.read         = timebase_read,
 };
 
+#define DECREMENTER_MAX	0x7fffffff
+
+static int decrementer_set_next_event(unsigned long evt,
+				      struct clock_event_device *dev);
+static void decrementer_set_mode(enum clock_event_mode mode,
+				 struct clock_event_device *dev);
+
+static struct clock_event_device decrementer_clockevent = {
+	.name           = "decrementer",
+	.rating         = 200,
+	.shift          = 32,
+	.mult           = 0,	/* To be filled in */
+	.irq            = 0,
+	.set_next_event = decrementer_set_next_event,
+	.set_mode       = decrementer_set_mode,
+	.features       = CLOCK_EVT_FEAT_ONESHOT,
+};
+
+static DEFINE_PER_CPU(struct clock_event_device, decrementers);
+void init_decrementer_clockevent(void);
+
 #ifdef CONFIG_PPC_ISERIES
 static unsigned long __initdata iSeries_recal_titan;
 static signed long __initdata iSeries_recal_tb;
@@ -517,10 +539,12 @@ void __init iSeries_time_init_early(void)
 void timer_interrupt(struct pt_regs * regs)
 {
 	struct pt_regs *old_regs;
-	int next_dec;
 	int cpu = smp_processor_id();
-	unsigned long ticks;
-	u64 tb_next_jiffy;
+	struct clock_event_device *evt = &per_cpu(decrementers, cpu);
+
+	/* Ensure a positive value is written to the decrementer, or else
+	 * some CPUs will continue to take decrementer exceptions */
+	set_dec(DECREMENTER_MAX);
 
 #ifdef CONFIG_PPC32
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
@@ -530,7 +554,6 @@ void timer_interrupt(struct pt_regs * regs)
 	old_regs = set_irq_regs(regs);
 	irq_enter();
 
-	profile_tick(CPU_PROFILING);
 	calculate_steal_time();
 
 #ifdef CONFIG_PPC_ISERIES
@@ -538,44 +561,20 @@ void timer_interrupt(struct pt_regs * regs)
 	get_lppaca()->int_dword.fields.decr_int = 0;
 #endif
 
-	while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
-	       >= tb_ticks_per_jiffy) {
-		/* Update last_jiffy */
-		per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
-		/* Handle RTCL overflow on 601 */
-		if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
-			per_cpu(last_jiffy, cpu) -= 1000000000;
-
-		/*
-		 * We cannot disable the decrementer, so in the period
-		 * between this cpu's being marked offline in cpu_online_map
-		 * and calling stop-self, it is taking timer interrupts.
-		 * Avoid calling into the scheduler rebalancing code if this
-		 * is the case.
-		 */
-		if (!cpu_is_offline(cpu))
-			account_process_time(regs);
-
-		/*
-		 * No need to check whether cpu is offline here; boot_cpuid
-		 * should have been fixed up by now.
-		 */
-		if (cpu != boot_cpuid)
-			continue;
-
-		write_seqlock(&xtime_lock);
-		tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
-		if (__USE_RTC() && tb_next_jiffy >= 1000000000)
-			tb_next_jiffy -= 1000000000;
-		if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
-			tb_last_jiffy = tb_next_jiffy;
-			do_timer(1);
-		}
-		write_sequnlock(&xtime_lock);
-	}
-
-	next_dec = tb_ticks_per_jiffy - ticks;
-	set_dec(next_dec);
+	/*
+	 * We cannot disable the decrementer, so in the period
+	 * between this cpu's being marked offline in cpu_online_map
+	 * and calling stop-self, it is taking timer interrupts.
+	 * Avoid calling into the scheduler rebalancing code if this
+	 * is the case.
+	 */
+	if (!cpu_is_offline(cpu))
+		account_process_time(regs);
+
+	if (evt->event_handler)
+		evt->event_handler(evt);
+	else
+		evt->set_next_event(DECREMENTER_MAX, evt);
 
 #ifdef CONFIG_PPC_ISERIES
 	if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
@@ -795,6 +794,53 @@ void __init clocksource_init(void)
 	       clock->name, clock->mult, clock->shift);
 }
 
+static int decrementer_set_next_event(unsigned long evt,
+				      struct clock_event_device *dev)
+{
+	set_dec(evt);
+	return 0;
+}
+
+static void decrementer_set_mode(enum clock_event_mode mode,
+				 struct clock_event_device *dev)
+{
+	if (mode != CLOCK_EVT_MODE_ONESHOT)
+		decrementer_set_next_event(DECREMENTER_MAX, dev);
+}
+
+static void register_decrementer_clockevent(int cpu)
+{
+	struct clock_event_device *dec = &per_cpu(decrementers, cpu);
+
+	*dec = decrementer_clockevent;
+	dec->cpumask = cpumask_of_cpu(cpu);
+
+	printk(KERN_ERR "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
+	       dec->name, dec->mult, dec->shift, cpu);
+
+	clockevents_register_device(dec);
+}
+
+void init_decrementer_clockevent(void)
+{
+	int cpu = smp_processor_id();
+
+	decrementer_clockevent.mult = div_sc(ppc_tb_freq, NSEC_PER_SEC,
+					     decrementer_clockevent.shift);
+	decrementer_clockevent.max_delta_ns =
+		clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
+	decrementer_clockevent.min_delta_ns = 1000;
+
+	register_decrementer_clockevent(cpu);
+}
+
+void secondary_cpu_time_init(void)
+{
+	/* FIXME: Should make an unrelated change to move the
+	 * snapshot_timebase call here */
+	register_decrementer_clockevent(smp_processor_id());
+}
+
 /* This function is only called on the boot processor */
 void __init time_init(void)
 {
@@ -908,8 +954,7 @@ void __init time_init(void)
 	if (!firmware_has_feature(FW_FEATURE_ISERIES))
 		clocksource_init();
 
-	/* Not exact, but the timer interrupt takes care of this */
-	set_dec(tb_ticks_per_jiffy);
+	init_decrementer_clockevent();
 }
 
 
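For reference, the mult/shift pair that init_decrementer_clockevent() sets up is the usual fixed-point conversion between timebase ticks and nanoseconds. The standalone program below reproduces that arithmetic; the 512 MHz timebase frequency is purely a hypothetical example, and the local div_sc()/delta2ns() helpers mirror what the generic clockevents helpers of this era are understood to compute rather than quoting them verbatim:

#include <stdint.h>
#include <stdio.h>

#define DECREMENTER_MAX	0x7fffffffULL
#define NSEC_PER_SEC	1000000000ULL

/* mult = (ticks_per_sec << shift) / NSEC_PER_SEC, i.e. ticks-per-nanosecond
 * in 32.32 fixed point -- the scaling div_sc() performs. */
static uint64_t div_sc(uint64_t ticks, uint64_t nsec, int shift)
{
	return (ticks << shift) / nsec;
}

/* Largest programmable delta converted back to nanoseconds,
 * as clockevent_delta2ns() does: ns = (latch << shift) / mult. */
static uint64_t delta2ns(uint64_t latch, uint64_t mult, int shift)
{
	return (latch << shift) / mult;
}

int main(void)
{
	uint64_t tb_freq = 512000000ULL;	/* hypothetical 512 MHz timebase */
	int shift = 32;
	uint64_t mult = div_sc(tb_freq, NSEC_PER_SEC, shift);

	/* ~0x83126e97: 0.512 ticks per nanosecond in 32.32 fixed point. */
	printf("mult = %#llx\n", (unsigned long long)mult);

	/* ~4.19e9 ns: DECREMENTER_MAX ticks at 512 MHz before the
	 * decrementer would go negative. */
	printf("max_delta_ns = %llu\n",
	       (unsigned long long)delta2ns(DECREMENTER_MAX, mult, shift));
	return 0;
}

At this hypothetical frequency the patch's min_delta_ns of 1000 corresponds to roughly 512 timebase ticks, comfortably more than the cost of reprogramming the decrementer.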
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
index fa331dad97c..f05895522f7 100644
--- a/include/asm-powerpc/time.h
+++ b/include/asm-powerpc/time.h
@@ -245,6 +245,7 @@ extern void snapshot_timebases(void);
 #define snapshot_timebases()	do { } while (0)
 #endif
 
+extern void secondary_cpu_time_init(void);
 extern void iSeries_time_init_early(void);
 
 #endif /* __KERNEL__ */