Diffstat (limited to 'arch/powerpc/kernel/time.c')
-rw-r--r--	arch/powerpc/kernel/time.c	91
1 file changed, 68 insertions, 23 deletions
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index a925a8eae121..5cd3db5cae41 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -116,9 +116,12 @@ static struct clock_event_device decrementer_clockevent = {
 	.features	= CLOCK_EVT_FEAT_ONESHOT,
 };
 
-static DEFINE_PER_CPU(struct clock_event_device, decrementers);
-void init_decrementer_clockevent(void);
-static DEFINE_PER_CPU(u64, decrementer_next_tb);
+struct decrementer_clock {
+	struct clock_event_device event;
+	u64 next_tb;
+};
+
+static DEFINE_PER_CPU(struct decrementer_clock, decrementers);
 
 #ifdef CONFIG_PPC_ISERIES
 static unsigned long __initdata iSeries_recal_titan;
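The hunk above folds the two per-CPU variables (the clockevent device and its next_tb deadline) into a single struct decrementer_clock, so one per-CPU lookup reaches both fields. Below is a minimal standalone C sketch of that layout, with a plain array standing in for DEFINE_PER_CPU and an int standing in for struct clock_event_device; it is illustrative only, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Models the per-CPU record introduced by the patch: the array stands in
 * for DEFINE_PER_CPU(struct decrementer_clock, decrementers). */
struct decrementer_clock {
	int      event;     /* placeholder for struct clock_event_device */
	uint64_t next_tb;   /* timebase value the next event is due at */
};

#define NR_CPUS 4
static struct decrementer_clock decrementers[NR_CPUS];

int main(void)
{
	int cpu = 2;	/* stand-in for smp_processor_id() */

	/* One lookup now reaches both fields, much like the patched
	 * timer_interrupt() does via __get_cpu_var(decrementers). */
	struct decrementer_clock *dec = &decrementers[cpu];

	dec->next_tb = 1000000ULL;
	printf("cpu %d: next event at tb %llu\n", cpu,
	       (unsigned long long)dec->next_tb);
	return 0;
}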
@@ -216,7 +219,11 @@ static u64 read_purr(void)
  */
 static u64 read_spurr(u64 purr)
 {
-	if (cpu_has_feature(CPU_FTR_SPURR))
+	/*
+	 * cpus without PURR won't have a SPURR
+	 * We already know the former when we use this, so tell gcc
+	 */
+	if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR))
 		return mfspr(SPRN_SPURR);
 	return purr;
 }
@@ -227,29 +234,30 @@ static u64 read_spurr(u64 purr)
  */
 void account_system_vtime(struct task_struct *tsk)
 {
-	u64 now, nowscaled, delta, deltascaled;
+	u64 now, nowscaled, delta, deltascaled, sys_time;
 	unsigned long flags;
 
 	local_irq_save(flags);
 	now = read_purr();
-	delta = now - get_paca()->startpurr;
-	get_paca()->startpurr = now;
 	nowscaled = read_spurr(now);
+	delta = now - get_paca()->startpurr;
 	deltascaled = nowscaled - get_paca()->startspurr;
+	get_paca()->startpurr = now;
 	get_paca()->startspurr = nowscaled;
 	if (!in_interrupt()) {
 		/* deltascaled includes both user and system time.
 		 * Hence scale it based on the purr ratio to estimate
 		 * the system time */
+		sys_time = get_paca()->system_time;
 		if (get_paca()->user_time)
-			deltascaled = deltascaled * get_paca()->system_time /
-				(get_paca()->system_time + get_paca()->user_time);
-		delta += get_paca()->system_time;
+			deltascaled = deltascaled * sys_time /
+				(sys_time + get_paca()->user_time);
+		delta += sys_time;
 		get_paca()->system_time = 0;
 	}
 	account_system_time(tsk, 0, delta);
-	get_paca()->purrdelta = delta;
 	account_system_time_scaled(tsk, deltascaled);
+	get_paca()->purrdelta = delta;
 	get_paca()->spurrdelta = deltascaled;
 	local_irq_restore(flags);
 }
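In account_system_vtime() above, deltascaled (the SPURR delta since the last snapshot) covers both user and system time, so it is apportioned by the PURR-based ratio sys_time / (sys_time + user_time); loading system_time once into the sys_time local also avoids re-reading the PACA field for each use. A small runnable sketch of that apportioning with made-up tick counts (illustrative only, not the kernel's code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t deltascaled = 2000;	/* SPURR ticks since last snapshot */
	uint64_t sys_time    = 300;	/* PURR ticks spent in the kernel  */
	uint64_t user_time   = 100;	/* PURR ticks spent in user mode   */

	/* Same scaling as the patched code: 3/4 of the SPURR delta is
	 * attributed to system time here. */
	if (user_time)
		deltascaled = deltascaled * sys_time / (sys_time + user_time);

	printf("scaled system time: %llu\n",
	       (unsigned long long)deltascaled);	/* prints 1500 */
	return 0;
}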
@@ -326,11 +334,9 @@ void calculate_steal_time(void)
 	s64 stolen;
 	struct cpu_purr_data *pme;
 
-	if (!cpu_has_feature(CPU_FTR_PURR))
-		return;
-	pme = &per_cpu(cpu_purr_data, smp_processor_id());
+	pme = &__get_cpu_var(cpu_purr_data);
 	if (!pme->initialized)
-		return; /* this can happen in early boot */
+		return; /* !CPU_FTR_PURR or early in early boot */
 	tb = mftb();
 	purr = mfspr(SPRN_PURR);
 	stolen = (tb - pme->tb) - (purr - pme->purr);
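The steal computation above relies on the timebase advancing at wall-clock rate while, broadly, PURR only advances for cycles this virtual processor actually receives; the difference of the two deltas since the last snapshot is therefore the time taken away from it. A runnable sketch of the arithmetic with made-up values (illustrative only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t prev_tb = 1000, prev_purr = 1000;	/* last snapshot */
	uint64_t tb = 6000, purr = 4500;		/* current readings */

	/* Timebase moved 5000 ticks but only 3500 PURR ticks were granted,
	 * so 1500 ticks were stolen. */
	int64_t stolen = (int64_t)((tb - prev_tb) - (purr - prev_purr));

	printf("stolen ticks: %lld\n", (long long)stolen);
	return 0;
}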
@@ -353,7 +359,7 @@ static void snapshot_purr(void)
 	if (!cpu_has_feature(CPU_FTR_PURR))
 		return;
 	local_irq_save(flags);
-	pme = &per_cpu(cpu_purr_data, smp_processor_id());
+	pme = &__get_cpu_var(cpu_purr_data);
 	pme->tb = mftb();
 	pme->purr = mfspr(SPRN_PURR);
 	pme->initialized = 1;
@@ -556,8 +562,8 @@ void __init iSeries_time_init_early(void)
 void timer_interrupt(struct pt_regs * regs)
 {
 	struct pt_regs *old_regs;
-	int cpu = smp_processor_id();
-	struct clock_event_device *evt = &per_cpu(decrementers, cpu);
+	struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
+	struct clock_event_device *evt = &decrementer->event;
 	u64 now;
 
 	/* Ensure a positive value is written to the decrementer, or else
@@ -570,9 +576,9 @@ void timer_interrupt(struct pt_regs * regs)
 #endif
 
 	now = get_tb_or_rtc();
-	if (now < per_cpu(decrementer_next_tb, cpu)) {
+	if (now < decrementer->next_tb) {
 		/* not time for this event yet */
-		now = per_cpu(decrementer_next_tb, cpu) - now;
+		now = decrementer->next_tb - now;
 		if (now <= DECREMENTER_MAX)
 			set_dec((int)now);
 		return;
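In the timer_interrupt() hunk above, a decrementer exception that arrives before the recorded next_tb deadline is not treated as an error; it can happen when the decrementer was programmed with a placeholder value (for example the maximum, as the suspend path further down does). The handler reloads the remaining distance when it fits in DECREMENTER_MAX and returns without running the clockevent handler. A runnable model of that re-arm decision (illustrative only; set_dec() is stubbed out and rearm() is a hypothetical helper):

#include <stdint.h>
#include <stdio.h>

#define DECREMENTER_MAX 0x7fffffff	/* largest value of the 32-bit down counter */

static void set_dec(int val) { printf("set_dec(%d)\n", val); }

static void rearm(uint64_t now, uint64_t next_tb)
{
	if (now < next_tb) {
		/* Fired before the deadline: reload the decrementer with the
		 * remaining distance, if it fits in 31 bits, and bail out. */
		uint64_t remaining = next_tb - now;
		if (remaining <= DECREMENTER_MAX)
			set_dec((int)remaining);
		return;
	}
	printf("deadline reached, run the clockevent handler\n");
}

int main(void)
{
	rearm(100, 5000);	/* early: reloads with 4900 */
	rearm(6000, 5000);	/* due: handler would run   */
	return 0;
}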
@@ -623,6 +629,45 @@ void wakeup_decrementer(void)
 	set_dec(ticks);
 }
 
+#ifdef CONFIG_SUSPEND
+void generic_suspend_disable_irqs(void)
+{
+	preempt_disable();
+
+	/* Disable the decrementer, so that it doesn't interfere
+	 * with suspending.
+	 */
+
+	set_dec(0x7fffffff);
+	local_irq_disable();
+	set_dec(0x7fffffff);
+}
+
+void generic_suspend_enable_irqs(void)
+{
+	wakeup_decrementer();
+
+	local_irq_enable();
+	preempt_enable();
+}
+
+/* Overrides the weak version in kernel/power/main.c */
+void arch_suspend_disable_irqs(void)
+{
+	if (ppc_md.suspend_disable_irqs)
+		ppc_md.suspend_disable_irqs();
+	generic_suspend_disable_irqs();
+}
+
+/* Overrides the weak version in kernel/power/main.c */
+void arch_suspend_enable_irqs(void)
+{
+	generic_suspend_enable_irqs();
+	if (ppc_md.suspend_enable_irqs)
+		ppc_md.suspend_enable_irqs();
+}
+#endif
+
 #ifdef CONFIG_SMP
 void __init smp_space_timers(unsigned int max_cpus)
 {
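The hooks added above override the weak arch_suspend_{disable,enable}_irqs() definitions in kernel/power/main.c. The decrementer is parked with 0x7fffffff (the largest positive 32-bit value) both before and after local_irq_disable(), presumably so a decrementer interrupt taken in between cannot leave a short timeout armed across suspend, and the platform hook (ppc_md.suspend_disable_irqs / ppc_md.suspend_enable_irqs) runs outside the interrupts-off window on both paths. A runnable model of that call ordering (plain C, illustrative only; the platform_* functions are hypothetical stand-ins for the ppc_md hooks):

#include <stdio.h>

static void platform_suspend_disable_irqs(void) { puts("platform: prepare wakeup source"); }
static void platform_suspend_enable_irqs(void)  { puts("platform: undo wakeup setup"); }

static void generic_suspend_disable_irqs(void)
{
	puts("generic: park decrementer, local_irq_disable()");
}

static void generic_suspend_enable_irqs(void)
{
	puts("generic: wakeup_decrementer(), local_irq_enable()");
}

/* Platform hook first, then the generic disable: the hook still runs
 * with interrupts enabled. */
void arch_suspend_disable_irqs(void)
{
	platform_suspend_disable_irqs();	/* ppc_md.suspend_disable_irqs() */
	generic_suspend_disable_irqs();
}

/* Reversed on resume: interrupts come back on before the platform hook. */
void arch_suspend_enable_irqs(void)
{
	generic_suspend_enable_irqs();
	platform_suspend_enable_irqs();		/* ppc_md.suspend_enable_irqs() */
}

int main(void)
{
	arch_suspend_disable_irqs();
	puts("-- suspended --");
	arch_suspend_enable_irqs();
	return 0;
}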
@@ -811,7 +856,7 @@ void __init clocksource_init(void)
 static int decrementer_set_next_event(unsigned long evt,
 				      struct clock_event_device *dev)
 {
-	__get_cpu_var(decrementer_next_tb) = get_tb_or_rtc() + evt;
+	__get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt;
 	set_dec(evt);
 	return 0;
 }
@@ -825,7 +870,7 @@ static void decrementer_set_mode(enum clock_event_mode mode,
 
 static void register_decrementer_clockevent(int cpu)
 {
-	struct clock_event_device *dec = &per_cpu(decrementers, cpu);
+	struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;
 
 	*dec = decrementer_clockevent;
 	dec->cpumask = cpumask_of_cpu(cpu);
@@ -836,7 +881,7 @@ static void register_decrementer_clockevent(int cpu)
 	clockevents_register_device(dec);
 }
 
-void init_decrementer_clockevent(void)
+static void __init init_decrementer_clockevent(void)
 {
 	int cpu = smp_processor_id();
 