Diffstat (limited to 'arch/powerpc/kernel/time.c')
 -rw-r--r--  arch/powerpc/kernel/time.c | 93
 1 file changed, 74 insertions(+), 19 deletions(-)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index a136a11c490d..0441bbdadbd1 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -54,6 +54,7 @@
 #include <linux/irq.h>
 #include <linux/delay.h>
 #include <linux/perf_event.h>
+#include <asm/trace.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>
@@ -264,10 +265,11 @@ void account_system_vtime(struct task_struct *tsk)
 		account_system_time(tsk, 0, delta, deltascaled);
 	else
 		account_idle_time(delta);
-	per_cpu(cputime_last_delta, smp_processor_id()) = delta;
-	per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled;
+	__get_cpu_var(cputime_last_delta) = delta;
+	__get_cpu_var(cputime_scaled_last_delta) = deltascaled;
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL_GPL(account_system_vtime);
 
 /*
  * Transfer the user and system times accumulated in the paca
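Note: per_cpu(var, smp_processor_id()) and __get_cpu_var(var) name the same per-CPU slot; the new form resolves it through the local per-CPU offset instead of recomputing the CPU number, and it documents that the caller already runs non-preemptibly (account_system_vtime() holds local_irq_save() here). A minimal sketch of the equivalence, using a hypothetical demo_delta variable and the per-CPU API of this kernel era:

	DEFINE_PER_CPU(u64, demo_delta);	/* hypothetical example variable */

	static void record_delta(u64 delta)
	{
		/* both stores hit the same per-CPU slot; the caller must keep
		 * preemption (here: interrupts) disabled around them */
		per_cpu(demo_delta, smp_processor_id()) = delta;	/* old form */
		__get_cpu_var(demo_delta) = delta;			/* new form */
	}

The added EXPORT_SYMBOL_GPL() makes account_system_vtime() callable from modular code.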
@@ -530,25 +532,60 @@ void __init iSeries_time_init_early(void)
 }
 #endif /* CONFIG_PPC_ISERIES */
 
-#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32)
-DEFINE_PER_CPU(u8, perf_event_pending);
+#ifdef CONFIG_PERF_EVENTS
 
-void set_perf_event_pending(void)
+/*
+ * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
+ */
+#ifdef CONFIG_PPC64
+static inline unsigned long test_perf_event_pending(void)
 {
-	get_cpu_var(perf_event_pending) = 1;
-	set_dec(1);
-	put_cpu_var(perf_event_pending);
+	unsigned long x;
+
+	asm volatile("lbz %0,%1(13)"
+		: "=r" (x)
+		: "i" (offsetof(struct paca_struct, perf_event_pending)));
+	return x;
+}
+
+static inline void set_perf_event_pending_flag(void)
+{
+	asm volatile("stb %0,%1(13)" : :
+		"r" (1),
+		"i" (offsetof(struct paca_struct, perf_event_pending)));
 }
 
+static inline void clear_perf_event_pending(void)
+{
+	asm volatile("stb %0,%1(13)" : :
+		"r" (0),
+		"i" (offsetof(struct paca_struct, perf_event_pending)));
+}
+
+#else /* 32-bit */
+
+DEFINE_PER_CPU(u8, perf_event_pending);
+
+#define set_perf_event_pending_flag()	__get_cpu_var(perf_event_pending) = 1
 #define test_perf_event_pending()	__get_cpu_var(perf_event_pending)
 #define clear_perf_event_pending()	__get_cpu_var(perf_event_pending) = 0
 
-#else  /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
+#endif /* 32 vs 64 bit */
+
+void set_perf_event_pending(void)
+{
+	preempt_disable();
+	set_perf_event_pending_flag();
+	set_dec(1);
+	preempt_enable();
+}
+
+#else  /* CONFIG_PERF_EVENTS */
 
 #define test_perf_event_pending()	0
 #define clear_perf_event_pending()
 
-#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
+#endif /* CONFIG_PERF_EVENTS */
 
 /*
  * For iSeries shared processors, we have to let the hypervisor
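Note: on 64-bit, r13 holds the pointer to the current CPU's paca_struct at all times, so the lbz/stb instructions with an "i" (constant-offset) operand touch the perf_event_pending byte directly off r13 without materializing the address in another register. A rough C-level equivalent, a sketch only (assuming the usual local_paca register alias; the real code stays in asm so the compiler cannot cache a stale copy of r13 across preemption):

	/* register struct paca_struct *local_paca asm("r13"); */

	static inline unsigned long test_perf_event_pending_sketch(void)
	{
		return local_paca->perf_event_pending;
	}

	static inline void set_perf_event_pending_flag_sketch(void)
	{
		local_paca->perf_event_pending = 1;
	}

set_perf_event_pending() then wraps the flag write and set_dec(1) in preempt_disable()/preempt_enable(), so the decrementer it arms fires on the same CPU that set the flag.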
@@ -571,15 +608,15 @@ void timer_interrupt(struct pt_regs * regs)
 	struct clock_event_device *evt = &decrementer->event;
 	u64 now;
 
+	trace_timer_interrupt_entry(regs);
+
+	__get_cpu_var(irq_stat).timer_irqs++;
+
 	/* Ensure a positive value is written to the decrementer, or else
 	 * some CPUs will continuue to take decrementer exceptions */
 	set_dec(DECREMENTER_MAX);
 
 #ifdef CONFIG_PPC32
-	if (test_perf_event_pending()) {
-		clear_perf_event_pending();
-		perf_event_do_pending();
-	}
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 		do_IRQ(regs);
 #endif
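Note: the decrementer counts down and raises the timer exception once its value goes negative, which is why the handler's first action is to load a large positive value; otherwise some CPUs re-take the exception immediately. set_dec() boils down to a move-to-SPR, roughly (a sketch; the real helper in asm/time.h carries 40x and Book E watchdog special cases):

	static inline void set_dec_sketch(int val)
	{
		mtspr(SPRN_DEC, val);	/* write the decrementer SPR */
	}

The perf_event_pending check that used to sit in the CONFIG_PPC32 block here moves further down (see the +635 hunk) so 32-bit and 64-bit share one code path.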
@@ -590,6 +627,7 @@ void timer_interrupt(struct pt_regs * regs)
 		now = decrementer->next_tb - now;
 		if (now <= DECREMENTER_MAX)
 			set_dec((int)now);
+		trace_timer_interrupt_exit(regs);
 		return;
 	}
 	old_regs = set_irq_regs(regs);
@@ -597,6 +635,11 @@ void timer_interrupt(struct pt_regs * regs)
 
 	calculate_steal_time();
 
+	if (test_perf_event_pending()) {
+		clear_perf_event_pending();
+		perf_event_do_pending();
+	}
+
 #ifdef CONFIG_PPC_ISERIES
 	if (firmware_has_feature(FW_FEATURE_ISERIES))
 		get_lppaca()->int_dword.fields.decr_int = 0;
@@ -620,6 +663,8 @@ void timer_interrupt(struct pt_regs * regs)
 
 	irq_exit();
 	set_irq_regs(old_regs);
+
+	trace_timer_interrupt_exit(regs);
 }
 
 void wakeup_decrementer(void)
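Note: trace_timer_interrupt_entry()/trace_timer_interrupt_exit() are the tracepoints pulled in by the new <asm/trace.h> include; the early-return path in the +627 hunk needs its own exit call so every entry event pairs with an exit event. Their definitions are assumed here to follow the usual TRACE_EVENT shape, something like:

	/* sketch of an assumed definition -- the real ones live in
	 * arch/powerpc/include/asm/trace.h */
	TRACE_EVENT(timer_interrupt_entry,
		TP_PROTO(struct pt_regs *regs),
		TP_ARGS(regs),
		TP_STRUCT__entry(__field(struct pt_regs *, regs)),
		TP_fast_assign(__entry->regs = regs;),
		TP_printk("pt_regs %p", __entry->regs)
	);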
@@ -828,7 +873,8 @@ static cycle_t timebase_read(struct clocksource *cs)
 	return (cycle_t)get_tb();
 }
 
-void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
+void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
+		     u32 mult)
 {
 	u64 t2x, stamp_xsec;
 
@@ -841,7 +887,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
 
 	/* XXX this assumes clock->shift == 22 */
 	/* 4611686018 ~= 2^(20+64-22) / 1e9 */
-	t2x = (u64) clock->mult * 4611686018ULL;
+	t2x = (u64) mult * 4611686018ULL;
 	stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
 	do_div(stamp_xsec, 1000000000);
 	stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
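Note: the vDSO wants tb_to_xs, a 2^-64 fixed-point factor converting timebase ticks to xsec (2^-20 s) units, while the timekeeping core hands in mult, a 2^-22 fixed-point ns-per-tick value; hence the conversion factor 2^(20+64-22) / 10^9. Worked out, matching the comment's approximation:

	2^(20+64-22) = 2^62 = 4611686018427387904
	2^62 / 10^9  = 4611686018.42...  ->  4611686018ULL (truncated)

Taking mult as an explicit parameter instead of reading clock->mult lets the caller pass the freshly adjusted multiplier, so the vsyscall data cannot disagree with the timekeeping core's current value.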
@@ -895,12 +941,21 @@ static void decrementer_set_mode(enum clock_event_mode mode,
 		decrementer_set_next_event(DECREMENTER_MAX, dev);
 }
 
+static inline uint64_t div_sc64(unsigned long ticks, unsigned long nsec,
+				int shift)
+{
+	uint64_t tmp = ((uint64_t)ticks) << shift;
+
+	do_div(tmp, nsec);
+	return tmp;
+}
+
 static void __init setup_clockevent_multiplier(unsigned long hz)
 {
 	u64 mult, shift = 32;
 
 	while (1) {
-		mult = div_sc(hz, NSEC_PER_SEC, shift);
+		mult = div_sc64(hz, NSEC_PER_SEC, shift);
 		if (mult && (mult >> 32UL) == 0UL)
 			break;
 
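Note: the generic div_sc() computes the same 64-bit quotient but returns an unsigned long, which on 32-bit truncates the result to 32 bits and defeats the (mult >> 32) overflow test in the loop; div_sc64() keeps the full 64-bit value. A worked pass through the loop with a hypothetical 512 MHz timebase, shown only to illustrate the termination test:

	hz = 512000000, shift = 32:
	mult = (512000000 << 32) / NSEC_PER_SEC
	     = 2199023255552000000 / 1000000000
	     = 2199023255 (0x83126e97)
	mult != 0 and (mult >> 32) == 0, so shift = 32 is accepted.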
@@ -918,8 +973,8 @@ static void register_decrementer_clockevent(int cpu)
 	*dec = decrementer_clockevent;
 	dec->cpumask = cpumask_of(cpu);
 
-	printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
-	       dec->name, dec->mult, dec->shift, cpu);
+	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
+		    dec->name, dec->mult, dec->shift, cpu);
 
 	clockevents_register_device(dec);
 }
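Note: two small fixes share this hunk: dec->mult is a u32 in this tree (hence the %lx to %x switch, since %lx would read a long), and printk_once() stops the message from repeating for every CPU that registers its decrementer at boot. printk_once() is essentially the following (a sketch of the idea; the real macro lives in linux/kernel.h):

	#define printk_once_sketch(fmt...)		\
	({						\
		static bool __done;			\
		if (!__done) {				\
			__done = true;			\
			printk(fmt);			\
		}					\
	})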