Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/include/asm/local64.h    |  1
-rw-r--r--   arch/powerpc/include/asm/perf_event.h | 12
-rw-r--r--   arch/powerpc/kernel/misc.S            | 26
-rw-r--r--   arch/powerpc/kernel/perf_event.c      | 41
4 files changed, 34 insertions, 46 deletions
diff --git a/arch/powerpc/include/asm/local64.h b/arch/powerpc/include/asm/local64.h
new file mode 100644
index 000000000000..36c93b5cc239
--- /dev/null
+++ b/arch/powerpc/include/asm/local64.h
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
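
Note: local64_t (pulled in here via asm-generic/local64.h) is a 64-bit counter type for data that only its owning CPU ever updates, so it needs no cross-CPU atomic operations; being safe against interrupts and NMIs on that one CPU is enough. A minimal usage sketch, where sample_count and count_event are illustrative names, not kernel API:

	#include <linux/percpu.h>
	#include <asm/local64.h>

	/* illustrative per-CPU counter; only the owning CPU touches it */
	static DEFINE_PER_CPU(local64_t, sample_count);

	static void count_event(u64 delta)
	{
		/* safe against interrupts on this CPU, no bus locking needed */
		local64_add(delta, &__get_cpu_var(sample_count));
	}

This is why the perf_event.c changes below can replace atomic64_* with local64_*: each event's prev_count and period_left are only updated by the CPU the event is currently scheduled on.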
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
index e6d4ce69b126..5c16b891d501 100644
--- a/arch/powerpc/include/asm/perf_event.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -21,3 +21,15 @@
 #ifdef CONFIG_FSL_EMB_PERF_EVENT
 #include <asm/perf_event_fsl_emb.h>
 #endif
+
+#ifdef CONFIG_PERF_EVENTS
+#include <asm/ptrace.h>
+#include <asm/reg.h>
+
+#define perf_arch_fetch_caller_regs(regs, __ip)			\
+	do {							\
+		(regs)->nip = __ip;				\
+		(regs)->gpr[1] = *(unsigned long *)__get_SP();	\
+		asm volatile("mfmsr %0" : "=r" ((regs)->msr));	\
+	} while (0)
+#endif
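
Note: the macro above replaces the assembly perf_arch_fetch_caller_regs removed from misc.S below. It captures just the three registers the generic perf code needs to start a backtrace; expanded, it behaves roughly like this C sketch (fetch_regs_here is an illustrative name):

	static inline void fetch_regs_here(struct pt_regs *regs, unsigned long ip)
	{
		/* NIP: the instruction address the sample is attributed to */
		regs->nip = ip;
		/* r1 is the stack pointer on powerpc; dereferencing it follows
		 * the frame back chain to the caller's stack frame */
		regs->gpr[1] = *(unsigned long *)__get_SP();
		/* MSR records processor state for the unwinder */
		asm volatile("mfmsr %0" : "=r" (regs->msr));
	}

Because the real version is a macro, it expands inline at the call site, so __get_SP() reads the caller's own r1 rather than that of a helper function.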
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 22e507c8a556..2d29752cbe16 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -127,29 +127,3 @@ _GLOBAL(__setup_cpu_power7)
 _GLOBAL(__restore_cpu_power7)
 	/* place holder */
 	blr
-
-/*
- * Get a minimal set of registers for our caller's nth caller.
- * r3 = regs pointer, r5 = n.
- *
- * We only get R1 (stack pointer), NIP (next instruction pointer)
- * and LR (link register).  These are all we can get in the
- * general case without doing complicated stack unwinding, but
- * fortunately they are enough to do a stack backtrace, which
- * is all we need them for.
- */
-_GLOBAL(perf_arch_fetch_caller_regs)
-	mr	r6,r1
-	cmpwi	r5,0
-	mflr	r4
-	ble	2f
-	mtctr	r5
-1:	PPC_LL	r6,0(r6)
-	bdnz	1b
-	PPC_LL	r4,PPC_LR_STKOFF(r6)
-2:	PPC_LL	r7,0(r6)
-	PPC_LL	r7,PPC_LR_STKOFF(r7)
-	PPC_STL	r6,GPR1-STACK_FRAME_OVERHEAD(r3)
-	PPC_STL	r4,_NIP-STACK_FRAME_OVERHEAD(r3)
-	PPC_STL	r7,_LINK-STACK_FRAME_OVERHEAD(r3)
-	blr
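
Note: for reference, the removed routine took a skip count in r5 and walked the stack back chain that many frames before filling in GPR1, NIP and LINK. In rough C (a sketch of the old behaviour only; old_fetch_caller_regs and the current_r1/current_lr helpers are illustrative, not kernel API):

	void old_fetch_caller_regs(struct pt_regs *regs, int n)
	{
		unsigned long *sp = (unsigned long *)current_r1();	/* r1 on entry */
		unsigned long nip = current_lr();			/* lr on entry */
		int i;

		for (i = 0; i < n; i++)
			sp = (unsigned long *)sp[0];	/* follow the back chain */
		if (n > 0)	/* NIP = the LR saved in that frame */
			nip = sp[PPC_LR_STKOFF / sizeof(unsigned long)];

		regs->gpr[1] = (unsigned long)sp;
		regs->nip = nip;
		/* LINK comes from the LR slot one frame further up */
		regs->link = ((unsigned long *)sp[0])[PPC_LR_STKOFF / sizeof(unsigned long)];
	}

The replacement macro in perf_event.h drops the skip count entirely: callers now capture their own frame and leave any skipping to the generic unwinder.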
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 5c14ffe51258..d301a30445e0 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -410,15 +410,15 @@ static void power_pmu_read(struct perf_event *event)
 	 * Therefore we treat them like NMIs.
 	 */
 	do {
-		prev = atomic64_read(&event->hw.prev_count);
+		prev = local64_read(&event->hw.prev_count);
 		barrier();
 		val = read_pmc(event->hw.idx);
-	} while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
+	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
 	/* The counters are only 32 bits wide */
 	delta = (val - prev) & 0xfffffffful;
-	atomic64_add(delta, &event->count);
-	atomic64_sub(delta, &event->hw.period_left);
+	local64_add(delta, &event->count);
+	local64_sub(delta, &event->hw.period_left);
 }
 
 /*
@@ -444,10 +444,10 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
 		if (!event->hw.idx)
 			continue;
 		val = (event->hw.idx == 5) ? pmc5 : pmc6;
-		prev = atomic64_read(&event->hw.prev_count);
+		prev = local64_read(&event->hw.prev_count);
 		event->hw.idx = 0;
 		delta = (val - prev) & 0xfffffffful;
-		atomic64_add(delta, &event->count);
+		local64_add(delta, &event->count);
 	}
 }
 
@@ -462,7 +462,7 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
 		event = cpuhw->limited_counter[i];
 		event->hw.idx = cpuhw->limited_hwidx[i];
 		val = (event->hw.idx == 5) ? pmc5 : pmc6;
-		atomic64_set(&event->hw.prev_count, val);
+		local64_set(&event->hw.prev_count, val);
 		perf_event_update_userpage(event);
 	}
 }
@@ -666,11 +666,11 @@ void hw_perf_enable(void)
 		}
 		val = 0;
 		if (event->hw.sample_period) {
-			left = atomic64_read(&event->hw.period_left);
+			left = local64_read(&event->hw.period_left);
 			if (left < 0x80000000L)
 				val = 0x80000000L - left;
 		}
-		atomic64_set(&event->hw.prev_count, val);
+		local64_set(&event->hw.prev_count, val);
 		event->hw.idx = idx;
 		write_pmc(idx, val);
 		perf_event_update_userpage(event);
@@ -754,7 +754,7 @@ static int power_pmu_enable(struct perf_event *event)
 	 * skip the schedulability test here, it will be peformed
 	 * at commit time(->commit_txn) as a whole
 	 */
-	if (cpuhw->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuhw->group_flag & PERF_EVENT_TXN)
 		goto nocheck;
 
 	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
@@ -845,8 +845,8 @@ static void power_pmu_unthrottle(struct perf_event *event)
 	if (left < 0x80000000L)
 		val = 0x80000000L - left;
 	write_pmc(event->hw.idx, val);
-	atomic64_set(&event->hw.prev_count, val);
-	atomic64_set(&event->hw.period_left, left);
+	local64_set(&event->hw.prev_count, val);
+	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
 	perf_enable();
 	local_irq_restore(flags);
@@ -861,7 +861,7 @@ void power_pmu_start_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag |= PERF_EVENT_TXN;
 	cpuhw->n_txn_start = cpuhw->n_events;
 }
 
@@ -874,7 +874,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 }
 
 /*
@@ -900,6 +900,7 @@ int power_pmu_commit_txn(const struct pmu *pmu)
 	for (i = cpuhw->n_txn_start; i < n; ++i)
 		cpuhw->event[i]->hw.config = cpuhw->events[i];
 
+	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 	return 0;
 }
 
@@ -1111,7 +1112,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 	event->hw.config = events[n];
 	event->hw.event_base = cflags[n];
 	event->hw.last_period = event->hw.sample_period;
-	atomic64_set(&event->hw.period_left, event->hw.last_period);
+	local64_set(&event->hw.period_left, event->hw.last_period);
 
 	/*
 	 * See if we need to reserve the PMU.
@@ -1149,16 +1150,16 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	int record = 0;
 
 	/* we don't have to worry about interrupts here */
-	prev = atomic64_read(&event->hw.prev_count);
+	prev = local64_read(&event->hw.prev_count);
 	delta = (val - prev) & 0xfffffffful;
-	atomic64_add(delta, &event->count);
+	local64_add(delta, &event->count);
 
 	/*
 	 * See if the total period for this event has expired,
 	 * and update for the next period.
 	 */
 	val = 0;
-	left = atomic64_read(&event->hw.period_left) - delta;
+	left = local64_read(&event->hw.period_left) - delta;
 	if (period) {
 		if (left <= 0) {
 			left += period;
@@ -1196,8 +1197,8 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	}
 
 	write_pmc(event->hw.idx, val);
-	atomic64_set(&event->hw.prev_count, val);
-	atomic64_set(&event->hw.period_left, left);
+	local64_set(&event->hw.prev_count, val);
+	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
 }
 
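
Note: the recurring delta = (val - prev) & 0xfffffffful pattern exists because the PMCs are only 32 bits wide while prev_count is 64 bits; masking the subtraction to 32 bits yields the correct count even when the hardware counter wraps between reads. A worked sketch with illustrative values:

	u64 prev = 0xfffffff0;	/* last value read from the PMC */
	u64 val  = 0x00000010;	/* next read: the 32-bit counter wrapped */

	/* without the mask, val - prev underflows to a huge 64-bit value;
	 * with it, (val - prev) & 0xfffffffful == 0x20, i.e. 32 events */
	u64 delta = (val - prev) & 0xfffffffful;

The local64_cmpxchg() loop in power_pmu_read() pairs with this: if an interrupt updates prev_count between reading prev and reading the PMC, the cmpxchg fails and the sequence retries, so delta is always computed against a consistent prev.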