Diffstat (limited to 'arch/powerpc/kernel/perf_counter.c')
-rw-r--r--	arch/powerpc/kernel/perf_counter.c	265
1 file changed, 158 insertions(+), 107 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index bb202388170e..70e1f57f7dd8 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -29,7 +29,7 @@ struct cpu_hw_counters {
 	struct perf_counter *counter[MAX_HWCOUNTERS];
 	u64 events[MAX_HWCOUNTERS];
 	unsigned int flags[MAX_HWCOUNTERS];
-	u64 mmcr[3];
+	unsigned long mmcr[3];
 	struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
 	u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
 };
@@ -46,6 +46,115 @@ struct power_pmu *ppmu;
  */
 static unsigned int freeze_counters_kernel = MMCR0_FCS;
 
+/*
+ * 32-bit doesn't have MMCRA but does have an MMCR2,
+ * and a few other names are different.
+ */
+#ifdef CONFIG_PPC32
+
+#define MMCR0_FCHV		0
+#define MMCR0_PMCjCE		MMCR0_PMCnCE
+
+#define SPRN_MMCRA		SPRN_MMCR2
+#define MMCRA_SAMPLE_ENABLE	0
+
+static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
+{
+	return 0;
+}
+static inline void perf_set_pmu_inuse(int inuse) { }
+static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
+static inline u32 perf_get_misc_flags(struct pt_regs *regs)
+{
+	return 0;
+}
+static inline void perf_read_regs(struct pt_regs *regs) { }
+static inline int perf_intr_is_nmi(struct pt_regs *regs)
+{
+	return 0;
+}
+
+#endif /* CONFIG_PPC32 */
+
+/*
+ * Things that are specific to 64-bit implementations.
+ */
+#ifdef CONFIG_PPC64
+
+static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
+{
+	unsigned long mmcra = regs->dsisr;
+
+	if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
+		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
+		if (slot > 1)
+			return 4 * (slot - 1);
+	}
+	return 0;
+}
+
+static inline void perf_set_pmu_inuse(int inuse)
+{
+	get_lppaca()->pmcregs_in_use = inuse;
+}
+
+/*
+ * The user wants a data address recorded.
+ * If we're not doing instruction sampling, give them the SDAR
+ * (sampled data address).  If we are doing instruction sampling, then
+ * only give them the SDAR if it corresponds to the instruction
+ * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC
+ * bit in MMCRA.
+ */
+static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
+{
+	unsigned long mmcra = regs->dsisr;
+	unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
+		POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
+
+	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+		*addrp = mfspr(SPRN_SDAR);
+}
+
+static inline u32 perf_get_misc_flags(struct pt_regs *regs)
+{
+	unsigned long mmcra = regs->dsisr;
+
+	if (TRAP(regs) != 0xf00)
+		return 0;	/* not a PMU interrupt */
+
+	if (ppmu->flags & PPMU_ALT_SIPR) {
+		if (mmcra & POWER6_MMCRA_SIHV)
+			return PERF_EVENT_MISC_HYPERVISOR;
+		return (mmcra & POWER6_MMCRA_SIPR) ?
+			PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
+	}
+	if (mmcra & MMCRA_SIHV)
+		return PERF_EVENT_MISC_HYPERVISOR;
+	return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
+		PERF_EVENT_MISC_KERNEL;
+}
+
+/*
+ * Overload regs->dsisr to store MMCRA so we only need to read it once
+ * on each interrupt.
+ */
+static inline void perf_read_regs(struct pt_regs *regs)
+{
+	regs->dsisr = mfspr(SPRN_MMCRA);
+}
+
+/*
+ * If interrupts were soft-disabled when a PMU interrupt occurs, treat
+ * it as an NMI.
+ */
+static inline int perf_intr_is_nmi(struct pt_regs *regs)
+{
+	return !regs->softe;
+}
+
+#endif /* CONFIG_PPC64 */
+
 static void perf_counter_interrupt(struct pt_regs *regs);
 
 void perf_counter_print_debug(void)
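Note on the hunk above: both configs end up providing the same set of helper names, so the common code below can call them unconditionally and the 32-bit stubs simply compile away. A minimal stand-alone sketch of the pattern, with hypothetical names (HAVE_MMCRA, sample_ip_adjust) rather than the kernel's:

#include <stdio.h>

#ifdef HAVE_MMCRA
/* "64-bit" flavour: derives a real adjustment from saved PMU state */
static inline unsigned long sample_ip_adjust(unsigned long slot)
{
	return slot > 1 ? 4 * (slot - 1) : 0;
}
#else
/* "32-bit" flavour: same name and signature, folds to a constant */
static inline unsigned long sample_ip_adjust(unsigned long slot)
{
	(void)slot;
	return 0;
}
#endif

int main(void)
{
	/* callers use one name; the #ifdef picked the body at build time */
	printf("adjust = %lu\n", sample_ip_adjust(3));
	return 0;
}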
@@ -78,12 +187,14 @@ static unsigned long read_pmc(int idx)
 	case 6:
 		val = mfspr(SPRN_PMC6);
 		break;
+#ifdef CONFIG_PPC64
 	case 7:
 		val = mfspr(SPRN_PMC7);
 		break;
 	case 8:
 		val = mfspr(SPRN_PMC8);
 		break;
+#endif /* CONFIG_PPC64 */
 	default:
 		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
 		val = 0;
@@ -115,12 +226,14 @@ static void write_pmc(int idx, unsigned long val)
 	case 6:
 		mtspr(SPRN_PMC6, val);
 		break;
+#ifdef CONFIG_PPC64
 	case 7:
 		mtspr(SPRN_PMC7, val);
 		break;
 	case 8:
 		mtspr(SPRN_PMC8, val);
 		break;
+#endif /* CONFIG_PPC64 */
 	default:
 		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
 	}
@@ -135,15 +248,15 @@ static void write_pmc(int idx, unsigned long val)
 static int power_check_constraints(u64 event[], unsigned int cflags[],
 				   int n_ev)
 {
-	u64 mask, value, nv;
+	unsigned long mask, value, nv;
 	u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
-	u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
-	u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
-	u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
+	unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+	unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+	unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
 	int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
 	int i, j;
-	u64 addf = ppmu->add_fields;
-	u64 tadd = ppmu->test_adder;
+	unsigned long addf = ppmu->add_fields;
+	unsigned long tadd = ppmu->test_adder;
 
 	if (n_ev > ppmu->n_counter)
 		return -1;
@@ -283,7 +396,7 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
 
 static void power_pmu_read(struct perf_counter *counter)
 {
-	long val, delta, prev;
+	s64 val, delta, prev;
 
 	if (!counter->hw.idx)
 		return;
@@ -403,14 +516,14 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
 void hw_perf_disable(void)
 {
 	struct cpu_hw_counters *cpuhw;
-	unsigned long ret;
 	unsigned long flags;
 
+	if (!ppmu)
+		return;
 	local_irq_save(flags);
 	cpuhw = &__get_cpu_var(cpu_hw_counters);
 
-	ret = cpuhw->disabled;
-	if (!ret) {
+	if (!cpuhw->disabled) {
 		cpuhw->disabled = 1;
 		cpuhw->n_added = 0;
 
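The new early return here (and the matching ones in hw_perf_enable(), hw_perf_group_sched_in() and hw_perf_counter_setup() below) covers kernels running on a CPU for which no PMU backend ever registers, which becomes possible once the PVR switch at the bottom of this patch is gone. A minimal sketch of the guard pattern, with hypothetical names:

#include <stdio.h>

struct pmu_backend { const char *name; };

static struct pmu_backend *active;	/* stays NULL if nothing registers */

static void pmu_hook(void)
{
	if (!active)
		return;			/* no PMU: every hook is a cheap no-op */
	printf("hook ran on %s\n", active->name);
}

int main(void)
{
	struct pmu_backend demo = { "demo" };

	pmu_hook();			/* safe before registration */
	active = &demo;
	pmu_hook();			/* now does real work */
	return 0;
}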
@@ -461,6 +574,8 @@ void hw_perf_enable(void)
 	int n_lim;
 	int idx;
 
+	if (!ppmu)
+		return;
 	local_irq_save(flags);
 	cpuhw = &__get_cpu_var(cpu_hw_counters);
 	if (!cpuhw->disabled) {
@@ -479,7 +594,7 @@ void hw_perf_enable(void)
 		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 		if (cpuhw->n_counters == 0)
-			get_lppaca()->pmcregs_in_use = 0;
+			perf_set_pmu_inuse(0);
 		goto out_enable;
 	}
 
@@ -512,7 +627,7 @@ void hw_perf_enable(void)
 	 * bit set and set the hardware counters to their initial values.
 	 * Then unfreeze the counters.
 	 */
-	get_lppaca()->pmcregs_in_use = 1;
+	perf_set_pmu_inuse(1);
 	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
@@ -626,6 +741,8 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader,
 	long i, n, n0;
 	struct perf_counter *sub;
 
+	if (!ppmu)
+		return 0;
 	cpuhw = &__get_cpu_var(cpu_hw_counters);
 	n0 = cpuhw->n_counters;
 	n = collect_events(group_leader, ppmu->n_counter - n0,
@@ -913,6 +1030,8 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 	case PERF_TYPE_RAW:
 		ev = counter->attr.config;
 		break;
+	default:
+		return ERR_PTR(-EINVAL);
 	}
 	counter->hw.config_base = ev;
 	counter->hw.idx = 0;
@@ -1007,13 +1126,12 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
  * things if requested.  Note that interrupts are hard-disabled
  * here so there is no possibility of being interrupted.
  */
-static void record_and_restart(struct perf_counter *counter, long val,
+static void record_and_restart(struct perf_counter *counter, unsigned long val,
 			       struct pt_regs *regs, int nmi)
 {
 	u64 period = counter->hw.sample_period;
 	s64 prev, delta, left;
 	int record = 0;
-	u64 addr, mmcra, sdsync;
 
 	/* we don't have to worry about interrupts here */
 	prev = atomic64_read(&counter->hw.prev_count);
@@ -1033,8 +1151,8 @@ static void record_and_restart(struct perf_counter *counter, long val,
 				left = period;
 				record = 1;
 			}
-		if (left < 0x80000000L)
-			val = 0x80000000L - left;
+		if (left < 0x80000000LL)
+			val = 0x80000000LL - left;
 	}
 
 	/*
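On the 0x80000000 arithmetic above: the PMCs are 32-bit counters that raise an interrupt when their most-significant bit becomes set, so priming a counter to 0x80000000 - left makes it fire after exactly left more events; the LL suffix keeps the literal 64-bit even where long is only 32 bits. A worked example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t left = 1000;	/* events wanted before the next sample */
	uint32_t pmc = 0;

	/* counter interrupts when bit 31 sets, i.e. on reaching 0x80000000 */
	if (left < 0x80000000LL)
		pmc = (uint32_t)(0x80000000LL - left);

	printf("prime PMC to 0x%08x\n", pmc);		/* 0x7ffffc18 */
	printf("after %lld events: 0x%08x\n",		/* 0x80000000 */
	       (long long)left, pmc + (uint32_t)left);
	return 0;
}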
@@ -1047,22 +1165,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
 			.period	= counter->hw.last_period,
 		};
 
-		if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
-			/*
-			 * The user wants a data address recorded.
-			 * If we're not doing instruction sampling,
-			 * give them the SDAR (sampled data address).
-			 * If we are doing instruction sampling, then only
-			 * give them the SDAR if it corresponds to the
-			 * instruction pointed to by SIAR; this is indicated
-			 * by the [POWER6_]MMCRA_SDSYNC bit in MMCRA.
-			 */
-			mmcra = regs->dsisr;
-			sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
-				POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
-			if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
-				data.addr = mfspr(SPRN_SDAR);
-		}
+		if (counter->attr.sample_type & PERF_SAMPLE_ADDR)
+			perf_get_data_addr(regs, &data.addr);
+
 		if (perf_counter_overflow(counter, nmi, &data)) {
 			/*
 			 * Interrupts are coming too fast - throttle them
@@ -1088,25 +1193,12 @@ static void record_and_restart(struct perf_counter *counter, long val,
  */
 unsigned long perf_misc_flags(struct pt_regs *regs)
 {
-	unsigned long mmcra;
-
-	if (TRAP(regs) != 0xf00) {
-		/* not a PMU interrupt */
-		return user_mode(regs) ? PERF_EVENT_MISC_USER :
-			PERF_EVENT_MISC_KERNEL;
-	}
+	u32 flags = perf_get_misc_flags(regs);
 
-	mmcra = regs->dsisr;
-	if (ppmu->flags & PPMU_ALT_SIPR) {
-		if (mmcra & POWER6_MMCRA_SIHV)
-			return PERF_EVENT_MISC_HYPERVISOR;
-		return (mmcra & POWER6_MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
-			PERF_EVENT_MISC_KERNEL;
-	}
-	if (mmcra & MMCRA_SIHV)
-		return PERF_EVENT_MISC_HYPERVISOR;
-	return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
-		PERF_EVENT_MISC_KERNEL;
+	if (flags)
+		return flags;
+	return user_mode(regs) ? PERF_EVENT_MISC_USER :
+		PERF_EVENT_MISC_KERNEL;
 }
 
 /*
@@ -1115,20 +1207,12 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
  */
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
-	unsigned long mmcra;
 	unsigned long ip;
-	unsigned long slot;
 
 	if (TRAP(regs) != 0xf00)
 		return regs->nip;	/* not a PMU interrupt */
 
-	ip = mfspr(SPRN_SIAR);
-	mmcra = regs->dsisr;
-	if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
-		slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
-		if (slot > 1)
-			ip += 4 * (slot - 1);
-	}
+	ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
 	return ip;
 }
 
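As the removed body (now perf_ip_adjust() above) reads: when instruction sampling is enabled and the PMU lacks PPMU_ALT_SIPR, SIAR holds the address of the start of the sampled group and the MMCRA slot field says which 4-byte slot within it completed, hence ip += 4 * (slot - 1). A worked example with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long siar = 0x10002340UL;	/* sampled group base (made up) */
	unsigned long slot = 3;			/* from the MMCRA slot field */
	unsigned long ip = siar;

	if (slot > 1)
		ip += 4 * (slot - 1);		/* powerpc instructions are 4 bytes */

	printf("ip = 0x%lx\n", ip);		/* 0x10002348 */
	return 0;
}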
@@ -1140,7 +1224,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 	int i;
 	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
 	struct perf_counter *counter;
-	long val;
+	unsigned long val;
 	int found = 0;
 	int nmi;
 
@@ -1148,16 +1232,9 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
 					mfspr(SPRN_PMC6));
 
-	/*
-	 * Overload regs->dsisr to store MMCRA so we only need to read it once.
-	 */
-	regs->dsisr = mfspr(SPRN_MMCRA);
+	perf_read_regs(regs);
 
-	/*
-	 * If interrupts were soft-disabled when this PMU interrupt
-	 * occurred, treat it as an NMI.
-	 */
-	nmi = !regs->softe;
+	nmi = perf_intr_is_nmi(regs);
 	if (nmi)
 		nmi_enter();
 	else
@@ -1210,54 +1287,28 @@ void hw_perf_counter_setup(int cpu)
 {
 	struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);
 
+	if (!ppmu)
+		return;
 	memset(cpuhw, 0, sizeof(*cpuhw));
 	cpuhw->mmcr[0] = MMCR0_FC;
 }
 
-extern struct power_pmu power4_pmu;
-extern struct power_pmu ppc970_pmu;
-extern struct power_pmu power5_pmu;
-extern struct power_pmu power5p_pmu;
-extern struct power_pmu power6_pmu;
-extern struct power_pmu power7_pmu;
-
-static int init_perf_counters(void)
+int register_power_pmu(struct power_pmu *pmu)
 {
-	unsigned long pvr;
-
-	/* XXX should get this from cputable */
-	pvr = mfspr(SPRN_PVR);
-	switch (PVR_VER(pvr)) {
-	case PV_POWER4:
-	case PV_POWER4p:
-		ppmu = &power4_pmu;
-		break;
-	case PV_970:
-	case PV_970FX:
-	case PV_970MP:
-		ppmu = &ppc970_pmu;
-		break;
-	case PV_POWER5:
-		ppmu = &power5_pmu;
-		break;
-	case PV_POWER5p:
-		ppmu = &power5p_pmu;
-		break;
-	case 0x3e:
-		ppmu = &power6_pmu;
-		break;
-	case 0x3f:
-		ppmu = &power7_pmu;
-		break;
-	}
+	if (ppmu)
+		return -EBUSY;		/* something's already registered */
+
+	ppmu = pmu;
+	pr_info("%s performance monitor hardware support registered\n",
+		pmu->name);
 
+#ifdef MSR_HV
 	/*
 	 * Use FCHV to ignore kernel events if MSR.HV is set.
 	 */
 	if (mfmsr() & MSR_HV)
 		freeze_counters_kernel = MMCR0_FCHV;
+#endif /* MSR_HV */
 
 	return 0;
 }
-
-arch_initcall(init_perf_counters);
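With the PVR switch gone, each CPU-specific PMU file is expected to call register_power_pmu() itself. A hypothetical sketch of such a caller (only register_power_pmu() and the name field come from this patch; the init function and its PVR check are illustrative, not part of the diff):

static struct power_pmu power5_pmu = {
	.name = "POWER5",
	/* .n_counter, .add_fields, .test_adder, ... elided */
};

static int __init init_power5_pmu(void)
{
	if (PVR_VER(mfspr(SPRN_PVR)) != PV_POWER5)
		return -ENODEV;		/* not this CPU */
	return register_power_pmu(&power5_pmu);
}
arch_initcall(init_power5_pmu);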