Diffstat (limited to 'arch/powerpc/perf/core-book3s.c')
-rw-r--r-- | arch/powerpc/perf/core-book3s.c | 1448 |
1 file changed, 1448 insertions, 0 deletions
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
new file mode 100644
index 000000000000..c2e27ede07ec
--- /dev/null
+++ b/arch/powerpc/perf/core-book3s.c
@@ -0,0 +1,1448 @@
1 | /* | ||
2 | * Performance event support - powerpc architecture code | ||
3 | * | ||
4 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/perf_event.h> | ||
14 | #include <linux/percpu.h> | ||
15 | #include <linux/hardirq.h> | ||
16 | #include <asm/reg.h> | ||
17 | #include <asm/pmc.h> | ||
18 | #include <asm/machdep.h> | ||
19 | #include <asm/firmware.h> | ||
20 | #include <asm/ptrace.h> | ||
21 | |||
22 | struct cpu_hw_events { | ||
23 | int n_events; | ||
24 | int n_percpu; | ||
25 | int disabled; | ||
26 | int n_added; | ||
27 | int n_limited; | ||
28 | u8 pmcs_enabled; | ||
29 | struct perf_event *event[MAX_HWEVENTS]; | ||
30 | u64 events[MAX_HWEVENTS]; | ||
31 | unsigned int flags[MAX_HWEVENTS]; | ||
32 | unsigned long mmcr[3]; | ||
33 | struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS]; | ||
34 | u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS]; | ||
35 | u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; | ||
36 | unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; | ||
37 | unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; | ||
38 | |||
39 | unsigned int group_flag; | ||
40 | int n_txn_start; | ||
41 | }; | ||
42 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); | ||
43 | |||
44 | struct power_pmu *ppmu; | ||
45 | |||
46 | /* | ||
47 | * Normally, to ignore kernel events we set the FCS (freeze counters | ||
48 | * in supervisor mode) bit in MMCR0, but if the kernel runs with the | ||
49 | * hypervisor bit set in the MSR, or if we are running on a processor | ||
50 | * where the hypervisor bit is forced to 1 (as on Apple G5 processors), | ||
51 | * then we need to use the FCHV bit to ignore kernel events. | ||
52 | */ | ||
53 | static unsigned int freeze_events_kernel = MMCR0_FCS; | ||
54 | |||
55 | /* | ||
56 | * 32-bit doesn't have MMCRA but does have an MMCR2, | ||
57 | * and a few other names are different. | ||
58 | */ | ||
59 | #ifdef CONFIG_PPC32 | ||
60 | |||
61 | #define MMCR0_FCHV 0 | ||
62 | #define MMCR0_PMCjCE MMCR0_PMCnCE | ||
63 | |||
64 | #define SPRN_MMCRA SPRN_MMCR2 | ||
65 | #define MMCRA_SAMPLE_ENABLE 0 | ||
66 | |||
67 | static inline unsigned long perf_ip_adjust(struct pt_regs *regs) | ||
68 | { | ||
69 | return 0; | ||
70 | } | ||
71 | static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { } | ||
72 | static inline u32 perf_get_misc_flags(struct pt_regs *regs) | ||
73 | { | ||
74 | return 0; | ||
75 | } | ||
76 | static inline void perf_read_regs(struct pt_regs *regs) { } | ||
77 | static inline int perf_intr_is_nmi(struct pt_regs *regs) | ||
78 | { | ||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | #endif /* CONFIG_PPC32 */ | ||
83 | |||
84 | /* | ||
85 | * Things that are specific to 64-bit implementations. | ||
86 | */ | ||
87 | #ifdef CONFIG_PPC64 | ||
88 | |||
89 | static inline unsigned long perf_ip_adjust(struct pt_regs *regs) | ||
90 | { | ||
91 | unsigned long mmcra = regs->dsisr; | ||
92 | |||
93 | if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) { | ||
94 | unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT; | ||
95 | if (slot > 1) | ||
96 | return 4 * (slot - 1); | ||
97 | } | ||
98 | return 0; | ||
99 | } | ||
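/*
 * Example: with instruction sampling enabled, SIAR holds the address of
 * the first instruction of the sampled dispatch group and the MMCRA slot
 * field gives the sampled instruction's position within that group, so
 * slot == 3 yields an adjustment of 4 * (3 - 1) = 8 bytes, which
 * perf_instruction_pointer() adds to SIAR.
 */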
100 | |||
101 | /* | ||
102 | * The user wants a data address recorded. | ||
103 | * If we're not doing instruction sampling, give them the SDAR | ||
104 | * (sampled data address). If we are doing instruction sampling, then | ||
105 | * only give them the SDAR if it corresponds to the instruction | ||
106 | * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC | ||
107 | * bit in MMCRA. | ||
108 | */ | ||
109 | static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) | ||
110 | { | ||
111 | unsigned long mmcra = regs->dsisr; | ||
112 | unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ? | ||
113 | POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC; | ||
114 | |||
115 | if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync)) | ||
116 | *addrp = mfspr(SPRN_SDAR); | ||
117 | } | ||
118 | |||
119 | static inline u32 perf_get_misc_flags(struct pt_regs *regs) | ||
120 | { | ||
121 | unsigned long mmcra = regs->dsisr; | ||
122 | unsigned long sihv = MMCRA_SIHV; | ||
123 | unsigned long sipr = MMCRA_SIPR; | ||
124 | |||
125 | if (TRAP(regs) != 0xf00) | ||
126 | return 0; /* not a PMU interrupt */ | ||
127 | |||
128 | if (ppmu->flags & PPMU_ALT_SIPR) { | ||
129 | sihv = POWER6_MMCRA_SIHV; | ||
130 | sipr = POWER6_MMCRA_SIPR; | ||
131 | } | ||
132 | |||
133 | /* PR has priority over HV, so order below is important */ | ||
134 | if (mmcra & sipr) | ||
135 | return PERF_RECORD_MISC_USER; | ||
136 | if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV)) | ||
137 | return PERF_RECORD_MISC_HYPERVISOR; | ||
138 | return PERF_RECORD_MISC_KERNEL; | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * Overload regs->dsisr to store MMCRA so we only need to read it once | ||
143 | * on each interrupt. | ||
144 | */ | ||
145 | static inline void perf_read_regs(struct pt_regs *regs) | ||
146 | { | ||
147 | regs->dsisr = mfspr(SPRN_MMCRA); | ||
148 | } | ||
149 | |||
150 | /* | ||
151 | * If interrupts were soft-disabled when a PMU interrupt occurs, treat | ||
152 | * it as an NMI. | ||
153 | */ | ||
154 | static inline int perf_intr_is_nmi(struct pt_regs *regs) | ||
155 | { | ||
156 | return !regs->softe; | ||
157 | } | ||
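/*
 * regs->softe is the soft-enable state saved at exception entry; zero
 * means the interrupted context had interrupts soft-disabled, in which
 * case perf_event_interrupt() runs the handler under nmi_enter().
 */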
158 | |||
159 | #endif /* CONFIG_PPC64 */ | ||
160 | |||
161 | static void perf_event_interrupt(struct pt_regs *regs); | ||
162 | |||
163 | void perf_event_print_debug(void) | ||
164 | { | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * Read one performance monitor counter (PMC). | ||
169 | */ | ||
170 | static unsigned long read_pmc(int idx) | ||
171 | { | ||
172 | unsigned long val; | ||
173 | |||
174 | switch (idx) { | ||
175 | case 1: | ||
176 | val = mfspr(SPRN_PMC1); | ||
177 | break; | ||
178 | case 2: | ||
179 | val = mfspr(SPRN_PMC2); | ||
180 | break; | ||
181 | case 3: | ||
182 | val = mfspr(SPRN_PMC3); | ||
183 | break; | ||
184 | case 4: | ||
185 | val = mfspr(SPRN_PMC4); | ||
186 | break; | ||
187 | case 5: | ||
188 | val = mfspr(SPRN_PMC5); | ||
189 | break; | ||
190 | case 6: | ||
191 | val = mfspr(SPRN_PMC6); | ||
192 | break; | ||
193 | #ifdef CONFIG_PPC64 | ||
194 | case 7: | ||
195 | val = mfspr(SPRN_PMC7); | ||
196 | break; | ||
197 | case 8: | ||
198 | val = mfspr(SPRN_PMC8); | ||
199 | break; | ||
200 | #endif /* CONFIG_PPC64 */ | ||
201 | default: | ||
202 | printk(KERN_ERR "oops trying to read PMC%d\n", idx); | ||
203 | val = 0; | ||
204 | } | ||
205 | return val; | ||
206 | } | ||
207 | |||
208 | /* | ||
209 | * Write one PMC. | ||
210 | */ | ||
211 | static void write_pmc(int idx, unsigned long val) | ||
212 | { | ||
213 | switch (idx) { | ||
214 | case 1: | ||
215 | mtspr(SPRN_PMC1, val); | ||
216 | break; | ||
217 | case 2: | ||
218 | mtspr(SPRN_PMC2, val); | ||
219 | break; | ||
220 | case 3: | ||
221 | mtspr(SPRN_PMC3, val); | ||
222 | break; | ||
223 | case 4: | ||
224 | mtspr(SPRN_PMC4, val); | ||
225 | break; | ||
226 | case 5: | ||
227 | mtspr(SPRN_PMC5, val); | ||
228 | break; | ||
229 | case 6: | ||
230 | mtspr(SPRN_PMC6, val); | ||
231 | break; | ||
232 | #ifdef CONFIG_PPC64 | ||
233 | case 7: | ||
234 | mtspr(SPRN_PMC7, val); | ||
235 | break; | ||
236 | case 8: | ||
237 | mtspr(SPRN_PMC8, val); | ||
238 | break; | ||
239 | #endif /* CONFIG_PPC64 */ | ||
240 | default: | ||
241 | printk(KERN_ERR "oops trying to write PMC%d\n", idx); | ||
242 | } | ||
243 | } | ||
244 | |||
245 | /* | ||
246 | * Check if a set of events can all go on the PMU at once. | ||
247 | * If they can't, this will look at alternative codes for the events | ||
248 | * and see if any combination of alternative codes is feasible. | ||
249 | * The feasible set is returned in event_id[]. | ||
250 | */ | ||
251 | static int power_check_constraints(struct cpu_hw_events *cpuhw, | ||
252 | u64 event_id[], unsigned int cflags[], | ||
253 | int n_ev) | ||
254 | { | ||
255 | unsigned long mask, value, nv; | ||
256 | unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS]; | ||
257 | int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS]; | ||
258 | int i, j; | ||
259 | unsigned long addf = ppmu->add_fields; | ||
260 | unsigned long tadd = ppmu->test_adder; | ||
261 | |||
262 | if (n_ev > ppmu->n_counter) | ||
263 | return -1; | ||
264 | |||
265 | /* First see if the events will go on as-is */ | ||
266 | for (i = 0; i < n_ev; ++i) { | ||
267 | if ((cflags[i] & PPMU_LIMITED_PMC_REQD) | ||
268 | && !ppmu->limited_pmc_event(event_id[i])) { | ||
269 | ppmu->get_alternatives(event_id[i], cflags[i], | ||
270 | cpuhw->alternatives[i]); | ||
271 | event_id[i] = cpuhw->alternatives[i][0]; | ||
272 | } | ||
273 | if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0], | ||
274 | &cpuhw->avalues[i][0])) | ||
275 | return -1; | ||
276 | } | ||
277 | value = mask = 0; | ||
278 | for (i = 0; i < n_ev; ++i) { | ||
279 | nv = (value | cpuhw->avalues[i][0]) + | ||
280 | (value & cpuhw->avalues[i][0] & addf); | ||
281 | if ((((nv + tadd) ^ value) & mask) != 0 || | ||
282 | (((nv + tadd) ^ cpuhw->avalues[i][0]) & | ||
283 | cpuhw->amasks[i][0]) != 0) | ||
284 | break; | ||
285 | value = nv; | ||
286 | mask |= cpuhw->amasks[i][0]; | ||
287 | } | ||
288 | if (i == n_ev) | ||
289 | return 0; /* all OK */ | ||
290 | |||
291 | /* doesn't work, gather alternatives... */ | ||
292 | if (!ppmu->get_alternatives) | ||
293 | return -1; | ||
294 | for (i = 0; i < n_ev; ++i) { | ||
295 | choice[i] = 0; | ||
296 | n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i], | ||
297 | cpuhw->alternatives[i]); | ||
298 | for (j = 1; j < n_alt[i]; ++j) | ||
299 | ppmu->get_constraint(cpuhw->alternatives[i][j], | ||
300 | &cpuhw->amasks[i][j], | ||
301 | &cpuhw->avalues[i][j]); | ||
302 | } | ||
303 | |||
304 | /* enumerate all possibilities and see if any will work */ | ||
305 | i = 0; | ||
306 | j = -1; | ||
307 | value = mask = nv = 0; | ||
308 | while (i < n_ev) { | ||
309 | if (j >= 0) { | ||
310 | /* we're backtracking, restore context */ | ||
311 | value = svalues[i]; | ||
312 | mask = smasks[i]; | ||
313 | j = choice[i]; | ||
314 | } | ||
315 | /* | ||
316 | * See if any alternative k for event_id i, | ||
317 | * where k > j, will satisfy the constraints. | ||
318 | */ | ||
319 | while (++j < n_alt[i]) { | ||
320 | nv = (value | cpuhw->avalues[i][j]) + | ||
321 | (value & cpuhw->avalues[i][j] & addf); | ||
322 | if ((((nv + tadd) ^ value) & mask) == 0 && | ||
323 | (((nv + tadd) ^ cpuhw->avalues[i][j]) | ||
324 | & cpuhw->amasks[i][j]) == 0) | ||
325 | break; | ||
326 | } | ||
327 | if (j >= n_alt[i]) { | ||
328 | /* | ||
329 | * No feasible alternative, backtrack | ||
330 | * to event_id i-1 and continue enumerating its | ||
331 | * alternatives from where we got up to. | ||
332 | */ | ||
333 | if (--i < 0) | ||
334 | return -1; | ||
335 | } else { | ||
336 | /* | ||
337 | * Found a feasible alternative for event_id i, | ||
338 | * remember where we got up to with this event_id, | ||
339 | * go on to the next event_id, and start with | ||
340 | * the first alternative for it. | ||
341 | */ | ||
342 | choice[i] = j; | ||
343 | svalues[i] = value; | ||
344 | smasks[i] = mask; | ||
345 | value = nv; | ||
346 | mask |= cpuhw->amasks[i][j]; | ||
347 | ++i; | ||
348 | j = -1; | ||
349 | } | ||
350 | } | ||
351 | |||
352 | /* OK, we have a feasible combination, tell the caller the solution */ | ||
353 | for (i = 0; i < n_ev; ++i) | ||
354 | event_id[i] = cpuhw->alternatives[i][choice[i]]; | ||
355 | return 0; | ||
356 | } | ||
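/*
 * Roughly how the constraint arithmetic above works: each PMU resource
 * (a PMC, a unit or bus select field, ...) is a small bit-field packed
 * into the per-event (mask, value) pair.  ppmu->add_fields has a 1 in
 * the low bit of each counting field so that summing the values counts
 * the users of each resource, and ppmu->test_adder is chosen so that an
 * over-committed or conflicting field changes bits under 'mask' after
 * the addition, which is exactly what the two XOR tests detect.
 */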
357 | |||
358 | /* | ||
359 | * Check if newly-added events have consistent settings for | ||
360 | * exclude_{user,kernel,hv} with each other and any previously | ||
361 | * added events. | ||
362 | */ | ||
363 | static int check_excludes(struct perf_event **ctrs, unsigned int cflags[], | ||
364 | int n_prev, int n_new) | ||
365 | { | ||
366 | int eu = 0, ek = 0, eh = 0; | ||
367 | int i, n, first; | ||
368 | struct perf_event *event; | ||
369 | |||
370 | n = n_prev + n_new; | ||
371 | if (n <= 1) | ||
372 | return 0; | ||
373 | |||
374 | first = 1; | ||
375 | for (i = 0; i < n; ++i) { | ||
376 | if (cflags[i] & PPMU_LIMITED_PMC_OK) { | ||
377 | cflags[i] &= ~PPMU_LIMITED_PMC_REQD; | ||
378 | continue; | ||
379 | } | ||
380 | event = ctrs[i]; | ||
381 | if (first) { | ||
382 | eu = event->attr.exclude_user; | ||
383 | ek = event->attr.exclude_kernel; | ||
384 | eh = event->attr.exclude_hv; | ||
385 | first = 0; | ||
386 | } else if (event->attr.exclude_user != eu || | ||
387 | event->attr.exclude_kernel != ek || | ||
388 | event->attr.exclude_hv != eh) { | ||
389 | return -EAGAIN; | ||
390 | } | ||
391 | } | ||
392 | |||
393 | if (eu || ek || eh) | ||
394 | for (i = 0; i < n; ++i) | ||
395 | if (cflags[i] & PPMU_LIMITED_PMC_OK) | ||
396 | cflags[i] |= PPMU_LIMITED_PMC_REQD; | ||
397 | |||
398 | return 0; | ||
399 | } | ||
400 | |||
401 | static u64 check_and_compute_delta(u64 prev, u64 val) | ||
402 | { | ||
403 | u64 delta = (val - prev) & 0xfffffffful; | ||
404 | |||
405 | /* | ||
406 | * POWER7 can roll back counter values: if the new value is smaller | ||
407 | * than the previous value, the delta and hence the counter would be | ||
408 | * bogus, unless the counter genuinely wrapped. If a counter is | ||
409 | * rolled back, it will be smaller, but within 256, which is the maximum | ||
410 | * number of events to roll back at once. If we detect a rollback, | ||
411 | * return 0. This can lead to a small loss of precision in the | ||
412 | * counters. | ||
413 | */ | ||
414 | if (prev > val && (prev - val) < 256) | ||
415 | delta = 0; | ||
416 | |||
417 | return delta; | ||
418 | } | ||
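/*
 * For example, prev = 0xfffffff0 and val = 0x00000010 is a genuine
 * 32-bit wrap, giving delta = 0x20; but prev = 0x1000 and val = 0x0fc0
 * (prev - val = 0x40 < 256) looks like a POWER7 rollback, so we return
 * 0 rather than a bogus ~4G delta.
 */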
419 | |||
420 | static void power_pmu_read(struct perf_event *event) | ||
421 | { | ||
422 | s64 val, delta, prev; | ||
423 | |||
424 | if (event->hw.state & PERF_HES_STOPPED) | ||
425 | return; | ||
426 | |||
427 | if (!event->hw.idx) | ||
428 | return; | ||
429 | /* | ||
430 | * Performance monitor interrupts come even when interrupts | ||
431 | * are soft-disabled, as long as interrupts are hard-enabled. | ||
432 | * Therefore we treat them like NMIs. | ||
433 | */ | ||
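/*
 * That is also why the update below is a cmpxchg loop: a PMU interrupt
 * may have updated prev_count between our read of it and our read of
 * the PMC, in which case we simply retry.
 */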
434 | do { | ||
435 | prev = local64_read(&event->hw.prev_count); | ||
436 | barrier(); | ||
437 | val = read_pmc(event->hw.idx); | ||
438 | delta = check_and_compute_delta(prev, val); | ||
439 | if (!delta) | ||
440 | return; | ||
441 | } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); | ||
442 | |||
443 | local64_add(delta, &event->count); | ||
444 | local64_sub(delta, &event->hw.period_left); | ||
445 | } | ||
446 | |||
447 | /* | ||
448 | * On some machines, PMC5 and PMC6 can't be written, don't respect | ||
449 | * the freeze conditions, and don't generate interrupts. This tells | ||
450 | * us if `event' is using such a PMC. | ||
451 | */ | ||
452 | static int is_limited_pmc(int pmcnum) | ||
453 | { | ||
454 | return (ppmu->flags & PPMU_LIMITED_PMC5_6) | ||
455 | && (pmcnum == 5 || pmcnum == 6); | ||
456 | } | ||
457 | |||
458 | static void freeze_limited_counters(struct cpu_hw_events *cpuhw, | ||
459 | unsigned long pmc5, unsigned long pmc6) | ||
460 | { | ||
461 | struct perf_event *event; | ||
462 | u64 val, prev, delta; | ||
463 | int i; | ||
464 | |||
465 | for (i = 0; i < cpuhw->n_limited; ++i) { | ||
466 | event = cpuhw->limited_counter[i]; | ||
467 | if (!event->hw.idx) | ||
468 | continue; | ||
469 | val = (event->hw.idx == 5) ? pmc5 : pmc6; | ||
470 | prev = local64_read(&event->hw.prev_count); | ||
471 | event->hw.idx = 0; | ||
472 | delta = check_and_compute_delta(prev, val); | ||
473 | if (delta) | ||
474 | local64_add(delta, &event->count); | ||
475 | } | ||
476 | } | ||
477 | |||
478 | static void thaw_limited_counters(struct cpu_hw_events *cpuhw, | ||
479 | unsigned long pmc5, unsigned long pmc6) | ||
480 | { | ||
481 | struct perf_event *event; | ||
482 | u64 val, prev; | ||
483 | int i; | ||
484 | |||
485 | for (i = 0; i < cpuhw->n_limited; ++i) { | ||
486 | event = cpuhw->limited_counter[i]; | ||
487 | event->hw.idx = cpuhw->limited_hwidx[i]; | ||
488 | val = (event->hw.idx == 5) ? pmc5 : pmc6; | ||
489 | prev = local64_read(&event->hw.prev_count); | ||
490 | if (check_and_compute_delta(prev, val)) | ||
491 | local64_set(&event->hw.prev_count, val); | ||
492 | perf_event_update_userpage(event); | ||
493 | } | ||
494 | } | ||
495 | |||
496 | /* | ||
497 | * Since limited events don't respect the freeze conditions, we | ||
498 | * have to read them immediately after freezing or unfreezing the | ||
499 | * other events. We try to keep the values from the limited | ||
500 | * events as consistent as possible by keeping the delay (in | ||
501 | * cycles and instructions) between freezing/unfreezing and reading | ||
502 | * the limited events as small and consistent as possible. | ||
503 | * Therefore, if any limited events are in use, we read them | ||
504 | * both, and always in the same order, to minimize variability, | ||
505 | * and do it inside the same asm that writes MMCR0. | ||
506 | */ | ||
507 | static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) | ||
508 | { | ||
509 | unsigned long pmc5, pmc6; | ||
510 | |||
511 | if (!cpuhw->n_limited) { | ||
512 | mtspr(SPRN_MMCR0, mmcr0); | ||
513 | return; | ||
514 | } | ||
515 | |||
516 | /* | ||
517 | * Write MMCR0, then read PMC5 and PMC6 immediately. | ||
518 | * To ensure we don't get a performance monitor interrupt | ||
519 | * between writing MMCR0 and freezing/thawing the limited | ||
520 | * events, we first write MMCR0 with the event overflow | ||
521 | * interrupt enable bits turned off. | ||
522 | */ | ||
523 | asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5" | ||
524 | : "=&r" (pmc5), "=&r" (pmc6) | ||
525 | : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)), | ||
526 | "i" (SPRN_MMCR0), | ||
527 | "i" (SPRN_PMC5), "i" (SPRN_PMC6)); | ||
528 | |||
529 | if (mmcr0 & MMCR0_FC) | ||
530 | freeze_limited_counters(cpuhw, pmc5, pmc6); | ||
531 | else | ||
532 | thaw_limited_counters(cpuhw, pmc5, pmc6); | ||
533 | |||
534 | /* | ||
535 | * Write the full MMCR0 including the event overflow interrupt | ||
536 | * enable bits, if necessary. | ||
537 | */ | ||
538 | if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE)) | ||
539 | mtspr(SPRN_MMCR0, mmcr0); | ||
540 | } | ||
541 | |||
542 | /* | ||
543 | * Disable all events to prevent PMU interrupts and to allow | ||
544 | * events to be added or removed. | ||
545 | */ | ||
546 | static void power_pmu_disable(struct pmu *pmu) | ||
547 | { | ||
548 | struct cpu_hw_events *cpuhw; | ||
549 | unsigned long flags; | ||
550 | |||
551 | if (!ppmu) | ||
552 | return; | ||
553 | local_irq_save(flags); | ||
554 | cpuhw = &__get_cpu_var(cpu_hw_events); | ||
555 | |||
556 | if (!cpuhw->disabled) { | ||
557 | cpuhw->disabled = 1; | ||
558 | cpuhw->n_added = 0; | ||
559 | |||
560 | /* | ||
561 | * Check if we ever enabled the PMU on this cpu. | ||
562 | */ | ||
563 | if (!cpuhw->pmcs_enabled) { | ||
564 | ppc_enable_pmcs(); | ||
565 | cpuhw->pmcs_enabled = 1; | ||
566 | } | ||
567 | |||
568 | /* | ||
569 | * Disable instruction sampling if it was enabled | ||
570 | */ | ||
571 | if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { | ||
572 | mtspr(SPRN_MMCRA, | ||
573 | cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); | ||
574 | mb(); | ||
575 | } | ||
576 | |||
577 | /* | ||
578 | * Set the 'freeze counters' bit. | ||
579 | * The barrier is to make sure the mtspr has been | ||
580 | * executed and the PMU has frozen the events | ||
581 | * before we return. | ||
582 | */ | ||
583 | write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC); | ||
584 | mb(); | ||
585 | } | ||
586 | local_irq_restore(flags); | ||
587 | } | ||
588 | |||
589 | /* | ||
590 | * Re-enable all events. | ||
591 | * If we were previously disabled and events were added, then | ||
592 | * put the new config on the PMU. | ||
593 | */ | ||
594 | static void power_pmu_enable(struct pmu *pmu) | ||
595 | { | ||
596 | struct perf_event *event; | ||
597 | struct cpu_hw_events *cpuhw; | ||
598 | unsigned long flags; | ||
599 | long i; | ||
600 | unsigned long val; | ||
601 | s64 left; | ||
602 | unsigned int hwc_index[MAX_HWEVENTS]; | ||
603 | int n_lim; | ||
604 | int idx; | ||
605 | |||
606 | if (!ppmu) | ||
607 | return; | ||
608 | local_irq_save(flags); | ||
609 | cpuhw = &__get_cpu_var(cpu_hw_events); | ||
610 | if (!cpuhw->disabled) { | ||
611 | local_irq_restore(flags); | ||
612 | return; | ||
613 | } | ||
614 | cpuhw->disabled = 0; | ||
615 | |||
616 | /* | ||
617 | * If we didn't change anything, or only removed events, | ||
618 | * no need to recalculate MMCR* settings and reset the PMCs. | ||
619 | * Just reenable the PMU with the current MMCR* settings | ||
620 | * (possibly updated for removal of events). | ||
621 | */ | ||
622 | if (!cpuhw->n_added) { | ||
623 | mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); | ||
624 | mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); | ||
625 | if (cpuhw->n_events == 0) | ||
626 | ppc_set_pmu_inuse(0); | ||
627 | goto out_enable; | ||
628 | } | ||
629 | |||
630 | /* | ||
631 | * Compute MMCR* values for the new set of events | ||
632 | */ | ||
633 | if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index, | ||
634 | cpuhw->mmcr)) { | ||
635 | /* shouldn't ever get here */ | ||
636 | printk(KERN_ERR "oops compute_mmcr failed\n"); | ||
637 | goto out; | ||
638 | } | ||
639 | |||
640 | /* | ||
641 | * Add in MMCR0 freeze bits corresponding to the | ||
642 | * attr.exclude_* bits for the first event. | ||
643 | * We have already checked that all events have the | ||
644 | * same values for these bits as the first event. | ||
645 | */ | ||
646 | event = cpuhw->event[0]; | ||
647 | if (event->attr.exclude_user) | ||
648 | cpuhw->mmcr[0] |= MMCR0_FCP; | ||
649 | if (event->attr.exclude_kernel) | ||
650 | cpuhw->mmcr[0] |= freeze_events_kernel; | ||
651 | if (event->attr.exclude_hv) | ||
652 | cpuhw->mmcr[0] |= MMCR0_FCHV; | ||
653 | |||
654 | /* | ||
655 | * Write the new configuration to MMCR* with the freeze | ||
656 | * bit set and set the hardware events to their initial values. | ||
657 | * Then unfreeze the events. | ||
658 | */ | ||
659 | ppc_set_pmu_inuse(1); | ||
660 | mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); | ||
661 | mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); | ||
662 | mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) | ||
663 | | MMCR0_FC); | ||
664 | |||
665 | /* | ||
666 | * Read off any pre-existing events that need to move | ||
667 | * to another PMC. | ||
668 | */ | ||
669 | for (i = 0; i < cpuhw->n_events; ++i) { | ||
670 | event = cpuhw->event[i]; | ||
671 | if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) { | ||
672 | power_pmu_read(event); | ||
673 | write_pmc(event->hw.idx, 0); | ||
674 | event->hw.idx = 0; | ||
675 | } | ||
676 | } | ||
677 | |||
678 | /* | ||
679 | * Initialize the PMCs for all the new and moved events. | ||
680 | */ | ||
681 | cpuhw->n_limited = n_lim = 0; | ||
682 | for (i = 0; i < cpuhw->n_events; ++i) { | ||
683 | event = cpuhw->event[i]; | ||
684 | if (event->hw.idx) | ||
685 | continue; | ||
686 | idx = hwc_index[i] + 1; | ||
687 | if (is_limited_pmc(idx)) { | ||
688 | cpuhw->limited_counter[n_lim] = event; | ||
689 | cpuhw->limited_hwidx[n_lim] = idx; | ||
690 | ++n_lim; | ||
691 | continue; | ||
692 | } | ||
693 | val = 0; | ||
694 | if (event->hw.sample_period) { | ||
695 | left = local64_read(&event->hw.period_left); | ||
696 | if (left < 0x80000000L) | ||
697 | val = 0x80000000L - left; | ||
698 | } | ||
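/*
 * A PMC signals overflow when bit 31 becomes set, so programming it to
 * 0x80000000 - left makes it overflow after 'left' more events, which
 * is how the remaining sample period is enforced.
 */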
699 | local64_set(&event->hw.prev_count, val); | ||
700 | event->hw.idx = idx; | ||
701 | if (event->hw.state & PERF_HES_STOPPED) | ||
702 | val = 0; | ||
703 | write_pmc(idx, val); | ||
704 | perf_event_update_userpage(event); | ||
705 | } | ||
706 | cpuhw->n_limited = n_lim; | ||
707 | cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; | ||
708 | |||
709 | out_enable: | ||
710 | mb(); | ||
711 | write_mmcr0(cpuhw, cpuhw->mmcr[0]); | ||
712 | |||
713 | /* | ||
714 | * Enable instruction sampling if necessary | ||
715 | */ | ||
716 | if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { | ||
717 | mb(); | ||
718 | mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); | ||
719 | } | ||
720 | |||
721 | out: | ||
722 | local_irq_restore(flags); | ||
723 | } | ||
724 | |||
725 | static int collect_events(struct perf_event *group, int max_count, | ||
726 | struct perf_event *ctrs[], u64 *events, | ||
727 | unsigned int *flags) | ||
728 | { | ||
729 | int n = 0; | ||
730 | struct perf_event *event; | ||
731 | |||
732 | if (!is_software_event(group)) { | ||
733 | if (n >= max_count) | ||
734 | return -1; | ||
735 | ctrs[n] = group; | ||
736 | flags[n] = group->hw.event_base; | ||
737 | events[n++] = group->hw.config; | ||
738 | } | ||
739 | list_for_each_entry(event, &group->sibling_list, group_entry) { | ||
740 | if (!is_software_event(event) && | ||
741 | event->state != PERF_EVENT_STATE_OFF) { | ||
742 | if (n >= max_count) | ||
743 | return -1; | ||
744 | ctrs[n] = event; | ||
745 | flags[n] = event->hw.event_base; | ||
746 | events[n++] = event->hw.config; | ||
747 | } | ||
748 | } | ||
749 | return n; | ||
750 | } | ||
751 | |||
752 | /* | ||
753 | * Add an event to the PMU. | ||
754 | * If all events are not already frozen, then we disable and | ||
755 | * re-enable the PMU in order to get power_pmu_enable() to do the | ||
756 | * actual work of reconfiguring the PMU. | ||
757 | */ | ||
758 | static int power_pmu_add(struct perf_event *event, int ef_flags) | ||
759 | { | ||
760 | struct cpu_hw_events *cpuhw; | ||
761 | unsigned long flags; | ||
762 | int n0; | ||
763 | int ret = -EAGAIN; | ||
764 | |||
765 | local_irq_save(flags); | ||
766 | perf_pmu_disable(event->pmu); | ||
767 | |||
768 | /* | ||
769 | * Add the event to the list (if there is room) | ||
770 | * and check whether the total set is still feasible. | ||
771 | */ | ||
772 | cpuhw = &__get_cpu_var(cpu_hw_events); | ||
773 | n0 = cpuhw->n_events; | ||
774 | if (n0 >= ppmu->n_counter) | ||
775 | goto out; | ||
776 | cpuhw->event[n0] = event; | ||
777 | cpuhw->events[n0] = event->hw.config; | ||
778 | cpuhw->flags[n0] = event->hw.event_base; | ||
779 | |||
780 | if (!(ef_flags & PERF_EF_START)) | ||
781 | event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
782 | |||
783 | /* | ||
784 | * If group events scheduling transaction was started, | ||
785 | * skip the schedulability test here, it will be performed | ||
786 | * at commit time(->commit_txn) as a whole | ||
787 | */ | ||
788 | if (cpuhw->group_flag & PERF_EVENT_TXN) | ||
789 | goto nocheck; | ||
790 | |||
791 | if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) | ||
792 | goto out; | ||
793 | if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) | ||
794 | goto out; | ||
795 | event->hw.config = cpuhw->events[n0]; | ||
796 | |||
797 | nocheck: | ||
798 | ++cpuhw->n_events; | ||
799 | ++cpuhw->n_added; | ||
800 | |||
801 | ret = 0; | ||
802 | out: | ||
803 | perf_pmu_enable(event->pmu); | ||
804 | local_irq_restore(flags); | ||
805 | return ret; | ||
806 | } | ||
807 | |||
808 | /* | ||
809 | * Remove an event from the PMU. | ||
810 | */ | ||
811 | static void power_pmu_del(struct perf_event *event, int ef_flags) | ||
812 | { | ||
813 | struct cpu_hw_events *cpuhw; | ||
814 | long i; | ||
815 | unsigned long flags; | ||
816 | |||
817 | local_irq_save(flags); | ||
818 | perf_pmu_disable(event->pmu); | ||
819 | |||
820 | power_pmu_read(event); | ||
821 | |||
822 | cpuhw = &__get_cpu_var(cpu_hw_events); | ||
823 | for (i = 0; i < cpuhw->n_events; ++i) { | ||
824 | if (event == cpuhw->event[i]) { | ||
825 | while (++i < cpuhw->n_events) { | ||
826 | cpuhw->event[i-1] = cpuhw->event[i]; | ||
827 | cpuhw->events[i-1] = cpuhw->events[i]; | ||
828 | cpuhw->flags[i-1] = cpuhw->flags[i]; | ||
829 | } | ||
830 | --cpuhw->n_events; | ||
831 | ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr); | ||
832 | if (event->hw.idx) { | ||
833 | write_pmc(event->hw.idx, 0); | ||
834 | event->hw.idx = 0; | ||
835 | } | ||
836 | perf_event_update_userpage(event); | ||
837 | break; | ||
838 | } | ||
839 | } | ||
840 | for (i = 0; i < cpuhw->n_limited; ++i) | ||
841 | if (event == cpuhw->limited_counter[i]) | ||
842 | break; | ||
843 | if (i < cpuhw->n_limited) { | ||
844 | while (++i < cpuhw->n_limited) { | ||
845 | cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i]; | ||
846 | cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i]; | ||
847 | } | ||
848 | --cpuhw->n_limited; | ||
849 | } | ||
850 | if (cpuhw->n_events == 0) { | ||
851 | /* disable exceptions if no events are running */ | ||
852 | cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); | ||
853 | } | ||
854 | |||
855 | perf_pmu_enable(event->pmu); | ||
856 | local_irq_restore(flags); | ||
857 | } | ||
858 | |||
859 | /* | ||
860 | * The POWER PMU cannot freeze individual counters; to stop one we zero | ||
861 | * its PMC and ignore any interrupts it raises. | ||
862 | */ | ||
863 | |||
864 | static void power_pmu_start(struct perf_event *event, int ef_flags) | ||
865 | { | ||
866 | unsigned long flags; | ||
867 | s64 left; | ||
868 | unsigned long val; | ||
869 | |||
870 | if (!event->hw.idx || !event->hw.sample_period) | ||
871 | return; | ||
872 | |||
873 | if (!(event->hw.state & PERF_HES_STOPPED)) | ||
874 | return; | ||
875 | |||
876 | if (ef_flags & PERF_EF_RELOAD) | ||
877 | WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); | ||
878 | |||
879 | local_irq_save(flags); | ||
880 | perf_pmu_disable(event->pmu); | ||
881 | |||
882 | event->hw.state = 0; | ||
883 | left = local64_read(&event->hw.period_left); | ||
884 | |||
885 | val = 0; | ||
886 | if (left < 0x80000000L) | ||
887 | val = 0x80000000L - left; | ||
888 | |||
889 | write_pmc(event->hw.idx, val); | ||
890 | |||
891 | perf_event_update_userpage(event); | ||
892 | perf_pmu_enable(event->pmu); | ||
893 | local_irq_restore(flags); | ||
894 | } | ||
895 | |||
896 | static void power_pmu_stop(struct perf_event *event, int ef_flags) | ||
897 | { | ||
898 | unsigned long flags; | ||
899 | |||
900 | if (!event->hw.idx || !event->hw.sample_period) | ||
901 | return; | ||
902 | |||
903 | if (event->hw.state & PERF_HES_STOPPED) | ||
904 | return; | ||
905 | |||
906 | local_irq_save(flags); | ||
907 | perf_pmu_disable(event->pmu); | ||
908 | |||
909 | power_pmu_read(event); | ||
910 | event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
911 | write_pmc(event->hw.idx, 0); | ||
912 | |||
913 | perf_event_update_userpage(event); | ||
914 | perf_pmu_enable(event->pmu); | ||
915 | local_irq_restore(flags); | ||
916 | } | ||
917 | |||
918 | /* | ||
919 | * Start group events scheduling transaction | ||
920 | * Set the flag so that pmu::add() does not perform the | ||
921 | * schedulability test; it will be performed at commit time. | ||
922 | */ | ||
923 | void power_pmu_start_txn(struct pmu *pmu) | ||
924 | { | ||
925 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | ||
926 | |||
927 | perf_pmu_disable(pmu); | ||
928 | cpuhw->group_flag |= PERF_EVENT_TXN; | ||
929 | cpuhw->n_txn_start = cpuhw->n_events; | ||
930 | } | ||
931 | |||
932 | /* | ||
933 | * Stop group events scheduling transaction | ||
934 | * Clear the flag so that pmu::add() again performs the | ||
935 | * schedulability test. | ||
936 | */ | ||
937 | void power_pmu_cancel_txn(struct pmu *pmu) | ||
938 | { | ||
939 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | ||
940 | |||
941 | cpuhw->group_flag &= ~PERF_EVENT_TXN; | ||
942 | perf_pmu_enable(pmu); | ||
943 | } | ||
944 | |||
945 | /* | ||
946 | * Commit group events scheduling transaction | ||
947 | * Perform the group schedulability test as a whole | ||
948 | * Return 0 if success | ||
949 | */ | ||
950 | int power_pmu_commit_txn(struct pmu *pmu) | ||
951 | { | ||
952 | struct cpu_hw_events *cpuhw; | ||
953 | long i, n; | ||
954 | |||
955 | if (!ppmu) | ||
956 | return -EAGAIN; | ||
957 | cpuhw = &__get_cpu_var(cpu_hw_events); | ||
958 | n = cpuhw->n_events; | ||
959 | if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) | ||
960 | return -EAGAIN; | ||
961 | i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n); | ||
962 | if (i < 0) | ||
963 | return -EAGAIN; | ||
964 | |||
965 | for (i = cpuhw->n_txn_start; i < n; ++i) | ||
966 | cpuhw->event[i]->hw.config = cpuhw->events[i]; | ||
967 | |||
968 | cpuhw->group_flag &= ~PERF_EVENT_TXN; | ||
969 | perf_pmu_enable(pmu); | ||
970 | return 0; | ||
971 | } | ||
972 | |||
973 | /* | ||
974 | * Return 1 if we might be able to put event on a limited PMC, | ||
975 | * or 0 if not. | ||
976 | * An event can only go on a limited PMC if it counts something | ||
977 | * that a limited PMC can count, doesn't require interrupts, and | ||
978 | * doesn't exclude any processor mode. | ||
979 | */ | ||
980 | static int can_go_on_limited_pmc(struct perf_event *event, u64 ev, | ||
981 | unsigned int flags) | ||
982 | { | ||
983 | int n; | ||
984 | u64 alt[MAX_EVENT_ALTERNATIVES]; | ||
985 | |||
986 | if (event->attr.exclude_user | ||
987 | || event->attr.exclude_kernel | ||
988 | || event->attr.exclude_hv | ||
989 | || event->attr.sample_period) | ||
990 | return 0; | ||
991 | |||
992 | if (ppmu->limited_pmc_event(ev)) | ||
993 | return 1; | ||
994 | |||
995 | /* | ||
996 | * The requested event_id isn't on a limited PMC already; | ||
997 | * see if any alternative code goes on a limited PMC. | ||
998 | */ | ||
999 | if (!ppmu->get_alternatives) | ||
1000 | return 0; | ||
1001 | |||
1002 | flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD; | ||
1003 | n = ppmu->get_alternatives(ev, flags, alt); | ||
1004 | |||
1005 | return n > 0; | ||
1006 | } | ||
1007 | |||
1008 | /* | ||
1009 | * Find an alternative event_id that goes on a normal PMC, if possible, | ||
1010 | * and return the event_id code, or 0 if there is no such alternative. | ||
1011 | * (Note: event_id code 0 is "don't count" on all machines.) | ||
1012 | */ | ||
1013 | static u64 normal_pmc_alternative(u64 ev, unsigned long flags) | ||
1014 | { | ||
1015 | u64 alt[MAX_EVENT_ALTERNATIVES]; | ||
1016 | int n; | ||
1017 | |||
1018 | flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD); | ||
1019 | n = ppmu->get_alternatives(ev, flags, alt); | ||
1020 | if (!n) | ||
1021 | return 0; | ||
1022 | return alt[0]; | ||
1023 | } | ||
1024 | |||
1025 | /* Number of perf_events counting hardware events */ | ||
1026 | static atomic_t num_events; | ||
1027 | /* Used to avoid races in calling reserve/release_pmc_hardware */ | ||
1028 | static DEFINE_MUTEX(pmc_reserve_mutex); | ||
1029 | |||
1030 | /* | ||
1031 | * Release the PMU if this is the last perf_event. | ||
1032 | */ | ||
1033 | static void hw_perf_event_destroy(struct perf_event *event) | ||
1034 | { | ||
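/*
 * atomic_add_unless(&num_events, -1, 1) decrements the count unless it
 * is exactly 1; in that last-event case we take the mutex below, so the
 * final decrement and release_pmc_hardware() cannot race with
 * power_pmu_event_init() reserving the hardware.
 */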
1035 | if (!atomic_add_unless(&num_events, -1, 1)) { | ||
1036 | mutex_lock(&pmc_reserve_mutex); | ||
1037 | if (atomic_dec_return(&num_events) == 0) | ||
1038 | release_pmc_hardware(); | ||
1039 | mutex_unlock(&pmc_reserve_mutex); | ||
1040 | } | ||
1041 | } | ||
1042 | |||
1043 | /* | ||
1044 | * Translate a generic cache event_id config to a raw event_id code. | ||
1045 | */ | ||
1046 | static int hw_perf_cache_event(u64 config, u64 *eventp) | ||
1047 | { | ||
1048 | unsigned long type, op, result; | ||
1049 | int ev; | ||
1050 | |||
1051 | if (!ppmu->cache_events) | ||
1052 | return -EINVAL; | ||
1053 | |||
1054 | /* unpack config */ | ||
1055 | type = config & 0xff; | ||
1056 | op = (config >> 8) & 0xff; | ||
1057 | result = (config >> 16) & 0xff; | ||
1058 | |||
1059 | if (type >= PERF_COUNT_HW_CACHE_MAX || | ||
1060 | op >= PERF_COUNT_HW_CACHE_OP_MAX || | ||
1061 | result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | ||
1062 | return -EINVAL; | ||
1063 | |||
1064 | ev = (*ppmu->cache_events)[type][op][result]; | ||
1065 | if (ev == 0) | ||
1066 | return -EOPNOTSUPP; | ||
1067 | if (ev == -1) | ||
1068 | return -EINVAL; | ||
1069 | *eventp = ev; | ||
1070 | return 0; | ||
1071 | } | ||
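/*
 * The generic cache config is packed as type | (op << 8) | (result << 16);
 * for instance PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8)
 * | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) requests L1 data-cache read
 * misses, which the table lookup above turns into a raw event code.
 */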
1072 | |||
1073 | static int power_pmu_event_init(struct perf_event *event) | ||
1074 | { | ||
1075 | u64 ev; | ||
1076 | unsigned long flags; | ||
1077 | struct perf_event *ctrs[MAX_HWEVENTS]; | ||
1078 | u64 events[MAX_HWEVENTS]; | ||
1079 | unsigned int cflags[MAX_HWEVENTS]; | ||
1080 | int n; | ||
1081 | int err; | ||
1082 | struct cpu_hw_events *cpuhw; | ||
1083 | |||
1084 | if (!ppmu) | ||
1085 | return -ENOENT; | ||
1086 | |||
1087 | /* does not support taken branch sampling */ | ||
1088 | if (has_branch_stack(event)) | ||
1089 | return -EOPNOTSUPP; | ||
1090 | |||
1091 | switch (event->attr.type) { | ||
1092 | case PERF_TYPE_HARDWARE: | ||
1093 | ev = event->attr.config; | ||
1094 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) | ||
1095 | return -EOPNOTSUPP; | ||
1096 | ev = ppmu->generic_events[ev]; | ||
1097 | break; | ||
1098 | case PERF_TYPE_HW_CACHE: | ||
1099 | err = hw_perf_cache_event(event->attr.config, &ev); | ||
1100 | if (err) | ||
1101 | return err; | ||
1102 | break; | ||
1103 | case PERF_TYPE_RAW: | ||
1104 | ev = event->attr.config; | ||
1105 | break; | ||
1106 | default: | ||
1107 | return -ENOENT; | ||
1108 | } | ||
1109 | |||
1110 | event->hw.config_base = ev; | ||
1111 | event->hw.idx = 0; | ||
1112 | |||
1113 | /* | ||
1114 | * If we are not running on a hypervisor, force the | ||
1115 | * exclude_hv bit to 0 so that we don't care what | ||
1116 | * the user set it to. | ||
1117 | */ | ||
1118 | if (!firmware_has_feature(FW_FEATURE_LPAR)) | ||
1119 | event->attr.exclude_hv = 0; | ||
1120 | |||
1121 | /* | ||
1122 | * If this is a per-task event, then we can use | ||
1123 | * PM_RUN_* events interchangeably with their non RUN_* | ||
1124 | * equivalents, e.g. PM_RUN_CYC instead of PM_CYC. | ||
1125 | * XXX we should check if the task is an idle task. | ||
1126 | */ | ||
1127 | flags = 0; | ||
1128 | if (event->attach_state & PERF_ATTACH_TASK) | ||
1129 | flags |= PPMU_ONLY_COUNT_RUN; | ||
1130 | |||
1131 | /* | ||
1132 | * If this machine has limited events, check whether this | ||
1133 | * event_id could go on a limited event. | ||
1134 | */ | ||
1135 | if (ppmu->flags & PPMU_LIMITED_PMC5_6) { | ||
1136 | if (can_go_on_limited_pmc(event, ev, flags)) { | ||
1137 | flags |= PPMU_LIMITED_PMC_OK; | ||
1138 | } else if (ppmu->limited_pmc_event(ev)) { | ||
1139 | /* | ||
1140 | * The requested event_id is on a limited PMC, | ||
1141 | * but we can't use a limited PMC; see if any | ||
1142 | * alternative goes on a normal PMC. | ||
1143 | */ | ||
1144 | ev = normal_pmc_alternative(ev, flags); | ||
1145 | if (!ev) | ||
1146 | return -EINVAL; | ||
1147 | } | ||
1148 | } | ||
1149 | |||
1150 | /* | ||
1151 | * If this is in a group, check if it can go on with all the | ||
1152 | * other hardware events in the group. We assume the event | ||
1153 | * hasn't been linked into its leader's sibling list at this point. | ||
1154 | */ | ||
1155 | n = 0; | ||
1156 | if (event->group_leader != event) { | ||
1157 | n = collect_events(event->group_leader, ppmu->n_counter - 1, | ||
1158 | ctrs, events, cflags); | ||
1159 | if (n < 0) | ||
1160 | return -EINVAL; | ||
1161 | } | ||
1162 | events[n] = ev; | ||
1163 | ctrs[n] = event; | ||
1164 | cflags[n] = flags; | ||
1165 | if (check_excludes(ctrs, cflags, n, 1)) | ||
1166 | return -EINVAL; | ||
1167 | |||
1168 | cpuhw = &get_cpu_var(cpu_hw_events); | ||
1169 | err = power_check_constraints(cpuhw, events, cflags, n + 1); | ||
1170 | put_cpu_var(cpu_hw_events); | ||
1171 | if (err) | ||
1172 | return -EINVAL; | ||
1173 | |||
1174 | event->hw.config = events[n]; | ||
1175 | event->hw.event_base = cflags[n]; | ||
1176 | event->hw.last_period = event->hw.sample_period; | ||
1177 | local64_set(&event->hw.period_left, event->hw.last_period); | ||
1178 | |||
1179 | /* | ||
1180 | * See if we need to reserve the PMU. | ||
1181 | * If no events are currently in use, then we have to take a | ||
1182 | * mutex to ensure that we don't race with another task doing | ||
1183 | * reserve_pmc_hardware or release_pmc_hardware. | ||
1184 | */ | ||
1185 | err = 0; | ||
1186 | if (!atomic_inc_not_zero(&num_events)) { | ||
1187 | mutex_lock(&pmc_reserve_mutex); | ||
1188 | if (atomic_read(&num_events) == 0 && | ||
1189 | reserve_pmc_hardware(perf_event_interrupt)) | ||
1190 | err = -EBUSY; | ||
1191 | else | ||
1192 | atomic_inc(&num_events); | ||
1193 | mutex_unlock(&pmc_reserve_mutex); | ||
1194 | } | ||
1195 | event->destroy = hw_perf_event_destroy; | ||
1196 | |||
1197 | return err; | ||
1198 | } | ||
1199 | |||
1200 | static int power_pmu_event_idx(struct perf_event *event) | ||
1201 | { | ||
1202 | return event->hw.idx; | ||
1203 | } | ||
1204 | |||
1205 | struct pmu power_pmu = { | ||
1206 | .pmu_enable = power_pmu_enable, | ||
1207 | .pmu_disable = power_pmu_disable, | ||
1208 | .event_init = power_pmu_event_init, | ||
1209 | .add = power_pmu_add, | ||
1210 | .del = power_pmu_del, | ||
1211 | .start = power_pmu_start, | ||
1212 | .stop = power_pmu_stop, | ||
1213 | .read = power_pmu_read, | ||
1214 | .start_txn = power_pmu_start_txn, | ||
1215 | .cancel_txn = power_pmu_cancel_txn, | ||
1216 | .commit_txn = power_pmu_commit_txn, | ||
1217 | .event_idx = power_pmu_event_idx, | ||
1218 | }; | ||
1219 | |||
1220 | /* | ||
1221 | * A counter has overflowed; update its count and record | ||
1222 | * things if requested. Note that interrupts are hard-disabled | ||
1223 | * here so there is no possibility of being interrupted. | ||
1224 | */ | ||
1225 | static void record_and_restart(struct perf_event *event, unsigned long val, | ||
1226 | struct pt_regs *regs) | ||
1227 | { | ||
1228 | u64 period = event->hw.sample_period; | ||
1229 | s64 prev, delta, left; | ||
1230 | int record = 0; | ||
1231 | |||
1232 | if (event->hw.state & PERF_HES_STOPPED) { | ||
1233 | write_pmc(event->hw.idx, 0); | ||
1234 | return; | ||
1235 | } | ||
1236 | |||
1237 | /* we don't have to worry about interrupts here */ | ||
1238 | prev = local64_read(&event->hw.prev_count); | ||
1239 | delta = check_and_compute_delta(prev, val); | ||
1240 | local64_add(delta, &event->count); | ||
1241 | |||
1242 | /* | ||
1243 | * See if the total period for this event has expired, | ||
1244 | * and update for the next period. | ||
1245 | */ | ||
1246 | val = 0; | ||
1247 | left = local64_read(&event->hw.period_left) - delta; | ||
1248 | if (period) { | ||
1249 | if (left <= 0) { | ||
1250 | left += period; | ||
1251 | if (left <= 0) | ||
1252 | left = period; | ||
1253 | record = 1; | ||
1254 | event->hw.last_period = event->hw.sample_period; | ||
1255 | } | ||
1256 | if (left < 0x80000000LL) | ||
1257 | val = 0x80000000LL - left; | ||
1258 | } | ||
1259 | |||
1260 | write_pmc(event->hw.idx, val); | ||
1261 | local64_set(&event->hw.prev_count, val); | ||
1262 | local64_set(&event->hw.period_left, left); | ||
1263 | perf_event_update_userpage(event); | ||
1264 | |||
1265 | /* | ||
1266 | * Finally record data if requested. | ||
1267 | */ | ||
1268 | if (record) { | ||
1269 | struct perf_sample_data data; | ||
1270 | |||
1271 | perf_sample_data_init(&data, ~0ULL); | ||
1272 | data.period = event->hw.last_period; | ||
1273 | |||
1274 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) | ||
1275 | perf_get_data_addr(regs, &data.addr); | ||
1276 | |||
1277 | if (perf_event_overflow(event, &data, regs)) | ||
1278 | power_pmu_stop(event, 0); | ||
1279 | } | ||
1280 | } | ||
1281 | |||
1282 | /* | ||
1283 | * Called from generic code to get the misc flags (i.e. processor mode) | ||
1284 | * for an event_id. | ||
1285 | */ | ||
1286 | unsigned long perf_misc_flags(struct pt_regs *regs) | ||
1287 | { | ||
1288 | u32 flags = perf_get_misc_flags(regs); | ||
1289 | |||
1290 | if (flags) | ||
1291 | return flags; | ||
1292 | return user_mode(regs) ? PERF_RECORD_MISC_USER : | ||
1293 | PERF_RECORD_MISC_KERNEL; | ||
1294 | } | ||
1295 | |||
1296 | /* | ||
1297 | * Called from generic code to get the instruction pointer | ||
1298 | * for an event_id. | ||
1299 | */ | ||
1300 | unsigned long perf_instruction_pointer(struct pt_regs *regs) | ||
1301 | { | ||
1302 | unsigned long ip; | ||
1303 | |||
1304 | if (TRAP(regs) != 0xf00) | ||
1305 | return regs->nip; /* not a PMU interrupt */ | ||
1306 | |||
1307 | ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs); | ||
1308 | return ip; | ||
1309 | } | ||
1310 | |||
1311 | static bool pmc_overflow(unsigned long val) | ||
1312 | { | ||
1313 | if ((int)val < 0) | ||
1314 | return true; | ||
1315 | |||
1316 | /* | ||
1317 | * Events on POWER7 can roll back if a speculative event doesn't | ||
1318 | * eventually complete. Unfortunately in some rare cases they will | ||
1319 | * raise a performance monitor exception. We need to catch this to | ||
1320 | * ensure we reset the PMC. In all cases the PMC will be 256 or less | ||
1321 | * cycles from overflow. | ||
1322 | * | ||
1323 | * We only do this if the first pass fails to find any overflowing | ||
1324 | * PMCs because a user might set a period of less than 256 and we | ||
1325 | * don't want to mistakenly reset them. | ||
1326 | */ | ||
1327 | if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) | ||
1328 | return true; | ||
1329 | |||
1330 | return false; | ||
1331 | } | ||
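/*
 * For instance val = 0x7fffff80 has not gone negative, but it is only
 * 128 counts below 0x80000000, so on POWER7 it is assumed to have
 * rolled back from an overflow and is treated as overflowed.
 */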
1332 | |||
1333 | /* | ||
1334 | * Performance monitor interrupt handler. | ||
1335 | */ | ||
1336 | static void perf_event_interrupt(struct pt_regs *regs) | ||
1337 | { | ||
1338 | int i; | ||
1339 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | ||
1340 | struct perf_event *event; | ||
1341 | unsigned long val; | ||
1342 | int found = 0; | ||
1343 | int nmi; | ||
1344 | |||
1345 | if (cpuhw->n_limited) | ||
1346 | freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5), | ||
1347 | mfspr(SPRN_PMC6)); | ||
1348 | |||
1349 | perf_read_regs(regs); | ||
1350 | |||
1351 | nmi = perf_intr_is_nmi(regs); | ||
1352 | if (nmi) | ||
1353 | nmi_enter(); | ||
1354 | else | ||
1355 | irq_enter(); | ||
1356 | |||
1357 | for (i = 0; i < cpuhw->n_events; ++i) { | ||
1358 | event = cpuhw->event[i]; | ||
1359 | if (!event->hw.idx || is_limited_pmc(event->hw.idx)) | ||
1360 | continue; | ||
1361 | val = read_pmc(event->hw.idx); | ||
1362 | if ((int)val < 0) { | ||
1363 | /* event has overflowed */ | ||
1364 | found = 1; | ||
1365 | record_and_restart(event, val, regs); | ||
1366 | } | ||
1367 | } | ||
1368 | |||
1369 | /* | ||
1370 | * In case we didn't find and reset the event that caused | ||
1371 | * the interrupt, scan all events and reset any that are | ||
1372 | * negative, to avoid getting continual interrupts. | ||
1373 | * Any that we processed in the previous loop will not be negative. | ||
1374 | */ | ||
1375 | if (!found) { | ||
1376 | for (i = 0; i < ppmu->n_counter; ++i) { | ||
1377 | if (is_limited_pmc(i + 1)) | ||
1378 | continue; | ||
1379 | val = read_pmc(i + 1); | ||
1380 | if (pmc_overflow(val)) | ||
1381 | write_pmc(i + 1, 0); | ||
1382 | } | ||
1383 | } | ||
1384 | |||
1385 | /* | ||
1386 | * Reset MMCR0 to its normal value. This will set PMXE and | ||
1387 | * clear FC (freeze counters) and PMAO (perf mon alert occurred) | ||
1388 | * and thus allow interrupts to occur again. | ||
1389 | * XXX might want to use MSR.PM to keep the events frozen until | ||
1390 | * we get back out of this interrupt. | ||
1391 | */ | ||
1392 | write_mmcr0(cpuhw, cpuhw->mmcr[0]); | ||
1393 | |||
1394 | if (nmi) | ||
1395 | nmi_exit(); | ||
1396 | else | ||
1397 | irq_exit(); | ||
1398 | } | ||
1399 | |||
1400 | static void power_pmu_setup(int cpu) | ||
1401 | { | ||
1402 | struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); | ||
1403 | |||
1404 | if (!ppmu) | ||
1405 | return; | ||
1406 | memset(cpuhw, 0, sizeof(*cpuhw)); | ||
1407 | cpuhw->mmcr[0] = MMCR0_FC; | ||
1408 | } | ||
1409 | |||
1410 | static int __cpuinit | ||
1411 | power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | ||
1412 | { | ||
1413 | unsigned int cpu = (long)hcpu; | ||
1414 | |||
1415 | switch (action & ~CPU_TASKS_FROZEN) { | ||
1416 | case CPU_UP_PREPARE: | ||
1417 | power_pmu_setup(cpu); | ||
1418 | break; | ||
1419 | |||
1420 | default: | ||
1421 | break; | ||
1422 | } | ||
1423 | |||
1424 | return NOTIFY_OK; | ||
1425 | } | ||
1426 | |||
1427 | int __cpuinit register_power_pmu(struct power_pmu *pmu) | ||
1428 | { | ||
1429 | if (ppmu) | ||
1430 | return -EBUSY; /* something's already registered */ | ||
1431 | |||
1432 | ppmu = pmu; | ||
1433 | pr_info("%s performance monitor hardware support registered\n", | ||
1434 | pmu->name); | ||
1435 | |||
1436 | #ifdef MSR_HV | ||
1437 | /* | ||
1438 | * Use FCHV to ignore kernel events if MSR.HV is set. | ||
1439 | */ | ||
1440 | if (mfmsr() & MSR_HV) | ||
1441 | freeze_events_kernel = MMCR0_FCHV; | ||
1442 | #endif /* MSR_HV */ | ||
1443 | |||
1444 | perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW); | ||
1445 | perf_cpu_notifier(power_pmu_notifier); | ||
1446 | |||
1447 | return 0; | ||
1448 | } | ||