author		Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
commit		ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree		644b88f8a71896307d71438e9b3af49126ffb22b /arch/arm/kernel/perf_event.c
parent		43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent		3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'arch/arm/kernel/perf_event.c')

-rw-r--r--	arch/arm/kernel/perf_event.c	2277

1 file changed, 2277 insertions, 0 deletions
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
new file mode 100644
index 000000000000..9e70f2053f9a
--- /dev/null
+++ b/arch/arm/kernel/perf_event.c
@@ -0,0 +1,2277 @@
#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>

static const struct pmu_irqs *pmu_irqs;

/*
 * Hardware lock to serialize accesses to PMU registers. Needed for the
 * read/modify/write sequences.
 */
DEFINE_SPINLOCK(pmu_lock);

/*
 * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
 * another platform that supports more, we need to increase this to be the
 * largest of all platforms.
 *
 * ARMv7 supports up to 32 events:
 * cycle counter CCNT + 31 event counters CNT0..30.
 * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
 */
#define ARMPMU_MAX_HWEVENTS 33

/* The events for a given CPU. */
struct cpu_hw_events {
	/*
	 * The events that are active on the CPU for the given index. Index 0
	 * is reserved.
	 */
	struct perf_event *events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];

	/*
	 * A 1 bit for an index indicates that the counter is actively being
	 * used.
	 */
	unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

struct arm_pmu {
	char *name;
	irqreturn_t (*handle_irq)(int irq_num, void *dev);
	void (*enable)(struct hw_perf_event *evt, int idx);
	void (*disable)(struct hw_perf_event *evt, int idx);
	int (*event_map)(int evt);
	u64 (*raw_event)(u64);
	int (*get_event_idx)(struct cpu_hw_events *cpuc,
			     struct hw_perf_event *hwc);
	u32 (*read_counter)(int idx);
	void (*write_counter)(int idx, u32 val);
	void (*start)(void);
	void (*stop)(void);
	int num_events;
	u64 max_period;
};

/* Set at runtime when we know what CPU type we are. */
static const struct arm_pmu *armpmu;

#define HW_OP_UNSUPPORTED 0xFFFF

#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED 0xFFFF

static unsigned armpmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
				     [PERF_COUNT_HW_CACHE_OP_MAX]
				     [PERF_COUNT_HW_CACHE_RESULT_MAX];

static int
armpmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)armpmu_perf_cache_map[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}
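
/*
 * Example (illustrative): an L1-data read miss arrives from userspace as
 *
 *	config = C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)
 *
 * and resolves to armpmu_perf_cache_map[C(L1D)][C(OP_READ)][C(RESULT_MISS)],
 * i.e. the CPU-specific event number filled into this table from one of the
 * per-CPU-type maps below (the copy itself happens in init code outside
 * this hunk).
 */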

static int
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	atomic64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
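
/*
 * Note on the arithmetic above: the counter is programmed with -left, so
 * it overflows (raising the PMU interrupt) after exactly 'left' further
 * increments; prev_count is seeded with the same value so that the next
 * armpmu_event_update() computes its delta from this starting point.
 */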

static u64
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx)
{
	int shift = 64 - 32;
	s64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
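
/*
 * The counters are only 32 bits wide: shifting both raw values up by
 * 'shift' (32) before subtracting and shifting back down sign-extends the
 * 32-bit difference, so a counter that wrapped between two reads still
 * yields the correct positive delta. The cmpxchg loop retries if an
 * overflow interrupt updated prev_count between the read and the swap.
 */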

static void
armpmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);

	clear_bit(idx, cpuc->active_mask);
	armpmu->disable(hwc, idx);

	barrier();

	armpmu_event_update(event, hwc, idx);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void
armpmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	armpmu_event_update(event, hwc, hwc->idx);
}

static void
armpmu_unthrottle(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were throttled we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event, hwc, hwc->idx);
	armpmu->enable(hwc, hwc->idx);
}

static int
armpmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(hwc, idx);
	cpuc->events[idx] = event;
	set_bit(idx, cpuc->active_mask);

	/* Set the period for the event. */
	armpmu_event_set_period(event, hwc, idx);

	/* Enable the event. */
	armpmu->enable(hwc, idx);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	return err;
}

static struct pmu pmu = {
	.enable = armpmu_enable,
	.disable = armpmu_disable,
	.unthrottle = armpmu_unthrottle,
	.read = armpmu_read,
};

static int
validate_event(struct cpu_hw_events *cpuc,
	       struct perf_event *event)
{
	struct hw_perf_event fake_event = event->hw;

	if (event->pmu && event->pmu != &pmu)
		return 0;

	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_pmu;

	memset(&fake_pmu, 0, sizeof(fake_pmu));

	if (!validate_event(&fake_pmu, leader))
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -ENOSPC;
	}

	if (!validate_event(&fake_pmu, event))
		return -ENOSPC;

	return 0;
}
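
/*
 * Group validation is a dry run of counter allocation: fake_pmu starts
 * with an empty used_mask, and each validate_event() call above claims a
 * counter in it via get_event_idx(), so a group is accepted only if the
 * leader, all siblings and the new event could be scheduled onto the
 * hardware counters simultaneously.
 */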

static int
armpmu_reserve_hardware(void)
{
	int i;
	int err;

	pmu_irqs = reserve_pmu();
	if (IS_ERR(pmu_irqs)) {
		pr_warning("unable to reserve pmu\n");
		return PTR_ERR(pmu_irqs);
	}

	init_pmu();

	if (pmu_irqs->num_irqs < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	for (i = 0; i < pmu_irqs->num_irqs; ++i) {
		err = request_irq(pmu_irqs->irqs[i], armpmu->handle_irq,
				  IRQF_DISABLED | IRQF_NOBALANCING,
				  "armpmu", NULL);
		if (err) {
			pr_warning("unable to request IRQ%d for ARM "
				   "perf counters\n", pmu_irqs->irqs[i]);
			break;
		}
	}

	if (err) {
		for (i = i - 1; i >= 0; --i)
			free_irq(pmu_irqs->irqs[i], NULL);
		release_pmu(pmu_irqs);
		pmu_irqs = NULL;
	}

	return err;
}

static void
armpmu_release_hardware(void)
{
	int i;

	for (i = pmu_irqs->num_irqs - 1; i >= 0; --i)
		free_irq(pmu_irqs->irqs[i], NULL);
	armpmu->stop();

	release_pmu(pmu_irqs);
	pmu_irqs = NULL;
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);

static void
hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
		armpmu_release_hardware();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err;

	/* Decode the generic type into an ARM event identifier. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		mapping = armpmu->event_map(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		mapping = armpmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		mapping = armpmu->raw_event(event->attr.config);
	} else {
		pr_debug("event type %x not supported\n", event->attr.type);
		return -EOPNOTSUPP;
	}

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 * The ARM performance counters are on all of the time so if someone
	 * has asked us for some excludes then we have to fail.
	 */
	if (event->attr.exclude_kernel || event->attr.exclude_user ||
	    event->attr.exclude_hv || event->attr.exclude_idle) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EPERM;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put
	 * it yet. For SMP systems, each core has its own PMU so we can't
	 * do any clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;

	/*
	 * Store the event encoding into the config_base field. config and
	 * event_base are unused as the only 2 things we need to know are
	 * the event mapping and the counter to use. The counter to use is
	 * also the index and the config_base is the event type.
	 */
	hwc->config_base = (unsigned long)mapping;
	hwc->config = 0;
	hwc->event_base = 0;

	if (!hwc->sample_period) {
		hwc->sample_period = armpmu->max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	return err;
}

const struct pmu *
hw_perf_event_init(struct perf_event *event)
{
	int err = 0;

	if (!armpmu)
		return ERR_PTR(-ENODEV);

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(&active_events)) {
		if (atomic_read(&active_events) > perf_max_events) {
			atomic_dec(&active_events);
			return ERR_PTR(-ENOSPC);
		}

		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			err = armpmu_reserve_hardware();
		}

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return ERR_PTR(err);

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err ? ERR_PTR(err) : &pmu;
}
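
/*
 * Reservation sketch: the first event created takes pmu_reserve_mutex,
 * claims the PMU hardware and its IRQs via armpmu_reserve_hardware() and
 * bumps active_events; subsequent events take the atomic_inc_not_zero()
 * fast path. hw_perf_event_destroy() releases the hardware again on the
 * final 1 -> 0 transition, which is the only case in which
 * atomic_dec_and_mutex_lock() acquires the mutex.
 */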

void
hw_perf_enable(void)
{
	/* Enable all of the perf events on hardware. */
	int idx;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!armpmu)
		return;

	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];

		if (!event)
			continue;

		armpmu->enable(&event->hw, idx);
	}

	armpmu->start();
}

void
hw_perf_disable(void)
{
	if (armpmu)
		armpmu->stop();
}

/*
 * ARMv6 Performance counter handling code.
 *
 * ARMv6 has 2 configurable performance counters and a single cycle counter.
 * They all share a single reset bit but can be written to zero so we can use
 * that for a reset.
 *
 * The counters can't be individually enabled or disabled so when we remove
 * one event and replace it with another we could get spurious counts from the
 * wrong event. However, we can take advantage of the fact that the
 * performance counters can export events to the event bus, and the event bus
 * itself can be monitored. This requires that we *don't* export the events to
 * the event bus. The procedure for disabling a configurable counter is:
 *	- change the counter to count the ETMEXTOUT[0] signal (0x20). This
 *	  effectively stops the counter from counting.
 *	- disable the counter's interrupt generation (each counter has its
 *	  own interrupt enable bit).
 * Once stopped, the counter value can be written as 0 to reset.
 *
 * To enable a counter:
 *	- enable the counter's interrupt generation.
 *	- set the new event type.
 *
 * Note: the dedicated cycle counter only counts cycles and can't be
 * enabled/disabled independently of the others. When we want to disable the
 * cycle counter, we have to just disable the interrupt reporting and start
 * ignoring that counter. When re-enabling, we have to reset the value and
 * enable the interrupt.
 */

enum armv6_perf_types {
	ARMV6_PERFCTR_ICACHE_MISS = 0x0,
	ARMV6_PERFCTR_IBUF_STALL = 0x1,
	ARMV6_PERFCTR_DDEP_STALL = 0x2,
	ARMV6_PERFCTR_ITLB_MISS = 0x3,
	ARMV6_PERFCTR_DTLB_MISS = 0x4,
	ARMV6_PERFCTR_BR_EXEC = 0x5,
	ARMV6_PERFCTR_BR_MISPREDICT = 0x6,
	ARMV6_PERFCTR_INSTR_EXEC = 0x7,
	ARMV6_PERFCTR_DCACHE_HIT = 0x9,
	ARMV6_PERFCTR_DCACHE_ACCESS = 0xA,
	ARMV6_PERFCTR_DCACHE_MISS = 0xB,
	ARMV6_PERFCTR_DCACHE_WBACK = 0xC,
	ARMV6_PERFCTR_SW_PC_CHANGE = 0xD,
	ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF,
	ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10,
	ARMV6_PERFCTR_LSU_FULL_STALL = 0x11,
	ARMV6_PERFCTR_WBUF_DRAINED = 0x12,
	ARMV6_PERFCTR_CPU_CYCLES = 0xFF,
	ARMV6_PERFCTR_NOP = 0x20,
};

enum armv6_counters {
	ARMV6_CYCLE_COUNTER = 1,
	ARMV6_COUNTER0,
	ARMV6_COUNTER1,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
};

static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * The ARM performance counters can count micro DTLB misses,
		 * micro ITLB misses and main TLB misses. There isn't an event
		 * for TLB misses, so we use the micro misses here; if users
		 * want the main TLB misses they can use a raw counter.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

enum armv6mpcore_perf_types {
	ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0,
	ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1,
	ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2,
	ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3,
	ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4,
	ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5,
	ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6,
	ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7,
	ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8,
	ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
	ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB,
	ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
	ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD,
	ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
	ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF,
	ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10,
	ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
	ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12,
	ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13,
	ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
};

static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] =
				ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
			[C(RESULT_MISS)] =
				ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] =
				ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
			[C(RESULT_MISS)] =
				ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * The ARM performance counters can count micro DTLB misses,
		 * micro ITLB misses and main TLB misses. There isn't an event
		 * for TLB misses, so we use the micro misses here; if users
		 * want the main TLB misses they can use a raw counter.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

static inline unsigned long
armv6_pmcr_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
	return val;
}

static inline void
armv6_pmcr_write(unsigned long val)
{
	asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
}

#define ARMV6_PMCR_ENABLE		(1 << 0)
#define ARMV6_PMCR_CTR01_RESET		(1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET		(1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV		(1 << 3)
#define ARMV6_PMCR_COUNT0_IEN		(1 << 4)
#define ARMV6_PMCR_COUNT1_IEN		(1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN		(1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW	(1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW	(1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW	(1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT	20
#define ARMV6_PMCR_EVT_COUNT0_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT	12
#define ARMV6_PMCR_EVT_COUNT1_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)

#define ARMV6_PMCR_OVERFLOWED_MASK \
	(ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
	 ARMV6_PMCR_CCOUNT_OVERFLOW)
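
/*
 * PMCR bit layout implied by the definitions above (illustrative):
 *
 *	[27:20]	event selector for counter 0
 *	[19:12]	event selector for counter 1
 *	[10]	CCNT overflow	 [9] counter 1 overflow	  [8] counter 0 overflow
 *	[6]	CCNT IRQ enable	 [5] counter 1 IRQ enable [4] counter 0 IRQ enable
 *	[3]	CCNT divider (count every 64th cycle)
 *	[2]	CCNT reset	 [1] counter 0/1 reset	  [0] global enable
 */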

static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
	return (pmcr & ARMV6_PMCR_OVERFLOWED_MASK);
}

static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
				  enum armv6_counters counter)
{
	int ret = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
	else if (ARMV6_COUNTER0 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
	else if (ARMV6_COUNTER1 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return ret;
}

static inline u32
armv6pmu_read_counter(int counter)
{
	unsigned long value = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return value;
}

static inline void
armv6pmu_write_counter(int counter,
		       u32 value)
{
	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}

void
armv6pmu_enable_event(struct hw_perf_event *hwc,
		      int idx)
{
	unsigned long val, mask, evt, flags;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask = 0;
		evt = ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask = ARMV6_PMCR_EVT_COUNT0_MASK;
		evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
		      ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask = ARMV6_PMCR_EVT_COUNT1_MASK;
		evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
		      ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the event
	 * that we're interested in.
	 */
	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static irqreturn_t
armv6pmu_handle_irq(int irq_num,
		    void *dev)
{
	unsigned long pmcr = armv6_pmcr_read();
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	if (!armv6_pmcr_has_overflowed(pmcr))
		return IRQ_NONE;

	regs = get_irq_regs();

	/*
	 * The interrupts are cleared by writing the overflow flags back to
	 * the control register. All of the other bits don't have any effect
	 * if they are rewritten, so write the whole value back.
	 */
	armv6_pmcr_write(pmcr);

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts enabled. For
	 * platforms that can have the PMU interrupts raised as a PMI, this
	 * will not work.
	 */
	perf_event_do_pending();

	return IRQ_HANDLED;
}

static void
armv6pmu_start(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val |= ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

void
armv6pmu_stop(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static inline int
armv6pmu_event_map(int config)
{
	int mapping = armv6_perf_map[config];
	if (HW_OP_UNSUPPORTED == mapping)
		mapping = -EOPNOTSUPP;
	return mapping;
}

static inline int
armv6mpcore_pmu_event_map(int config)
{
	int mapping = armv6mpcore_perf_map[config];
	if (HW_OP_UNSUPPORTED == mapping)
		mapping = -EOPNOTSUPP;
	return mapping;
}

static u64
armv6pmu_raw_event(u64 config)
{
	return config & 0xff;
}

static int
armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
		       struct hw_perf_event *event)
{
	/* Always place a cycle-counting event into the cycle counter. */
	if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV6_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try and use
		 * counter0 and counter1.
		 */
		if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) {
			return ARMV6_COUNTER1;
		}

		if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) {
			return ARMV6_COUNTER0;
		}

		/* The counters are all in use. */
		return -EAGAIN;
	}
}
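
/*
 * Allocation sketch: bit i of cpuc->used_mask set means counter i is
 * claimed. test_and_set_bit() makes each claim atomic, so two events
 * being scheduled onto this CPU can never be handed the same counter.
 */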

static void
armv6pmu_disable_event(struct hw_perf_event *hwc,
		       int idx)
{
	unsigned long val, mask, evt, flags;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask = ARMV6_PMCR_CCOUNT_IEN;
		evt = 0;
	} else if (ARMV6_COUNTER0 == idx) {
		mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
		evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
	} else if (ARMV6_COUNTER1 == idx) {
		mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
		evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the number
	 * of ETM bus signal assertion cycles. The external reporting should
	 * be disabled and so this should never increment.
	 */
	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
			      int idx)
{
	unsigned long val, mask, flags, evt = 0;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask = ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask = ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask = ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
	 * simply disable the interrupt reporting.
	 */
	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static const struct arm_pmu armv6pmu = {
	.name = "v6",
	.handle_irq = armv6pmu_handle_irq,
	.enable = armv6pmu_enable_event,
	.disable = armv6pmu_disable_event,
	.event_map = armv6pmu_event_map,
	.raw_event = armv6pmu_raw_event,
	.read_counter = armv6pmu_read_counter,
	.write_counter = armv6pmu_write_counter,
	.get_event_idx = armv6pmu_get_event_idx,
	.start = armv6pmu_start,
	.stop = armv6pmu_stop,
	.num_events = 3,
	.max_period = (1LLU << 32) - 1,
};

/*
 * ARMv6mpcore is almost identical to single core ARMv6 with the exception
 * that some of the events have different enumerations and that there is no
 * *hack* to stop the programmable counters. To stop the counters we simply
 * disable the interrupt reporting and update the event. When unthrottling we
 * reset the period and enable the interrupt reporting.
 */
static const struct arm_pmu armv6mpcore_pmu = {
	.name = "v6mpcore",
	.handle_irq = armv6pmu_handle_irq,
	.enable = armv6pmu_enable_event,
	.disable = armv6mpcore_pmu_disable_event,
	.event_map = armv6mpcore_pmu_event_map,
	.raw_event = armv6pmu_raw_event,
	.read_counter = armv6pmu_read_counter,
	.write_counter = armv6pmu_write_counter,
	.get_event_idx = armv6pmu_get_event_idx,
	.start = armv6pmu_start,
	.stop = armv6pmu_stop,
	.num_events = 3,
	.max_period = (1LLU << 32) - 1,
};

/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 * counter and all 4 performance counters together can be reset separately.
 */

#define ARMV7_PMU_CORTEX_A8_NAME	"ARMv7 Cortex-A8"

#define ARMV7_PMU_CORTEX_A9_NAME	"ARMv7 Cortex-A9"

/* Common ARMv7 event types */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
	ARMV7_PERFCTR_IFETCH_MISS = 0x01,
	ARMV7_PERFCTR_ITLB_MISS = 0x02,
	ARMV7_PERFCTR_DCACHE_REFILL = 0x03,
	ARMV7_PERFCTR_DCACHE_ACCESS = 0x04,
	ARMV7_PERFCTR_DTLB_REFILL = 0x05,
	ARMV7_PERFCTR_DREAD = 0x06,
	ARMV7_PERFCTR_DWRITE = 0x07,

	ARMV7_PERFCTR_EXC_TAKEN = 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
	ARMV7_PERFCTR_CID_WRITE = 0x0B,
	/*
	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE = 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
	ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,

	ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12,

	ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};

/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,

	ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,

	ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
	ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
	ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
	ARMV7_PERFCTR_L2_ACCESS = 0x43,
	ARMV7_PERFCTR_L2_CACH_MISS = 0x44,
	ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45,
	ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46,
	ARMV7_PERFCTR_MEMORY_REPLAY = 0x47,
	ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48,
	ARMV7_PERFCTR_L1_DATA_MISS = 0x49,
	ARMV7_PERFCTR_L1_INST_MISS = 0x4A,
	ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B,
	ARMV7_PERFCTR_L1_NEON_DATA = 0x4C,
	ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D,
	ARMV7_PERFCTR_L2_NEON = 0x4E,
	ARMV7_PERFCTR_L2_NEON_HIT = 0x4F,
	ARMV7_PERFCTR_L1_INST = 0x50,
	ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51,
	ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52,
	ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53,
	ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54,
	ARMV7_PERFCTR_OP_EXECUTED = 0x55,
	ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56,
	ARMV7_PERFCTR_CYCLES_INST = 0x57,
	ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58,
	ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59,
	ARMV7_PERFCTR_NEON_CYCLES = 0x5A,

	ARMV7_PERFCTR_PMU0_EVENTS = 0x70,
	ARMV7_PERFCTR_PMU1_EVENTS = 0x71,
	ARMV7_PERFCTR_PMU_EVENTS = 0x72,
};

/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40,
	ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41,
	ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42,

	ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50,
	ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51,

	ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60,
	ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61,
	ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
	ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63,
	ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64,
	ARMV7_PERFCTR_DATA_EVICTION = 0x65,
	ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66,
	ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67,
	ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68,

	ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,

	ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70,
	ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
	ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72,
	ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73,
	ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74,

	ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
	ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81,
	ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82,
	ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83,
	ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84,
	ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85,
	ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86,

	ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A,
	ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,

	ARMV7_PERFCTR_ISB_INST = 0x90,
	ARMV7_PERFCTR_DSB_INST = 0x91,
	ARMV7_PERFCTR_DMB_INST = 0x92,
	ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93,

	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0,
	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1,
	ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2,
	ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3,
	ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4,
	ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
};

/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * Only ITLB misses and DTLB refills are supported. If users
		 * want the DTLB refill misses, a raw counter must be used.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] =
					ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * Only ITLB misses and DTLB refills are supported. If users
		 * want the DTLB refill misses, a raw counter must be used.
		 */
1515 | [C(OP_READ)] = { | ||
1516 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1517 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
1518 | }, | ||
1519 | [C(OP_WRITE)] = { | ||
1520 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1521 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
1522 | }, | ||
1523 | [C(OP_PREFETCH)] = { | ||
1524 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1525 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1526 | }, | ||
1527 | }, | ||
1528 | [C(ITLB)] = { | ||
1529 | [C(OP_READ)] = { | ||
1530 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1531 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
1532 | }, | ||
1533 | [C(OP_WRITE)] = { | ||
1534 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1535 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
1536 | }, | ||
1537 | [C(OP_PREFETCH)] = { | ||
1538 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1539 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1540 | }, | ||
1541 | }, | ||
1542 | [C(BPU)] = { | ||
1543 | [C(OP_READ)] = { | ||
1544 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
1545 | [C(RESULT_MISS)] | ||
1546 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
1547 | }, | ||
1548 | [C(OP_WRITE)] = { | ||
1549 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
1550 | [C(RESULT_MISS)] | ||
1551 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
1552 | }, | ||
1553 | [C(OP_PREFETCH)] = { | ||
1554 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1555 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1556 | }, | ||
1557 | }, | ||
1558 | }; | ||
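/*
 * Worked example for the table above: PERF_TYPE_HW_CACHE events pack
 * (cache id) | (op << 8) | (result << 16) into attr.config, so an L1D
 * read miss is
 *
 *	C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)
 *	  = 0 | (0 << 8) | (1 << 16) = 0x10000,
 *
 * which this map resolves to ARMV7_PERFCTR_DCACHE_REFILL on the A9.
 */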
1559 | |||
1560 | /* | ||
1561 | * Perf Events counters | ||
1562 | */ | ||
1563 | enum armv7_counters { | ||
1564 | ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */ | ||
1565 | ARMV7_COUNTER0 = 2, /* First event counter */ | ||
1566 | }; | ||
1567 | |||
1568 | /* | ||
1569 | * The cycle counter is ARMV7_CYCLE_COUNTER. | ||
1570 | * The first event counter is ARMV7_COUNTER0. | ||
1571 | * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1). | ||
1572 | */ | ||
1573 | #define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1) | ||
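/*
 * Worked example of the index scheme: on a Cortex-A8,
 * armv7_reset_read_pmnc() below reads nb_cnt = 4 from PMNC and sets
 * armpmu->num_events = 5 (CCNT plus CNT0..CNT3). Index 0 is reserved,
 * index 1 is the cycle counter, and armv7pmu_get_event_idx() hands out
 * indices 2..5 for the event counters.
 */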
1574 | |||
1575 | /* | ||
1576 | * ARMv7 low level PMNC access | ||
1577 | */ | ||
1578 | |||
1579 | /* | ||
1580 | * Per-CPU PMNC: config reg | ||
1581 | */ | ||
1582 | #define ARMV7_PMNC_E (1 << 0) /* Enable all counters */ | ||
1583 | #define ARMV7_PMNC_P (1 << 1) /* Reset all counters */ | ||
1584 | #define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */ | ||
1585 | #define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */ | ||
1586 | #define ARMV7_PMNC_X (1 << 4) /* Export to ETM */ | ||
1587 | #define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug */ | ||
1588 | #define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */ | ||
1589 | #define ARMV7_PMNC_N_MASK 0x1f | ||
1590 | #define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */ | ||
1591 | |||
1592 | /* | ||
1593 | * Available counters | ||
1594 | */ | ||
1595 | #define ARMV7_CNT0 0 /* First event counter */ | ||
1596 | #define ARMV7_CCNT 31 /* Cycle counter */ | ||
1597 | |||
1598 | /* Perf Event to low level counters mapping */ | ||
1599 | #define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0) | ||
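/*
 * Worked example: ARMV7_EVENT_CNT_TO_CNTx = ARMV7_COUNTER0 - ARMV7_CNT0
 * = 2, so perf index 2 drives hardware counter CNT0 and, for instance,
 * ARMV7_CNTENS_P(2) below evaluates to (1 << 0).
 */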
1600 | |||
1601 | /* | ||
1602 | * CNTENS: counters enable reg | ||
1603 | */ | ||
1604 | #define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
1605 | #define ARMV7_CNTENS_C (1 << ARMV7_CCNT) | ||
1606 | |||
1607 | /* | ||
1608 | * CNTENC: counters disable reg | ||
1609 | */ | ||
1610 | #define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
1611 | #define ARMV7_CNTENC_C (1 << ARMV7_CCNT) | ||
1612 | |||
1613 | /* | ||
1614 | * INTENS: counters overflow interrupt enable reg | ||
1615 | */ | ||
1616 | #define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
1617 | #define ARMV7_INTENS_C (1 << ARMV7_CCNT) | ||
1618 | |||
1619 | /* | ||
1620 | * INTENC: counters overflow interrupt disable reg | ||
1621 | */ | ||
1622 | #define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
1623 | #define ARMV7_INTENC_C (1 << ARMV7_CCNT) | ||
1624 | |||
1625 | /* | ||
1626 | * EVTSEL: Event selection reg | ||
1627 | */ | ||
1628 | #define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */ | ||
1629 | |||
1630 | /* | ||
1631 | * SELECT: Counter selection reg | ||
1632 | */ | ||
1633 | #define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */ | ||
1634 | |||
1635 | /* | ||
1636 | * FLAG: counters overflow flag status reg | ||
1637 | */ | ||
1638 | #define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
1639 | #define ARMV7_FLAG_C (1 << ARMV7_CCNT) | ||
1640 | #define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */ | ||
1641 | #define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK | ||
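/*
 * Illustrative sketch (hypothetical helper, unused by this driver):
 * decoding a FLAG value as returned by armv7_pmnc_getreset_flags()
 * below. Bit 31 (ARMV7_FLAG_C) is the cycle counter; bits 0..30 are
 * CNT0..CNT30.
 */
#if 0
static void armv7_pmnc_dump_overflow(u32 flags)
{
	unsigned int cnt;

	if (flags & ARMV7_FLAG_C)
		pr_debug("CCNT overflowed\n");
	for (cnt = 0; cnt < ARMV7_CCNT; cnt++)
		if (flags & (1 << cnt))
			pr_debug("CNT%u overflowed\n", cnt);
}
#endif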
1642 | |||
1643 | static inline unsigned long armv7_pmnc_read(void) | ||
1644 | { | ||
1645 | u32 val; | ||
1646 | asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val)); | ||
1647 | return val; | ||
1648 | } | ||
1649 | |||
1650 | static inline void armv7_pmnc_write(unsigned long val) | ||
1651 | { | ||
1652 | val &= ARMV7_PMNC_MASK; | ||
1653 | asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val)); | ||
1654 | } | ||
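/*
 * Usage example: PMNC is modified read-modify-write through the two
 * accessors above, e.g. to make CCNT tick once every 64 cycles:
 *
 *	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_D);
 *
 * Since armv7_pmnc_write() masks with ARMV7_PMNC_MASK, the read-only
 * counter-count field N (bits 15:11) is never written back.
 */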
1655 | |||
1656 | static inline int armv7_pmnc_has_overflowed(unsigned long pmnc) | ||
1657 | { | ||
1658 | return pmnc & ARMV7_OVERFLOWED_MASK; | ||
1659 | } | ||
1660 | |||
1661 | static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc, | ||
1662 | enum armv7_counters counter) | ||
1663 | { | ||
1664 | int ret = 0; | ||
1665 | |||
1666 | if (counter == ARMV7_CYCLE_COUNTER) | ||
1667 | ret = pmnc & ARMV7_FLAG_C; | ||
1668 | else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST)) | ||
1669 | ret = pmnc & ARMV7_FLAG_P(counter); | ||
1670 | else | ||
1671 | pr_err("CPU%u checking wrong counter %d overflow status\n", | ||
1672 | smp_processor_id(), counter); | ||
1673 | |||
1674 | return ret; | ||
1675 | } | ||
1676 | |||
1677 | static inline int armv7_pmnc_select_counter(unsigned int idx) | ||
1678 | { | ||
1679 | u32 val; | ||
1680 | |||
1681 | if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) { | ||
1682 | pr_err("CPU%u selecting wrong PMNC counter" | ||
1683 | " %d\n", smp_processor_id(), idx); | ||
1684 | return -1; | ||
1685 | } | ||
1686 | |||
1687 | val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK; | ||
1688 | asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val)); | ||
1689 | |||
1690 | return idx; | ||
1691 | } | ||
1692 | |||
1693 | static inline u32 armv7pmu_read_counter(int idx) | ||
1694 | { | ||
1695 | unsigned long value = 0; | ||
1696 | |||
1697 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1698 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value)); | ||
1699 | else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { | ||
1700 | if (armv7_pmnc_select_counter(idx) == idx) | ||
1701 | asm volatile("mrc p15, 0, %0, c9, c13, 2" | ||
1702 | : "=r" (value)); | ||
1703 | } else | ||
1704 | pr_err("CPU%u reading wrong counter %d\n", | ||
1705 | smp_processor_id(), idx); | ||
1706 | |||
1707 | return value; | ||
1708 | } | ||
1709 | |||
1710 | static inline void armv7pmu_write_counter(int idx, u32 value) | ||
1711 | { | ||
1712 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1713 | asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value)); | ||
1714 | else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { | ||
1715 | if (armv7_pmnc_select_counter(idx) == idx) | ||
1716 | asm volatile("mcr p15, 0, %0, c9, c13, 2" | ||
1717 | : : "r" (value)); | ||
1718 | } else | ||
1719 | pr_err("CPU%u writing wrong counter %d\n", | ||
1720 | smp_processor_id(), idx); | ||
1721 | } | ||
1722 | |||
1723 | static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val) | ||
1724 | { | ||
1725 | if (armv7_pmnc_select_counter(idx) == idx) { | ||
1726 | val &= ARMV7_EVTSEL_MASK; | ||
1727 | asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val)); | ||
1728 | } | ||
1729 | } | ||
1730 | |||
1731 | static inline u32 armv7_pmnc_enable_counter(unsigned int idx) | ||
1732 | { | ||
1733 | u32 val; | ||
1734 | |||
1735 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
1736 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
1737 | pr_err("CPU%u enabling wrong PMNC counter" | ||
1738 | " %d\n", smp_processor_id(), idx); | ||
1739 | return -1; | ||
1740 | } | ||
1741 | |||
1742 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1743 | val = ARMV7_CNTENS_C; | ||
1744 | else | ||
1745 | val = ARMV7_CNTENS_P(idx); | ||
1746 | |||
1747 | asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val)); | ||
1748 | |||
1749 | return idx; | ||
1750 | } | ||
1751 | |||
1752 | static inline u32 armv7_pmnc_disable_counter(unsigned int idx) | ||
1753 | { | ||
1754 | u32 val; | ||
1755 | |||
1757 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
1758 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
1759 | pr_err("CPU%u disabling wrong PMNC counter" | ||
1760 | " %d\n", smp_processor_id(), idx); | ||
1761 | return -1; | ||
1762 | } | ||
1763 | |||
1764 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1765 | val = ARMV7_CNTENC_C; | ||
1766 | else | ||
1767 | val = ARMV7_CNTENC_P(idx); | ||
1768 | |||
1769 | asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val)); | ||
1770 | |||
1771 | return idx; | ||
1772 | } | ||
1773 | |||
1774 | static inline u32 armv7_pmnc_enable_intens(unsigned int idx) | ||
1775 | { | ||
1776 | u32 val; | ||
1777 | |||
1778 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
1779 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
1780 | pr_err("CPU%u enabling wrong PMNC counter" | ||
1781 | " interrupt enable %d\n", smp_processor_id(), idx); | ||
1782 | return -1; | ||
1783 | } | ||
1784 | |||
1785 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1786 | val = ARMV7_INTENS_C; | ||
1787 | else | ||
1788 | val = ARMV7_INTENS_P(idx); | ||
1789 | |||
1790 | asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val)); | ||
1791 | |||
1792 | return idx; | ||
1793 | } | ||
1794 | |||
1795 | static inline u32 armv7_pmnc_disable_intens(unsigned int idx) | ||
1796 | { | ||
1797 | u32 val; | ||
1798 | |||
1799 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
1800 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
1801 | pr_err("CPU%u disabling wrong PMNC counter" | ||
1802 | " interrupt enable %d\n", smp_processor_id(), idx); | ||
1803 | return -1; | ||
1804 | } | ||
1805 | |||
1806 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1807 | val = ARMV7_INTENC_C; | ||
1808 | else | ||
1809 | val = ARMV7_INTENC_P(idx); | ||
1810 | |||
1811 | asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val)); | ||
1812 | |||
1813 | return idx; | ||
1814 | } | ||
1815 | |||
1816 | static inline u32 armv7_pmnc_getreset_flags(void) | ||
1817 | { | ||
1818 | u32 val; | ||
1819 | |||
1820 | /* Read */ | ||
1821 | asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); | ||
1822 | |||
1823 | /* Write to clear flags: the bits are write-one-to-clear */ | ||
1824 | val &= ARMV7_FLAG_MASK; | ||
1825 | asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val)); | ||
1826 | |||
1827 | return val; | ||
1828 | } | ||
1829 | |||
1830 | #ifdef DEBUG | ||
1831 | static void armv7_pmnc_dump_regs(void) | ||
1832 | { | ||
1833 | u32 val; | ||
1834 | unsigned int cnt; | ||
1835 | |||
1836 | printk(KERN_INFO "PMNC registers dump:\n"); | ||
1837 | |||
1838 | asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val)); | ||
1839 | printk(KERN_INFO "PMNC =0x%08x\n", val); | ||
1840 | |||
1841 | asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val)); | ||
1842 | printk(KERN_INFO "CNTENS=0x%08x\n", val); | ||
1843 | |||
1844 | asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val)); | ||
1845 | printk(KERN_INFO "INTENS=0x%08x\n", val); | ||
1846 | |||
1847 | asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); | ||
1848 | printk(KERN_INFO "FLAGS =0x%08x\n", val); | ||
1849 | |||
1850 | asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val)); | ||
1851 | printk(KERN_INFO "SELECT=0x%08x\n", val); | ||
1852 | |||
1853 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); | ||
1854 | printk(KERN_INFO "CCNT =0x%08x\n", val); | ||
1855 | |||
1856 | for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) { | ||
1857 | armv7_pmnc_select_counter(cnt); | ||
1858 | asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); | ||
1859 | printk(KERN_INFO "CNT[%d] count =0x%08x\n", | ||
1860 | cnt-ARMV7_EVENT_CNT_TO_CNTx, val); | ||
1861 | asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); | ||
1862 | printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", | ||
1863 | cnt-ARMV7_EVENT_CNT_TO_CNTx, val); | ||
1864 | } | ||
1865 | } | ||
1866 | #endif | ||
1867 | |||
1868 | static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
1869 | { | ||
1870 | unsigned long flags; | ||
1871 | |||
1872 | /* | ||
1873 | * Enable counter and interrupt, and set the counter to count | ||
1874 | * the event that we're interested in. | ||
1875 | */ | ||
1876 | spin_lock_irqsave(&pmu_lock, flags); | ||
1877 | |||
1878 | /* | ||
1879 | * Disable counter | ||
1880 | */ | ||
1881 | armv7_pmnc_disable_counter(idx); | ||
1882 | |||
1883 | /* | ||
1884 | * Set event (if destined for PMNx counters) | ||
1885 | * We don't need to set the event if it's a cycle count | ||
1886 | */ | ||
1887 | if (idx != ARMV7_CYCLE_COUNTER) | ||
1888 | armv7_pmnc_write_evtsel(idx, hwc->config_base); | ||
1889 | |||
1890 | /* | ||
1891 | * Enable interrupt for this counter | ||
1892 | */ | ||
1893 | armv7_pmnc_enable_intens(idx); | ||
1894 | |||
1895 | /* | ||
1896 | * Enable counter | ||
1897 | */ | ||
1898 | armv7_pmnc_enable_counter(idx); | ||
1899 | |||
1900 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
1901 | } | ||
1902 | |||
1903 | static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) | ||
1904 | { | ||
1905 | unsigned long flags; | ||
1906 | |||
1907 | /* | ||
1908 | * Disable counter and interrupt | ||
1909 | */ | ||
1910 | spin_lock_irqsave(&pmu_lock, flags); | ||
1911 | |||
1912 | /* | ||
1913 | * Disable counter | ||
1914 | */ | ||
1915 | armv7_pmnc_disable_counter(idx); | ||
1916 | |||
1917 | /* | ||
1918 | * Disable interrupt for this counter | ||
1919 | */ | ||
1920 | armv7_pmnc_disable_intens(idx); | ||
1921 | |||
1922 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
1923 | } | ||
1924 | |||
1925 | static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | ||
1926 | { | ||
1927 | unsigned long pmnc; | ||
1928 | struct perf_sample_data data; | ||
1929 | struct cpu_hw_events *cpuc; | ||
1930 | struct pt_regs *regs; | ||
1931 | int idx; | ||
1932 | |||
1933 | /* | ||
1934 | * Get and reset the IRQ flags | ||
1935 | */ | ||
1936 | pmnc = armv7_pmnc_getreset_flags(); | ||
1937 | |||
1938 | /* | ||
1939 | * Did an overflow occur? | ||
1940 | */ | ||
1941 | if (!armv7_pmnc_has_overflowed(pmnc)) | ||
1942 | return IRQ_NONE; | ||
1943 | |||
1944 | /* | ||
1945 | * Handle the counter(s) overflow(s) | ||
1946 | */ | ||
1947 | regs = get_irq_regs(); | ||
1948 | |||
1949 | perf_sample_data_init(&data, 0); | ||
1950 | |||
1951 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
1952 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | ||
1953 | struct perf_event *event = cpuc->events[idx]; | ||
1954 | struct hw_perf_event *hwc; | ||
1955 | |||
1956 | if (!test_bit(idx, cpuc->active_mask)) | ||
1957 | continue; | ||
1958 | |||
1959 | /* | ||
1960 | * We have a single interrupt for all counters. Check that | ||
1961 | * each counter has overflowed before we process it. | ||
1962 | */ | ||
1963 | if (!armv7_pmnc_counter_has_overflowed(pmnc, idx)) | ||
1964 | continue; | ||
1965 | |||
1966 | hwc = &event->hw; | ||
1967 | armpmu_event_update(event, hwc, idx); | ||
1968 | data.period = event->hw.last_period; | ||
1969 | if (!armpmu_event_set_period(event, hwc, idx)) | ||
1970 | continue; | ||
1971 | |||
1972 | if (perf_event_overflow(event, 0, &data, regs)) | ||
1973 | armpmu->disable(hwc, idx); | ||
1974 | } | ||
1975 | |||
1976 | /* | ||
1977 | * Handle the pending perf events. | ||
1978 | * | ||
1979 | * Note: this call *must* be run with interrupts disabled. For | ||
1980 | * platforms that can have the PMU interrupts raised as an NMI, this | ||
1981 | * will not work. | ||
1982 | */ | ||
1983 | perf_event_do_pending(); | ||
1984 | |||
1985 | return IRQ_HANDLED; | ||
1986 | } | ||
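/*
 * Worked example of the sampling arithmetic above, assuming the usual
 * arming scheme of the generic armpmu_event_set_period(): for a sample
 * period P the counter is started at (u32)(-P), so with max_period of
 * (1LLU << 32) - 1 it overflows after exactly P events. The handler
 * then accumulates the delta and re-arms the counter before delivering
 * the sample via perf_event_overflow().
 */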
1987 | |||
1988 | static void armv7pmu_start(void) | ||
1989 | { | ||
1990 | unsigned long flags; | ||
1991 | |||
1992 | spin_lock_irqsave(&pmu_lock, flags); | ||
1993 | /* Enable all counters */ | ||
1994 | armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); | ||
1995 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
1996 | } | ||
1997 | |||
1998 | static void armv7pmu_stop(void) | ||
1999 | { | ||
2000 | unsigned long flags; | ||
2001 | |||
2002 | spin_lock_irqsave(&pmu_lock, flags); | ||
2003 | /* Disable all counters */ | ||
2004 | armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); | ||
2005 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
2006 | } | ||
2007 | |||
2008 | static inline int armv7_a8_pmu_event_map(int config) | ||
2009 | { | ||
2010 | int mapping = armv7_a8_perf_map[config]; | ||
2011 | if (HW_OP_UNSUPPORTED == mapping) | ||
2012 | mapping = -EOPNOTSUPP; | ||
2013 | return mapping; | ||
2014 | } | ||
2015 | |||
2016 | static inline int armv7_a9_pmu_event_map(int config) | ||
2017 | { | ||
2018 | int mapping = armv7_a9_perf_map[config]; | ||
2019 | if (HW_OP_UNSUPPORTED == mapping) | ||
2020 | mapping = -EOPNOTSUPP; | ||
2021 | return mapping; | ||
2022 | } | ||
2023 | |||
2024 | static u64 armv7pmu_raw_event(u64 config) | ||
2025 | { | ||
2026 | return config & 0xff; | ||
2027 | } | ||
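/*
 * Usage note: armv7pmu_raw_event() keeps only the low 8 bits of
 * attr.config, matching ARMV7_EVTSEL_MASK. A raw event can be requested
 * from userspace with "perf stat -e rNN" (NN in hex), or directly as in
 * this sketch (hypothetical helper, error handling omitted; 0x44 is an
 * arbitrary example event number):
 */
#if 0
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

static long open_raw_event(unsigned int evt)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_RAW,
		.size	= sizeof(attr),
		.config	= evt,	/* only the low 8 bits reach EVTSEL */
	};

	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
#endif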
2028 | |||
2029 | static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc, | ||
2030 | struct hw_perf_event *event) | ||
2031 | { | ||
2032 | int idx; | ||
2033 | |||
2034 | /* Always place a cycle-count event into the cycle counter. */ | ||
2035 | if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) { | ||
2036 | if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask)) | ||
2037 | return -EAGAIN; | ||
2038 | |||
2039 | return ARMV7_CYCLE_COUNTER; | ||
2040 | } else { | ||
2041 | /* | ||
2042 | * For anything other than a cycle counter, try to use | ||
2043 | * the event counters. | ||
2044 | */ | ||
2045 | for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) { | ||
2046 | if (!test_and_set_bit(idx, cpuc->used_mask)) | ||
2047 | return idx; | ||
2048 | } | ||
2049 | |||
2050 | /* The counters are all in use. */ | ||
2051 | return -EAGAIN; | ||
2052 | } | ||
2053 | } | ||
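/*
 * Worked example: with num_events == 5, a cycle-count event always
 * lands on index 1 (ARMV7_CYCLE_COUNTER); anything else takes the first
 * clear bit in used_mask among indices 2..5. A second concurrent
 * cycle-count event gets -EAGAIN rather than falling back to an event
 * counter.
 */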
2054 | |||
2055 | static struct arm_pmu armv7pmu = { | ||
2056 | .handle_irq = armv7pmu_handle_irq, | ||
2057 | .enable = armv7pmu_enable_event, | ||
2058 | .disable = armv7pmu_disable_event, | ||
2059 | .raw_event = armv7pmu_raw_event, | ||
2060 | .read_counter = armv7pmu_read_counter, | ||
2061 | .write_counter = armv7pmu_write_counter, | ||
2062 | .get_event_idx = armv7pmu_get_event_idx, | ||
2063 | .start = armv7pmu_start, | ||
2064 | .stop = armv7pmu_stop, | ||
2065 | .max_period = (1LLU << 32) - 1, | ||
2066 | }; | ||
2067 | |||
2068 | static u32 __init armv7_reset_read_pmnc(void) | ||
2069 | { | ||
2070 | u32 nb_cnt; | ||
2071 | |||
2072 | /* Initialize & Reset PMNC: C and P bits */ | ||
2073 | armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); | ||
2074 | |||
2075 | /* Read the number of CNTx counters supported from PMNC */ | ||
2076 | nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK; | ||
2077 | |||
2078 | /* Add the CPU cycles counter and return */ | ||
2079 | return nb_cnt + 1; | ||
2080 | } | ||
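/*
 * Worked example: a Cortex-A9 reports N = 6 event counters in
 * PMNC[15:11], so this returns 6 + 1 = 7 once the cycle counter is
 * included; a Cortex-A8 reports N = 4 and yields 5.
 */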
2081 | |||
2082 | static int __init | ||
2083 | init_hw_perf_events(void) | ||
2084 | { | ||
2085 | unsigned long cpuid = read_cpuid_id(); | ||
2086 | unsigned long implementor = (cpuid & 0xFF000000) >> 24; | ||
2087 | unsigned long part_number = (cpuid & 0xFFF0); | ||
2088 | |||
2089 | /* We only support ARM CPUs implemented by ARM at the moment. */ | ||
2090 | if (0x41 == implementor) { | ||
2091 | switch (part_number) { | ||
2092 | case 0xB360: /* ARM1136 */ | ||
2093 | case 0xB560: /* ARM1156 */ | ||
2094 | case 0xB760: /* ARM1176 */ | ||
2095 | armpmu = &armv6pmu; | ||
2096 | memcpy(armpmu_perf_cache_map, armv6_perf_cache_map, | ||
2097 | sizeof(armv6_perf_cache_map)); | ||
2098 | perf_max_events = armv6pmu.num_events; | ||
2099 | break; | ||
2100 | case 0xB020: /* ARM11mpcore */ | ||
2101 | armpmu = &armv6mpcore_pmu; | ||
2102 | memcpy(armpmu_perf_cache_map, | ||
2103 | armv6mpcore_perf_cache_map, | ||
2104 | sizeof(armv6mpcore_perf_cache_map)); | ||
2105 | perf_max_events = armv6mpcore_pmu.num_events; | ||
2106 | break; | ||
2107 | case 0xC080: /* Cortex-A8 */ | ||
2108 | armv7pmu.name = ARMV7_PMU_CORTEX_A8_NAME; | ||
2109 | memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map, | ||
2110 | sizeof(armv7_a8_perf_cache_map)); | ||
2111 | armv7pmu.event_map = armv7_a8_pmu_event_map; | ||
2112 | armpmu = &armv7pmu; | ||
2113 | |||
2114 | /* Reset PMNC and read the number of | ||
2115 | * CNTx counters supported */ | ||
2116 | armv7pmu.num_events = armv7_reset_read_pmnc(); | ||
2117 | perf_max_events = armv7pmu.num_events; | ||
2118 | break; | ||
2119 | case 0xC090: /* Cortex-A9 */ | ||
2120 | armv7pmu.name = ARMV7_PMU_CORTEX_A9_NAME; | ||
2121 | memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map, | ||
2122 | sizeof(armv7_a9_perf_cache_map)); | ||
2123 | armv7pmu.event_map = armv7_a9_pmu_event_map; | ||
2124 | armpmu = &armv7pmu; | ||
2125 | |||
2126 | /* Reset PMNC and read the number of | ||
2127 | * CNTx counters supported */ | ||
2128 | armv7pmu.num_events = armv7_reset_read_pmnc(); | ||
2129 | perf_max_events = armv7pmu.num_events; | ||
2130 | break; | ||
2131 | default: | ||
2132 | pr_info("no hardware support available\n"); | ||
2133 | perf_max_events = -1; | ||
2134 | } | ||
2135 | } | ||
2136 | |||
2137 | if (armpmu) | ||
2138 | pr_info("enabled with %s PMU driver, %d counters available\n", | ||
2139 | armpmu->name, armpmu->num_events); | ||
2140 | |||
2141 | return 0; | ||
2142 | } | ||
2143 | arch_initcall(init_hw_perf_events); | ||
2144 | |||
2145 | /* | ||
2146 | * Callchain handling code. | ||
2147 | */ | ||
2148 | static inline void | ||
2149 | callchain_store(struct perf_callchain_entry *entry, | ||
2150 | u64 ip) | ||
2151 | { | ||
2152 | if (entry->nr < PERF_MAX_STACK_DEPTH) | ||
2153 | entry->ip[entry->nr++] = ip; | ||
2154 | } | ||
2155 | |||
2156 | /* | ||
2157 | * The registers we're interested in are at the end of the variable | ||
2158 | * length saved register structure. The fp points at the end of this | ||
2159 | * structure so the address of this struct is: | ||
2160 | * (struct frame_tail *)(xxx->fp)-1 | ||
2161 | * | ||
2162 | * This code has been adapted from the ARM OProfile support. | ||
2163 | */ | ||
2164 | struct frame_tail { | ||
2165 | struct frame_tail *fp; | ||
2166 | unsigned long sp; | ||
2167 | unsigned long lr; | ||
2168 | } __attribute__((packed)); | ||
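/*
 * Layout sketch, assuming the classic APCS prologue
 * "mov ip, sp; stmfd sp!, {fp, ip, lr, pc}; sub fp, ip, #4":
 *
 *	fp      -> [ saved pc ]   (just past the struct)
 *	fp - 4  -> [ saved lr ]   tail->lr
 *	fp - 8  -> [ saved sp ]   tail->sp
 *	fp - 12 -> [ saved fp ]   tail->fp
 *
 * hence "(struct frame_tail *)(xxx->fp) - 1" in the comment above.
 */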
2169 | |||
2170 | /* | ||
2171 | * Get the return address for a single stackframe and return a pointer to the | ||
2172 | * next frame tail. | ||
2173 | */ | ||
2174 | static struct frame_tail * | ||
2175 | user_backtrace(struct frame_tail *tail, | ||
2176 | struct perf_callchain_entry *entry) | ||
2177 | { | ||
2178 | struct frame_tail buftail; | ||
2179 | |||
2180 | /* Also check accessibility of one struct frame_tail beyond */ | ||
2181 | if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) | ||
2182 | return NULL; | ||
2183 | if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail))) | ||
2184 | return NULL; | ||
2185 | |||
2186 | callchain_store(entry, buftail.lr); | ||
2187 | |||
2188 | /* | ||
2189 | * Frame pointers should strictly progress back up the stack | ||
2190 | * (towards higher addresses). | ||
2191 | */ | ||
2192 | if (tail >= buftail.fp) | ||
2193 | return NULL; | ||
2194 | |||
2195 | return buftail.fp - 1; | ||
2196 | } | ||
2197 | |||
2198 | static void | ||
2199 | perf_callchain_user(struct pt_regs *regs, | ||
2200 | struct perf_callchain_entry *entry) | ||
2201 | { | ||
2202 | struct frame_tail *tail; | ||
2203 | |||
2204 | callchain_store(entry, PERF_CONTEXT_USER); | ||
2205 | |||
2206 | if (!user_mode(regs)) | ||
2207 | regs = task_pt_regs(current); | ||
2208 | |||
2209 | tail = (struct frame_tail *)regs->ARM_fp - 1; | ||
2210 | |||
2211 | while (tail && !((unsigned long)tail & 0x3)) | ||
2212 | tail = user_backtrace(tail, entry); | ||
2213 | } | ||
2214 | |||
2215 | /* | ||
2216 | * Gets called by walk_stackframe() for every stackframe. This will be called | ||
2217 | * whilst unwinding the stackframe and is like a subroutine return, so we use | ||
2218 | * the PC. | ||
2219 | */ | ||
2220 | static int | ||
2221 | callchain_trace(struct stackframe *fr, | ||
2222 | void *data) | ||
2223 | { | ||
2224 | struct perf_callchain_entry *entry = data; | ||
2225 | callchain_store(entry, fr->pc); | ||
2226 | return 0; | ||
2227 | } | ||
2228 | |||
2229 | static void | ||
2230 | perf_callchain_kernel(struct pt_regs *regs, | ||
2231 | struct perf_callchain_entry *entry) | ||
2232 | { | ||
2233 | struct stackframe fr; | ||
2234 | |||
2235 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
2236 | fr.fp = regs->ARM_fp; | ||
2237 | fr.sp = regs->ARM_sp; | ||
2238 | fr.lr = regs->ARM_lr; | ||
2239 | fr.pc = regs->ARM_pc; | ||
2240 | walk_stackframe(&fr, callchain_trace, entry); | ||
2241 | } | ||
2242 | |||
2243 | static void | ||
2244 | perf_do_callchain(struct pt_regs *regs, | ||
2245 | struct perf_callchain_entry *entry) | ||
2246 | { | ||
2247 | int is_user; | ||
2248 | |||
2249 | if (!regs) | ||
2250 | return; | ||
2251 | |||
2252 | is_user = user_mode(regs); | ||
2253 | |||
2254 | if (!current || !current->pid) | ||
2255 | return; | ||
2256 | |||
2257 | if (is_user && current->state != TASK_RUNNING) | ||
2258 | return; | ||
2259 | |||
2260 | if (!is_user) | ||
2261 | perf_callchain_kernel(regs, entry); | ||
2262 | |||
2263 | if (current->mm) | ||
2264 | perf_callchain_user(regs, entry); | ||
2265 | } | ||
2266 | |||
2267 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); | ||
2268 | |||
2269 | struct perf_callchain_entry * | ||
2270 | perf_callchain(struct pt_regs *regs) | ||
2271 | { | ||
2272 | struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry); | ||
2273 | |||
2274 | entry->nr = 0; | ||
2275 | perf_do_callchain(regs, entry); | ||
2276 | return entry; | ||
2277 | } | ||