author    Ingo Molnar <mingo@elte.hu>	2009-01-10 20:44:08 -0500
committer Ingo Molnar <mingo@elte.hu>	2009-01-10 20:44:08 -0500
commit    c0d362a832ee70435fc4555a64f820893b1da0bd
tree      162497df64451936dfdb70f972dd7849d3e3e520 /arch/powerpc/kernel
parent    506c10f26c481b7f8ef27c1c79290f68989b2e9e
parent    f78628374a13bc150db77c6e02d4f2c0a7f932ef
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/perfcounters into perfcounters/core
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile          1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c     1
-rw-r--r--  arch/powerpc/kernel/entry_64.S        9
-rw-r--r--  arch/powerpc/kernel/irq.c            10
-rw-r--r--  arch/powerpc/kernel/perf_counter.c  771
-rw-r--r--  arch/powerpc/kernel/power6-pmu.c    283
-rw-r--r--  arch/powerpc/kernel/ppc970-pmu.c    375
7 files changed, 1450 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 8d1a419df35d..7c941ec3b23e 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_AUDIT) += audit.o
 obj64-$(CONFIG_AUDIT)		+= compat_audit.o

 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
+obj-$(CONFIG_PERF_COUNTERS)	+= perf_counter.o ppc970-pmu.o power6-pmu.o

 obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o

diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9937fe44555f..ce3f8f12f731 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -127,6 +127,7 @@ int main(void)
 	DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
 	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
 	DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
+	DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending));
 	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
 	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
 	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 383ed6eb0085..f30b4e553c53 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -526,6 +526,15 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
 2:
 	TRACE_AND_RESTORE_IRQ(r5);

+#ifdef CONFIG_PERF_COUNTERS
+	/* check paca->perf_counter_pending if we're enabling ints */
+	lbz	r3,PACAPERFPEND(r13)
+	and.	r3,r3,r5
+	beq	27f
+	bl	.perf_counter_do_pending
+27:
+#endif /* CONFIG_PERF_COUNTERS */
+
 	/* extract EE bit and use it to restore paca->hard_enabled */
 	ld	r3,_MSR(r1)
 	rldicl	r4,r3,49,63		/* r0 = (r3 >> 15) & 1 */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 23b8b5e36f98..f5ae4878ccef 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -104,6 +104,13 @@ static inline notrace void set_soft_enabled(unsigned long enable)
 	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
 }

+#ifdef CONFIG_PERF_COUNTERS
+notrace void __weak perf_counter_do_pending(void)
+{
+	set_perf_counter_pending(0);
+}
+#endif
+
 notrace void raw_local_irq_restore(unsigned long en)
 {
 	/*
@@ -135,6 +142,9 @@ notrace void raw_local_irq_restore(unsigned long en)
 		iseries_handle_interrupts();
 	}

+	if (get_perf_counter_pending())
+		perf_counter_do_pending();
+
 	/*
 	 * if (get_paca()->hard_enabled) return;
 	 * But again we need to take care that gcc gets hard_enabled directly
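
Aside: the irq.c and entry_64.S hunks above implement a deferred-wakeup scheme. A PMU exception can arrive while interrupts are only soft-disabled, so the handler may not wake sleepers directly; instead it sets paca->perf_counter_pending, and the wakeup is replayed once interrupts are re-enabled in raw_local_irq_restore(). A minimal stand-alone C sketch of that pattern follows; the names and the printf-based "wakeup" are illustrative only, not the kernel's API.

#include <stdio.h>

static int soft_enabled;		/* analogue of paca->soft_enabled */
static int perf_pending;		/* analogue of paca->perf_counter_pending */

static void do_pending_wakeups(void)	/* analogue of perf_counter_do_pending() */
{
	perf_pending = 0;
	printf("waking up waiters\n");
}

static void pmu_interrupt(void)		/* may fire while soft-disabled */
{
	if (soft_enabled)
		do_pending_wakeups();	/* safe to wake immediately */
	else
		perf_pending = 1;	/* defer, like set_perf_counter_pending(1) */
}

static void local_irq_restore_sketch(int en)
{
	soft_enabled = en;
	if (en && perf_pending)		/* replay deferred work on re-enable */
		do_pending_wakeups();
}

int main(void)
{
	soft_enabled = 0;
	pmu_interrupt();		/* wakeup is deferred here */
	local_irq_restore_sketch(1);	/* ...and drained here */
	return 0;
}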
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
new file mode 100644
index 000000000000..df3fe057dee9
--- /dev/null
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -0,0 +1,771 @@
+/*
+ * Performance counter support - powerpc architecture code
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/perf_counter.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <asm/reg.h>
+#include <asm/pmc.h>
+
+struct cpu_hw_counters {
+	int n_counters;
+	int n_percpu;
+	int disabled;
+	int n_added;
+	struct perf_counter *counter[MAX_HWCOUNTERS];
+	unsigned int events[MAX_HWCOUNTERS];
+	u64 mmcr[3];
+};
+DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
+
+struct power_pmu *ppmu;
+
+void perf_counter_print_debug(void)
+{
+}
+
+/*
+ * Return 1 for a software counter, 0 for a hardware counter
+ */
+static inline int is_software_counter(struct perf_counter *counter)
+{
+	return !counter->hw_event.raw && counter->hw_event.type < 0;
+}
+
+/*
+ * Read one performance monitor counter (PMC).
+ */
+static unsigned long read_pmc(int idx)
+{
+	unsigned long val;
+
+	switch (idx) {
+	case 1:
+		val = mfspr(SPRN_PMC1);
+		break;
+	case 2:
+		val = mfspr(SPRN_PMC2);
+		break;
+	case 3:
+		val = mfspr(SPRN_PMC3);
+		break;
+	case 4:
+		val = mfspr(SPRN_PMC4);
+		break;
+	case 5:
+		val = mfspr(SPRN_PMC5);
+		break;
+	case 6:
+		val = mfspr(SPRN_PMC6);
+		break;
+	case 7:
+		val = mfspr(SPRN_PMC7);
+		break;
+	case 8:
+		val = mfspr(SPRN_PMC8);
+		break;
+	default:
+		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
+		val = 0;
+	}
+	return val;
+}
+
+/*
+ * Write one PMC.
+ */
+static void write_pmc(int idx, unsigned long val)
+{
+	switch (idx) {
+	case 1:
+		mtspr(SPRN_PMC1, val);
+		break;
+	case 2:
+		mtspr(SPRN_PMC2, val);
+		break;
+	case 3:
+		mtspr(SPRN_PMC3, val);
+		break;
+	case 4:
+		mtspr(SPRN_PMC4, val);
+		break;
+	case 5:
+		mtspr(SPRN_PMC5, val);
+		break;
+	case 6:
+		mtspr(SPRN_PMC6, val);
+		break;
+	case 7:
+		mtspr(SPRN_PMC7, val);
+		break;
+	case 8:
+		mtspr(SPRN_PMC8, val);
+		break;
+	default:
+		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
+	}
+}
+
+/*
+ * Check if a set of events can all go on the PMU at once.
+ * If they can't, this will look at alternative codes for the events
+ * and see if any combination of alternative codes is feasible.
+ * The feasible set is returned in event[].
+ */
+static int power_check_constraints(unsigned int event[], int n_ev)
+{
+	u64 mask, value, nv;
+	unsigned int alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+	u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+	u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+	u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
+	int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
+	int i, j;
+	u64 addf = ppmu->add_fields;
+	u64 tadd = ppmu->test_adder;
+
+	if (n_ev > ppmu->n_counter)
+		return -1;
+
+	/* First see if the events will go on as-is */
+	for (i = 0; i < n_ev; ++i) {
+		alternatives[i][0] = event[i];
+		if (ppmu->get_constraint(event[i], &amasks[i][0],
+					 &avalues[i][0]))
+			return -1;
+		choice[i] = 0;
+	}
+	value = mask = 0;
+	for (i = 0; i < n_ev; ++i) {
+		nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
+		if ((((nv + tadd) ^ value) & mask) != 0 ||
+		    (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
+			break;
+		value = nv;
+		mask |= amasks[i][0];
+	}
+	if (i == n_ev)
+		return 0;	/* all OK */
+
+	/* doesn't work, gather alternatives... */
+	if (!ppmu->get_alternatives)
+		return -1;
+	for (i = 0; i < n_ev; ++i) {
+		n_alt[i] = ppmu->get_alternatives(event[i], alternatives[i]);
+		for (j = 1; j < n_alt[i]; ++j)
+			ppmu->get_constraint(alternatives[i][j],
+					     &amasks[i][j], &avalues[i][j]);
+	}
+
+	/* enumerate all possibilities and see if any will work */
+	i = 0;
+	j = -1;
+	value = mask = nv = 0;
+	while (i < n_ev) {
+		if (j >= 0) {
+			/* we're backtracking, restore context */
+			value = svalues[i];
+			mask = smasks[i];
+			j = choice[i];
+		}
+		/*
+		 * See if any alternative k for event i,
+		 * where k > j, will satisfy the constraints.
+		 */
+		while (++j < n_alt[i]) {
+			nv = (value | avalues[i][j]) +
+				(value & avalues[i][j] & addf);
+			if ((((nv + tadd) ^ value) & mask) == 0 &&
+			    (((nv + tadd) ^ avalues[i][j])
+			     & amasks[i][j]) == 0)
+				break;
+		}
+		if (j >= n_alt[i]) {
+			/*
+			 * No feasible alternative, backtrack
+			 * to event i-1 and continue enumerating its
+			 * alternatives from where we got up to.
+			 */
+			if (--i < 0)
+				return -1;
+		} else {
+			/*
+			 * Found a feasible alternative for event i,
+			 * remember where we got up to with this event,
+			 * go on to the next event, and start with
+			 * the first alternative for it.
+			 */
+			choice[i] = j;
+			svalues[i] = value;
+			smasks[i] = mask;
+			value = nv;
+			mask |= amasks[i][j];
+			++i;
+			j = -1;
+		}
+	}
+
+	/* OK, we have a feasible combination, tell the caller the solution */
+	for (i = 0; i < n_ev; ++i)
+		event[i] = alternatives[i][choice[i]];
+	return 0;
+}
+
+static void power_perf_read(struct perf_counter *counter)
+{
+	long val, delta, prev;
+
+	if (!counter->hw.idx)
+		return;
+	/*
+	 * Performance monitor interrupts come even when interrupts
+	 * are soft-disabled, as long as interrupts are hard-enabled.
+	 * Therefore we treat them like NMIs.
+	 */
+	do {
+		prev = atomic64_read(&counter->hw.prev_count);
+		barrier();
+		val = read_pmc(counter->hw.idx);
+	} while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);
+
+	/* The counters are only 32 bits wide */
+	delta = (val - prev) & 0xfffffffful;
+	atomic64_add(delta, &counter->count);
+	atomic64_sub(delta, &counter->hw.period_left);
+}
+
+/*
+ * Disable all counters to prevent PMU interrupts and to allow
+ * counters to be added or removed.
+ */
+u64 hw_perf_save_disable(void)
+{
+	struct cpu_hw_counters *cpuhw;
+	unsigned long ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	cpuhw = &__get_cpu_var(cpu_hw_counters);
+
+	ret = cpuhw->disabled;
+	if (!ret) {
+		cpuhw->disabled = 1;
+		cpuhw->n_added = 0;
+
+		/*
+		 * Set the 'freeze counters' bit.
+		 * The barrier is to make sure the mtspr has been
+		 * executed and the PMU has frozen the counters
+		 * before we return.
+		 */
+		mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC);
+		mb();
+	}
+	local_irq_restore(flags);
+	return ret;
+}
+
+/*
+ * Re-enable all counters if disable == 0.
+ * If we were previously disabled and counters were added, then
+ * put the new config on the PMU.
+ */
+void hw_perf_restore(u64 disable)
+{
+	struct perf_counter *counter;
+	struct cpu_hw_counters *cpuhw;
+	unsigned long flags;
+	long i;
+	unsigned long val;
+	s64 left;
+	unsigned int hwc_index[MAX_HWCOUNTERS];
+
+	if (disable)
+		return;
+	local_irq_save(flags);
+	cpuhw = &__get_cpu_var(cpu_hw_counters);
+	cpuhw->disabled = 0;
+
+	/*
+	 * If we didn't change anything, or only removed counters,
+	 * no need to recalculate MMCR* settings and reset the PMCs.
+	 * Just reenable the PMU with the current MMCR* settings
+	 * (possibly updated for removal of counters).
+	 */
+	if (!cpuhw->n_added) {
+		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
+		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
+		mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
+		goto out;
+	}
+
+	/*
+	 * Compute MMCR* values for the new set of counters
+	 */
+	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
+			       cpuhw->mmcr)) {
+		/* shouldn't ever get here */
+		printk(KERN_ERR "oops compute_mmcr failed\n");
+		goto out;
+	}
+
+	/*
+	 * Write the new configuration to MMCR* with the freeze
+	 * bit set and set the hardware counters to their initial values.
+	 * Then unfreeze the counters.
+	 */
+	mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
+	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
+	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
+	      | MMCR0_FC);
+
+	/*
+	 * Read off any pre-existing counters that need to move
+	 * to another PMC.
+	 */
+	for (i = 0; i < cpuhw->n_counters; ++i) {
+		counter = cpuhw->counter[i];
+		if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
+			power_perf_read(counter);
+			write_pmc(counter->hw.idx, 0);
+			counter->hw.idx = 0;
+		}
+	}
+
+	/*
+	 * Initialize the PMCs for all the new and moved counters.
+	 */
+	for (i = 0; i < cpuhw->n_counters; ++i) {
+		counter = cpuhw->counter[i];
+		if (counter->hw.idx)
+			continue;
+		val = 0;
+		if (counter->hw_event.irq_period) {
+			left = atomic64_read(&counter->hw.period_left);
+			if (left < 0x80000000L)
+				val = 0x80000000L - left;
+		}
+		atomic64_set(&counter->hw.prev_count, val);
+		counter->hw.idx = hwc_index[i] + 1;
+		write_pmc(counter->hw.idx, val);
+	}
+	mb();
+	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
+	mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
+
+ out:
+	local_irq_restore(flags);
+}
+
+static int collect_events(struct perf_counter *group, int max_count,
+			  struct perf_counter *ctrs[], unsigned int *events)
+{
+	int n = 0;
+	struct perf_counter *counter;
+
+	if (!is_software_counter(group)) {
+		if (n >= max_count)
+			return -1;
+		ctrs[n] = group;
+		events[n++] = group->hw.config;
+	}
+	list_for_each_entry(counter, &group->sibling_list, list_entry) {
+		if (!is_software_counter(counter) &&
+		    counter->state != PERF_COUNTER_STATE_OFF) {
+			if (n >= max_count)
+				return -1;
+			ctrs[n] = counter;
+			events[n++] = counter->hw.config;
+		}
+	}
+	return n;
+}
+
+static void counter_sched_in(struct perf_counter *counter, int cpu)
+{
+	counter->state = PERF_COUNTER_STATE_ACTIVE;
+	counter->oncpu = cpu;
+	if (is_software_counter(counter))
+		counter->hw_ops->enable(counter);
+}
+
+/*
+ * Called to enable a whole group of counters.
+ * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
+ * Assumes the caller has disabled interrupts and has
+ * frozen the PMU with hw_perf_save_disable.
+ */
+int hw_perf_group_sched_in(struct perf_counter *group_leader,
+	       struct perf_cpu_context *cpuctx,
+	       struct perf_counter_context *ctx, int cpu)
+{
+	struct cpu_hw_counters *cpuhw;
+	long i, n, n0;
+	struct perf_counter *sub;
+
+	cpuhw = &__get_cpu_var(cpu_hw_counters);
+	n0 = cpuhw->n_counters;
+	n = collect_events(group_leader, ppmu->n_counter - n0,
+			   &cpuhw->counter[n0], &cpuhw->events[n0]);
+	if (n < 0)
+		return -EAGAIN;
+	if (power_check_constraints(cpuhw->events, n + n0))
+		return -EAGAIN;
+	cpuhw->n_counters = n0 + n;
+	cpuhw->n_added += n;
+
+	/*
+	 * OK, this group can go on; update counter states etc.,
+	 * and enable any software counters
+	 */
+	for (i = n0; i < n0 + n; ++i)
+		cpuhw->counter[i]->hw.config = cpuhw->events[i];
+	n = 1;
+	counter_sched_in(group_leader, cpu);
+	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
+		if (sub->state != PERF_COUNTER_STATE_OFF) {
+			counter_sched_in(sub, cpu);
+			++n;
+		}
+	}
+	cpuctx->active_oncpu += n;
+	ctx->nr_active += n;
+
+	return 1;
+}
+
+/*
+ * Add a counter to the PMU.
+ * If all counters are not already frozen, then we disable and
+ * re-enable the PMU in order to get hw_perf_restore to do the
+ * actual work of reconfiguring the PMU.
+ */
+static int power_perf_enable(struct perf_counter *counter)
+{
+	struct cpu_hw_counters *cpuhw;
+	unsigned long flags;
+	u64 pmudis;
+	int n0;
+	int ret = -EAGAIN;
+
+	local_irq_save(flags);
+	pmudis = hw_perf_save_disable();
+
+	/*
+	 * Add the counter to the list (if there is room)
+	 * and check whether the total set is still feasible.
+	 */
+	cpuhw = &__get_cpu_var(cpu_hw_counters);
+	n0 = cpuhw->n_counters;
+	if (n0 >= ppmu->n_counter)
+		goto out;
+	cpuhw->counter[n0] = counter;
+	cpuhw->events[n0] = counter->hw.config;
+	if (power_check_constraints(cpuhw->events, n0 + 1))
+		goto out;
+
+	counter->hw.config = cpuhw->events[n0];
+	++cpuhw->n_counters;
+	++cpuhw->n_added;
+
+	ret = 0;
+ out:
+	hw_perf_restore(pmudis);
+	local_irq_restore(flags);
+	return ret;
+}
+
+/*
+ * Remove a counter from the PMU.
+ */
+static void power_perf_disable(struct perf_counter *counter)
+{
+	struct cpu_hw_counters *cpuhw;
+	long i;
+	u64 pmudis;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	pmudis = hw_perf_save_disable();
+
+	power_perf_read(counter);
+
+	cpuhw = &__get_cpu_var(cpu_hw_counters);
+	for (i = 0; i < cpuhw->n_counters; ++i) {
+		if (counter == cpuhw->counter[i]) {
+			while (++i < cpuhw->n_counters)
+				cpuhw->counter[i-1] = cpuhw->counter[i];
+			--cpuhw->n_counters;
+			ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
+			write_pmc(counter->hw.idx, 0);
+			counter->hw.idx = 0;
+			break;
+		}
+	}
+	if (cpuhw->n_counters == 0) {
+		/* disable exceptions if no counters are running */
+		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
+	}
+
+	hw_perf_restore(pmudis);
+	local_irq_restore(flags);
+}
+
+struct hw_perf_counter_ops power_perf_ops = {
+	.enable = power_perf_enable,
+	.disable = power_perf_disable,
+	.read = power_perf_read
+};
+
+const struct hw_perf_counter_ops *
+hw_perf_counter_init(struct perf_counter *counter)
+{
+	unsigned long ev;
+	struct perf_counter *ctrs[MAX_HWCOUNTERS];
+	unsigned int events[MAX_HWCOUNTERS];
+	int n;
+
+	if (!ppmu)
+		return NULL;
+	if ((s64)counter->hw_event.irq_period < 0)
+		return NULL;
+	ev = counter->hw_event.type;
+	if (!counter->hw_event.raw) {
+		if (ev >= ppmu->n_generic ||
+		    ppmu->generic_events[ev] == 0)
+			return NULL;
+		ev = ppmu->generic_events[ev];
+	}
+	counter->hw.config_base = ev;
+	counter->hw.idx = 0;
+
+	/*
+	 * If this is in a group, check if it can go on with all the
+	 * other hardware counters in the group.  We assume the counter
+	 * hasn't been linked into its leader's sibling list at this point.
+	 */
+	n = 0;
+	if (counter->group_leader != counter) {
+		n = collect_events(counter->group_leader, ppmu->n_counter - 1,
+				   ctrs, events);
+		if (n < 0)
+			return NULL;
+	}
+	events[n++] = ev;
+	if (power_check_constraints(events, n))
+		return NULL;
+
+	counter->hw.config = events[n - 1];
+	atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period);
+	return &power_perf_ops;
+}
+
+/*
+ * Handle wakeups.
+ */
+void perf_counter_do_pending(void)
+{
+	int i;
+	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
+	struct perf_counter *counter;
+
+	set_perf_counter_pending(0);
+	for (i = 0; i < cpuhw->n_counters; ++i) {
+		counter = cpuhw->counter[i];
+		if (counter && counter->wakeup_pending) {
+			counter->wakeup_pending = 0;
+			wake_up(&counter->waitq);
+		}
+	}
+}
+
+/*
+ * Record data for an irq counter.
+ * This function was lifted from the x86 code; maybe it should
+ * go in the core?
+ */
+static void perf_store_irq_data(struct perf_counter *counter, u64 data)
+{
+	struct perf_data *irqdata = counter->irqdata;
+
+	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
+		irqdata->overrun++;
+	} else {
+		u64 *p = (u64 *) &irqdata->data[irqdata->len];
+
+		*p = data;
+		irqdata->len += sizeof(u64);
+	}
+}
+
+/*
+ * Record all the values of the counters in a group
+ */
+static void perf_handle_group(struct perf_counter *counter)
+{
+	struct perf_counter *leader, *sub;
+
+	leader = counter->group_leader;
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		if (sub != counter)
+			sub->hw_ops->read(sub);
+		perf_store_irq_data(counter, sub->hw_event.type);
+		perf_store_irq_data(counter, atomic64_read(&sub->count));
+	}
+}
+
+/*
+ * A counter has overflowed; update its count and record
+ * things if requested.  Note that interrupts are hard-disabled
+ * here so there is no possibility of being interrupted.
+ */
+static void record_and_restart(struct perf_counter *counter, long val,
+			       struct pt_regs *regs)
+{
+	s64 prev, delta, left;
+	int record = 0;
+
+	/* we don't have to worry about interrupts here */
+	prev = atomic64_read(&counter->hw.prev_count);
+	delta = (val - prev) & 0xfffffffful;
+	atomic64_add(delta, &counter->count);
+
+	/*
+	 * See if the total period for this counter has expired,
+	 * and update for the next period.
+	 */
+	val = 0;
+	left = atomic64_read(&counter->hw.period_left) - delta;
+	if (counter->hw_event.irq_period) {
+		if (left <= 0) {
+			left += counter->hw_event.irq_period;
+			if (left <= 0)
+				left = counter->hw_event.irq_period;
+			record = 1;
+		}
+		if (left < 0x80000000L)
+			val = 0x80000000L - left;
+	}
+	write_pmc(counter->hw.idx, val);
+	atomic64_set(&counter->hw.prev_count, val);
+	atomic64_set(&counter->hw.period_left, left);
+
+	/*
+	 * Finally record data if requested.
+	 */
+	if (record) {
+		switch (counter->hw_event.record_type) {
+		case PERF_RECORD_SIMPLE:
+			break;
+		case PERF_RECORD_IRQ:
+			perf_store_irq_data(counter, instruction_pointer(regs));
+			counter->wakeup_pending = 1;
+			break;
+		case PERF_RECORD_GROUP:
+			perf_handle_group(counter);
+			counter->wakeup_pending = 1;
+			break;
+		}
+	}
+}
+
+/*
+ * Performance monitor interrupt stuff
+ */
+static void perf_counter_interrupt(struct pt_regs *regs)
+{
+	int i;
+	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
+	struct perf_counter *counter;
+	long val;
+	int need_wakeup = 0, found = 0;
+
+	for (i = 0; i < cpuhw->n_counters; ++i) {
+		counter = cpuhw->counter[i];
+		val = read_pmc(counter->hw.idx);
+		if ((int)val < 0) {
+			/* counter has overflowed */
+			found = 1;
+			record_and_restart(counter, val, regs);
+			if (counter->wakeup_pending)
+				need_wakeup = 1;
+		}
+	}
+
+	/*
+	 * In case we didn't find and reset the counter that caused
+	 * the interrupt, scan all counters and reset any that are
+	 * negative, to avoid getting continual interrupts.
+	 * Any that we processed in the previous loop will not be negative.
+	 */
+	if (!found) {
+		for (i = 0; i < ppmu->n_counter; ++i) {
+			val = read_pmc(i + 1);
+			if ((int)val < 0)
+				write_pmc(i + 1, 0);
+		}
+	}
+
+	/*
+	 * Reset MMCR0 to its normal value.  This will set PMXE and
+	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
+	 * and thus allow interrupts to occur again.
+	 * XXX might want to use MSR.PM to keep the counters frozen until
+	 * we get back out of this interrupt.
+	 */
+	mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
+
+	/*
+	 * If we need a wakeup, check whether interrupts were soft-enabled
+	 * when we took the interrupt.  If they were, we can wake stuff up
+	 * immediately; otherwise we'll have to set a flag and do the
+	 * wakeup when interrupts get soft-enabled.
+	 */
+	if (need_wakeup) {
+		if (regs->softe) {
+			irq_enter();
+			perf_counter_do_pending();
+			irq_exit();
+		} else {
+			set_perf_counter_pending(1);
+		}
+	}
+}
+
+extern struct power_pmu ppc970_pmu;
+extern struct power_pmu power6_pmu;
+
+static int init_perf_counters(void)
+{
+	unsigned long pvr;
+
+	if (reserve_pmc_hardware(perf_counter_interrupt)) {
+		printk(KERN_ERR "Couldn't init performance monitor subsystem\n");
+		return -EBUSY;
+	}
+
+	/* XXX should get this from cputable */
+	pvr = mfspr(SPRN_PVR);
+	switch (PVR_VER(pvr)) {
+	case PV_970:
+	case PV_970FX:
+	case PV_970MP:
+		ppmu = &ppc970_pmu;
+		break;
+	case 0x3e:
+		ppmu = &power6_pmu;
+		break;
+	}
+	return 0;
+}
+
+arch_initcall(init_perf_counters);
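
Aside: power_perf_read() and record_and_restart() above both accumulate counts with delta = (val - prev) & 0xfffffffful, which yields the correct number of events even when the 32-bit PMC wraps between reads (as long as it wraps at most once). A stand-alone sketch with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long prev = 0xfffffff0ul;	/* last snapshot, near wrap */
	unsigned long val  = 0x00000010ul;	/* current PMC value, after wrap */
	long delta = (val - prev) & 0xfffffffful;

	printf("delta = %ld\n", delta);		/* 32: 16 up to the wrap + 16 after */
	return 0;
}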
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
new file mode 100644
index 000000000000..b1f61f3c97bb
--- /dev/null
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -0,0 +1,283 @@
+/*
+ * Performance counter support for POWER6 processors.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+
+/*
+ * Bits in event code for POWER6
+ */
+#define PM_PMC_SH	20	/* PMC number (1-based) for direct events */
+#define PM_PMC_MSK	0x7
+#define PM_PMC_MSKS	(PM_PMC_MSK << PM_PMC_SH)
+#define PM_UNIT_SH	16	/* Unit event comes from (TTMxSEL encoding) */
+#define PM_UNIT_MSK	0xf
+#define PM_UNIT_MSKS	(PM_UNIT_MSK << PM_UNIT_SH)
+#define PM_LLAV		0x8000	/* Load lookahead match value */
+#define PM_LLA		0x4000	/* Load lookahead match enable */
+#define PM_BYTE_SH	12	/* Byte of event bus to use */
+#define PM_BYTE_MSK	3
+#define PM_SUBUNIT_SH	8	/* Subunit event comes from (NEST_SEL enc.) */
+#define PM_SUBUNIT_MSK	7
+#define PM_SUBUNIT_MSKS	(PM_SUBUNIT_MSK << PM_SUBUNIT_SH)
+#define PM_PMCSEL_MSK	0xff	/* PMCxSEL value */
+#define PM_BUSEVENT_MSK	0xf3700
+
+/*
+ * Bits in MMCR1 for POWER6
+ */
+#define MMCR1_TTM0SEL_SH	60
+#define MMCR1_TTMSEL_SH(n)	(MMCR1_TTM0SEL_SH - (n) * 4)
+#define MMCR1_TTMSEL_MSK	0xf
+#define MMCR1_TTMSEL(m, n)	(((m) >> MMCR1_TTMSEL_SH(n)) & MMCR1_TTMSEL_MSK)
+#define MMCR1_NESTSEL_SH	45
+#define MMCR1_NESTSEL_MSK	0x7
+#define MMCR1_NESTSEL(m)	(((m) >> MMCR1_NESTSEL_SH) & MMCR1_NESTSEL_MSK)
+#define MMCR1_PMC1_LLA		((u64)1 << 44)
+#define MMCR1_PMC1_LLA_VALUE	((u64)1 << 39)
+#define MMCR1_PMC1_ADDR_SEL	((u64)1 << 35)
+#define MMCR1_PMC1SEL_SH	24
+#define MMCR1_PMCSEL_SH(n)	(MMCR1_PMC1SEL_SH - (n) * 8)
+#define MMCR1_PMCSEL_MSK	0xff
+
+/*
+ * Assign PMC numbers and compute MMCR1 value for a set of events
+ */
+static int p6_compute_mmcr(unsigned int event[], int n_ev,
+			   unsigned int hwc[], u64 mmcr[])
+{
+	u64 mmcr1 = 0;
+	int i;
+	unsigned int pmc, ev, b, u, s, psel;
+	unsigned int ttmset = 0;
+	unsigned int pmc_inuse = 0;
+
+	if (n_ev > 4)
+		return -1;
+	for (i = 0; i < n_ev; ++i) {
+		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+		if (pmc) {
+			if (pmc_inuse & (1 << (pmc - 1)))
+				return -1;	/* collision! */
+			pmc_inuse |= 1 << (pmc - 1);
+		}
+	}
+	for (i = 0; i < n_ev; ++i) {
+		ev = event[i];
+		pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK;
+		if (pmc) {
+			--pmc;
+		} else {
+			/* can go on any PMC; find a free one */
+			for (pmc = 0; pmc < 4; ++pmc)
+				if (!(pmc_inuse & (1 << pmc)))
+					break;
+			pmc_inuse |= 1 << pmc;
+		}
+		hwc[i] = pmc;
+		psel = ev & PM_PMCSEL_MSK;
+		if (ev & PM_BUSEVENT_MSK) {
+			/* this event uses the event bus */
+			b = (ev >> PM_BYTE_SH) & PM_BYTE_MSK;
+			u = (ev >> PM_UNIT_SH) & PM_UNIT_MSK;
+			/* check for conflict on this byte of event bus */
+			if ((ttmset & (1 << b)) && MMCR1_TTMSEL(mmcr1, b) != u)
+				return -1;
+			mmcr1 |= (u64)u << MMCR1_TTMSEL_SH(b);
+			ttmset |= 1 << b;
+			if (u == 5) {
+				/* Nest events have a further mux */
+				s = (ev >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK;
+				if ((ttmset & 0x10) &&
+				    MMCR1_NESTSEL(mmcr1) != s)
+					return -1;
+				ttmset |= 0x10;
+				mmcr1 |= (u64)s << MMCR1_NESTSEL_SH;
+			}
+			if (0x30 <= psel && psel <= 0x3d) {
+				/* these need the PMCx_ADDR_SEL bits */
+				if (b >= 2)
+					mmcr1 |= MMCR1_PMC1_ADDR_SEL >> pmc;
+			}
+			/* bus select values are different for PMC3/4 */
+			if (pmc >= 2 && (psel & 0x90) == 0x80)
+				psel ^= 0x20;
+		}
+		if (ev & PM_LLA) {
+			mmcr1 |= MMCR1_PMC1_LLA >> pmc;
+			if (ev & PM_LLAV)
+				mmcr1 |= MMCR1_PMC1_LLA_VALUE >> pmc;
+		}
+		mmcr1 |= (u64)psel << MMCR1_PMCSEL_SH(pmc);
+	}
+	mmcr[0] = 0;
+	if (pmc_inuse & 1)
+		mmcr[0] = MMCR0_PMC1CE;
+	if (pmc_inuse & 0xe)
+		mmcr[0] |= MMCR0_PMCjCE;
+	mmcr[1] = mmcr1;
+	mmcr[2] = 0;
+	return 0;
+}
+
+/*
+ * Layout of constraint bits:
+ *
+ * 0-1		add field: number of uses of PMC1 (max 1)
+ * 2-3, 4-5, 6-7: ditto for PMC2, 3, 4
+ * 8-10		select field: nest (subunit) event selector
+ * 16-19	select field: unit on byte 0 of event bus
+ * 20-23, 24-27, 28-31 ditto for bytes 1, 2, 3
+ */
+static int p6_get_constraint(unsigned int event, u64 *maskp, u64 *valp)
+{
+	int pmc, byte, sh;
+	unsigned int mask = 0, value = 0;
+
+	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+	if (pmc) {
+		if (pmc > 4)
+			return -1;
+		sh = (pmc - 1) * 2;
+		mask |= 2 << sh;
+		value |= 1 << sh;
+	}
+	if (event & PM_BUSEVENT_MSK) {
+		byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+		sh = byte * 4;
+		mask |= PM_UNIT_MSKS << sh;
+		value |= (event & PM_UNIT_MSKS) << sh;
+		if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) {
+			mask |= PM_SUBUNIT_MSKS;
+			value |= event & PM_SUBUNIT_MSKS;
+		}
+	}
+	*maskp = mask;
+	*valp = value;
+	return 0;
+}
+
+#define MAX_ALT	4	/* at most 4 alternatives for any event */
+
+static const unsigned int event_alternatives[][MAX_ALT] = {
+	{ 0x0130e8, 0x2000f6, 0x3000fc },		/* PM_PTEG_RELOAD_VALID */
+	{ 0x080080, 0x10000d, 0x30000c, 0x4000f0 },	/* PM_LD_MISS_L1 */
+	{ 0x080088, 0x200054, 0x3000f0 },		/* PM_ST_MISS_L1 */
+	{ 0x10000a, 0x2000f4 },				/* PM_RUN_CYC */
+	{ 0x10000b, 0x2000f5 },				/* PM_RUN_COUNT */
+	{ 0x10000e, 0x400010 },				/* PM_PURR */
+	{ 0x100010, 0x4000f8 },				/* PM_FLUSH */
+	{ 0x10001a, 0x200010 },				/* PM_MRK_INST_DISP */
+	{ 0x100026, 0x3000f8 },				/* PM_TB_BIT_TRANS */
+	{ 0x100054, 0x2000f0 },				/* PM_ST_FIN */
+	{ 0x100056, 0x2000fc },				/* PM_L1_ICACHE_MISS */
+	{ 0x1000f0, 0x40000a },				/* PM_INST_IMC_MATCH_CMPL */
+	{ 0x1000f8, 0x200008 },				/* PM_GCT_EMPTY_CYC */
+	{ 0x1000fc, 0x400006 },				/* PM_LSU_DERAT_MISS_CYC */
+	{ 0x20000e, 0x400007 },				/* PM_LSU_DERAT_MISS */
+	{ 0x200012, 0x300012 },				/* PM_INST_DISP */
+	{ 0x2000f2, 0x3000f2 },				/* PM_INST_DISP */
+	{ 0x2000f8, 0x300010 },				/* PM_EXT_INT */
+	{ 0x2000fe, 0x300056 },				/* PM_DATA_FROM_L2MISS */
+	{ 0x2d0030, 0x30001a },				/* PM_MRK_FPU_FIN */
+	{ 0x30000a, 0x400018 },				/* PM_MRK_INST_FIN */
+	{ 0x3000f6, 0x40000e },				/* PM_L1_DCACHE_RELOAD_VALID */
+	{ 0x3000fe, 0x400056 },				/* PM_DATA_FROM_L3MISS */
+};
+
+/*
+ * This could be made more efficient with a binary search on
+ * a presorted list, if necessary
+ */
+static int find_alternatives_list(unsigned int event)
+{
+	int i, j;
+	unsigned int alt;
+
+	for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
+		if (event < event_alternatives[i][0])
+			return -1;
+		for (j = 0; j < MAX_ALT; ++j) {
+			alt = event_alternatives[i][j];
+			if (!alt || event < alt)
+				break;
+			if (event == alt)
+				return i;
+		}
+	}
+	return -1;
+}
+
+static int p6_get_alternatives(unsigned int event, unsigned int alt[])
+{
+	int i, j;
+	unsigned int aevent, psel, pmc;
+	unsigned int nalt = 1;
+
+	alt[0] = event;
+
+	/* check the alternatives table */
+	i = find_alternatives_list(event);
+	if (i >= 0) {
+		/* copy out alternatives from list */
+		for (j = 0; j < MAX_ALT; ++j) {
+			aevent = event_alternatives[i][j];
+			if (!aevent)
+				break;
+			if (aevent != event)
+				alt[nalt++] = aevent;
+		}
+
+	} else {
+		/* Check for alternative ways of computing sum events */
+		/* PMCSEL 0x32 counter N == PMCSEL 0x34 counter 5-N */
+		psel = event & (PM_PMCSEL_MSK & ~1);	/* ignore edge bit */
+		pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+		if (pmc && (psel == 0x32 || psel == 0x34))
+			alt[nalt++] = ((event ^ 0x6) & ~PM_PMC_MSKS) |
+				((5 - pmc) << PM_PMC_SH);
+
+		/* PMCSEL 0x38 counter N == PMCSEL 0x3a counter N+/-2 */
+		if (pmc && (psel == 0x38 || psel == 0x3a))
+			alt[nalt++] = ((event ^ 0x2) & ~PM_PMC_MSKS) |
+				((pmc > 2 ? pmc - 2 : pmc + 2) << PM_PMC_SH);
+	}
+
+	return nalt;
+}
+
+static void p6_disable_pmc(unsigned int pmc, u64 mmcr[])
+{
+	/* Set PMCxSEL to 0 to disable PMCx */
+	mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
+}
+
+static int power6_generic_events[] = {
+	[PERF_COUNT_CPU_CYCLES] = 0x1e,
+	[PERF_COUNT_INSTRUCTIONS] = 2,
+	[PERF_COUNT_CACHE_REFERENCES] = 0x280030,	/* LD_REF_L1 */
+	[PERF_COUNT_CACHE_MISSES] = 0x30000c,		/* LD_MISS_L1 */
+	[PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x410a0,	/* BR_PRED */
+	[PERF_COUNT_BRANCH_MISSES] = 0x400052,		/* BR_MPRED */
+};
+
+struct power_pmu power6_pmu = {
+	.n_counter = 4,
+	.max_alternatives = MAX_ALT,
+	.add_fields = 0x55,
+	.test_adder = 0,
+	.compute_mmcr = p6_compute_mmcr,
+	.get_constraint = p6_get_constraint,
+	.get_alternatives = p6_get_alternatives,
+	.disable_pmc = p6_disable_pmc,
+	.n_generic = ARRAY_SIZE(power6_generic_events),
+	.generic_events = power6_generic_events,
+};
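
Aside: the mask/value pairs produced by p6_get_constraint() are consumed by power_check_constraints() in perf_counter.c above. Each PMC gets a 2-bit counting field; add_fields (0x55 for POWER6) marks the low bit of each field, so a second use of the same PMC carries into the field's high bit, which the constraint mask covers, and the check fails. A stand-alone sketch of that arithmetic; add_event() is a hypothetical helper mirroring one step of power_check_constraints(), with POWER6's test_adder of 0 omitted.

#include <stdio.h>

typedef unsigned long long u64;

/* illustrative helper, not the kernel's: one step of the feasibility check */
static int add_event(u64 *value, u64 *mask, u64 av, u64 amask, u64 addf)
{
	u64 nv = (*value | av) + (*value & av & addf);	/* add within counting fields */

	if (((nv ^ *value) & *mask) || ((nv ^ av) & amask))
		return -1;				/* constraint violated */
	*value = nv;
	*mask |= amask;
	return 0;
}

int main(void)
{
	u64 value = 0, mask = 0, addf = 0x55;	/* add_fields from power6_pmu */

	/* event A wants PMC1: value bit 0, mask bit 1 */
	printf("A on PMC1: %d\n", add_event(&value, &mask, 0x1, 0x2, addf));	/* 0 */
	/* event B also wants PMC1: the add carries into the masked bit */
	printf("B on PMC1: %d\n", add_event(&value, &mask, 0x1, 0x2, addf));	/* -1 */
	/* event B on PMC2 instead: disjoint field, feasible */
	printf("B on PMC2: %d\n", add_event(&value, &mask, 0x4, 0x8, addf));	/* 0 */
	return 0;
}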
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
new file mode 100644
index 000000000000..c3256580be1a
--- /dev/null
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -0,0 +1,375 @@
+/*
+ * Performance counter support for PPC970-family processors.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/string.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+
+/*
+ * Bits in event code for PPC970
+ */
+#define PM_PMC_SH	12	/* PMC number (1-based) for direct events */
+#define PM_PMC_MSK	0xf
+#define PM_UNIT_SH	8	/* TTMMUX number and setting - unit select */
+#define PM_UNIT_MSK	0xf
+#define PM_BYTE_SH	4	/* Byte number of event bus to use */
+#define PM_BYTE_MSK	3
+#define PM_PMCSEL_MSK	0xf
+
+/* Values in PM_UNIT field */
+#define PM_NONE		0
+#define PM_FPU		1
+#define PM_VPU		2
+#define PM_ISU		3
+#define PM_IFU		4
+#define PM_IDU		5
+#define PM_STS		6
+#define PM_LSU0		7
+#define PM_LSU1U	8
+#define PM_LSU1L	9
+#define PM_LASTUNIT	9
+
+/*
+ * Bits in MMCR0 for PPC970
+ */
+#define MMCR0_PMC1SEL_SH	8
+#define MMCR0_PMC2SEL_SH	1
+#define MMCR_PMCSEL_MSK		0x1f
+
+/*
+ * Bits in MMCR1 for PPC970
+ */
+#define MMCR1_TTM0SEL_SH	62
+#define MMCR1_TTM1SEL_SH	59
+#define MMCR1_TTM3SEL_SH	53
+#define MMCR1_TTMSEL_MSK	3
+#define MMCR1_TD_CP_DBG0SEL_SH	50
+#define MMCR1_TD_CP_DBG1SEL_SH	48
+#define MMCR1_TD_CP_DBG2SEL_SH	46
+#define MMCR1_TD_CP_DBG3SEL_SH	44
+#define MMCR1_PMC1_ADDER_SEL_SH	39
+#define MMCR1_PMC2_ADDER_SEL_SH	38
+#define MMCR1_PMC6_ADDER_SEL_SH	37
+#define MMCR1_PMC5_ADDER_SEL_SH	36
+#define MMCR1_PMC8_ADDER_SEL_SH	35
+#define MMCR1_PMC7_ADDER_SEL_SH	34
+#define MMCR1_PMC3_ADDER_SEL_SH	33
+#define MMCR1_PMC4_ADDER_SEL_SH	32
+#define MMCR1_PMC3SEL_SH	27
+#define MMCR1_PMC4SEL_SH	22
+#define MMCR1_PMC5SEL_SH	17
+#define MMCR1_PMC6SEL_SH	12
+#define MMCR1_PMC7SEL_SH	7
+#define MMCR1_PMC8SEL_SH	2
+
+static short mmcr1_adder_bits[8] = {
+	MMCR1_PMC1_ADDER_SEL_SH,
+	MMCR1_PMC2_ADDER_SEL_SH,
+	MMCR1_PMC3_ADDER_SEL_SH,
+	MMCR1_PMC4_ADDER_SEL_SH,
+	MMCR1_PMC5_ADDER_SEL_SH,
+	MMCR1_PMC6_ADDER_SEL_SH,
+	MMCR1_PMC7_ADDER_SEL_SH,
+	MMCR1_PMC8_ADDER_SEL_SH
+};
+
+/*
+ * Bits in MMCRA
+ */
+
+/*
+ * Layout of constraint bits:
+ * 6666555555555544444444443333333333222222222211111111110000000000
+ * 3210987654321098765432109876543210987654321098765432109876543210
+ *               <><>[  >[  >[  ><  ><  ><  ><  ><><><><><><><><>
+ *               T0T1 UC  PS1 PS2 B0  B1  B2  B3 P1P2P3P4P5P6P7P8
+ *
+ * T0 - TTM0 constraint
+ *     46-47: TTM0SEL value (0=FPU, 2=IFU, 3=VPU)	0xC000_0000_0000
+ *
+ * T1 - TTM1 constraint
+ *     44-45: TTM1SEL value (0=IDU, 3=STS)		0x3000_0000_0000
+ *
+ * UC - unit constraint: can't have all three of FPU|IFU|VPU, ISU, IDU|STS
+ *     43: UC3 error					0x0800_0000_0000
+ *     42: FPU|IFU|VPU events needed			0x0400_0000_0000
+ *     41: ISU events needed				0x0200_0000_0000
+ *     40: IDU|STS events needed			0x0100_0000_0000
+ *
+ * PS1
+ *     39: PS1 error					0x0080_0000_0000
+ *     36-38: count of events needing PMC1/2/5/6	0x0070_0000_0000
+ *
+ * PS2
+ *     35: PS2 error					0x0008_0000_0000
+ *     32-34: count of events needing PMC3/4/7/8	0x0007_0000_0000
+ *
+ * B0
+ *     28-31: Byte 0 event source			0xf000_0000
+ *	      Encoding as for the event code
+ *
+ * B1, B2, B3
+ *     24-27, 20-23, 16-19: Byte 1, 2, 3 event sources
+ *
+ * P1
+ *     15: P1 error					0x8000
+ *     14-15: Count of events needing PMC1
+ *
+ * P2..P8
+ *     0-13: Count of events needing PMC2..PMC8
+ */
+
+/* Masks and values for using events from the various units */
+static u64 unit_cons[PM_LASTUNIT+1][2] = {
+	[PM_FPU] = { 0xc80000000000ull, 0x040000000000ull },
+	[PM_VPU] = { 0xc80000000000ull, 0xc40000000000ull },
+	[PM_ISU] = { 0x080000000000ull, 0x020000000000ull },
+	[PM_IFU] = { 0xc80000000000ull, 0x840000000000ull },
+	[PM_IDU] = { 0x380000000000ull, 0x010000000000ull },
+	[PM_STS] = { 0x380000000000ull, 0x310000000000ull },
+};
+
+static int p970_get_constraint(unsigned int event, u64 *maskp, u64 *valp)
+{
+	int pmc, byte, unit, sh;
+	u64 mask = 0, value = 0;
+	int grp = -1;
+
+	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+	if (pmc) {
+		if (pmc > 8)
+			return -1;
+		sh = (pmc - 1) * 2;
+		mask |= 2 << sh;
+		value |= 1 << sh;
+		grp = ((pmc - 1) >> 1) & 1;
+	}
+	unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+	if (unit) {
+		if (unit > PM_LASTUNIT)
+			return -1;
+		mask |= unit_cons[unit][0];
+		value |= unit_cons[unit][1];
+		byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+		/*
+		 * Bus events on bytes 0 and 2 can be counted
+		 * on PMC1/2/5/6; bytes 1 and 3 on PMC3/4/7/8.
+		 */
+		if (!pmc)
+			grp = byte & 1;
+		/* Set byte lane select field */
+		mask |= 0xfULL << (28 - 4 * byte);
+		value |= (u64)unit << (28 - 4 * byte);
+	}
+	if (grp == 0) {
+		/* increment PMC1/2/5/6 field */
+		mask |= 0x8000000000ull;
+		value |= 0x1000000000ull;
+	} else if (grp == 1) {
+		/* increment PMC3/4/7/8 field */
+		mask |= 0x800000000ull;
+		value |= 0x100000000ull;
+	}
+	*maskp = mask;
+	*valp = value;
+	return 0;
+}
+
+static int p970_get_alternatives(unsigned int event, unsigned int alt[])
+{
+	alt[0] = event;
+
+	/* 2 alternatives for LSU empty */
+	if (event == 0x2002 || event == 0x3002) {
+		alt[1] = event ^ 0x1000;
+		return 2;
+	}
+
+	return 1;
+}
+
+static int p970_compute_mmcr(unsigned int event[], int n_ev,
+			     unsigned int hwc[], u64 mmcr[])
+{
+	u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0;
+	unsigned int pmc, unit, byte, psel;
+	unsigned int ttm, grp;
+	unsigned int pmc_inuse = 0;
+	unsigned int pmc_grp_use[2];
+	unsigned char busbyte[4];
+	unsigned char unituse[16];
+	unsigned char unitmap[] = { 0, 0<<3, 3<<3, 1<<3, 2<<3, 0|4, 3|4 };
+	unsigned char ttmuse[2];
+	unsigned char pmcsel[8];
+	int i;
+
+	if (n_ev > 8)
+		return -1;
+
+	/* First pass to count resource use */
+	pmc_grp_use[0] = pmc_grp_use[1] = 0;
+	memset(busbyte, 0, sizeof(busbyte));
+	memset(unituse, 0, sizeof(unituse));
+	for (i = 0; i < n_ev; ++i) {
+		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+		if (pmc) {
+			if (pmc_inuse & (1 << (pmc - 1)))
+				return -1;
+			pmc_inuse |= 1 << (pmc - 1);
+			/* count 1/2/5/6 vs 3/4/7/8 use */
+			++pmc_grp_use[((pmc - 1) >> 1) & 1];
+		}
+		unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+		byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+		if (unit) {
+			if (unit > PM_LASTUNIT)
+				return -1;
+			if (!pmc)
+				++pmc_grp_use[byte & 1];
+			if (busbyte[byte] && busbyte[byte] != unit)
+				return -1;
+			busbyte[byte] = unit;
+			unituse[unit] = 1;
+		}
+	}
+	if (pmc_grp_use[0] > 4 || pmc_grp_use[1] > 4)
+		return -1;
+
+	/*
+	 * Assign resources and set multiplexer selects.
+	 *
+	 * PM_ISU can go either on TTM0 or TTM1, but that's the only
+	 * choice we have to deal with.
+	 */
+	if (unituse[PM_ISU] &
+	    (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_VPU]))
+		unitmap[PM_ISU] = 2 | 4;	/* move ISU to TTM1 */
+	/* Set TTM[01]SEL fields. */
+	ttmuse[0] = ttmuse[1] = 0;
+	for (i = PM_FPU; i <= PM_STS; ++i) {
+		if (!unituse[i])
+			continue;
+		ttm = unitmap[i];
+		++ttmuse[(ttm >> 2) & 1];
+		mmcr1 |= (u64)(ttm & ~4) << MMCR1_TTM1SEL_SH;
+	}
+	/* Check only one unit per TTMx */
+	if (ttmuse[0] > 1 || ttmuse[1] > 1)
+		return -1;
+
+	/* Set byte lane select fields and TTM3SEL. */
+	for (byte = 0; byte < 4; ++byte) {
+		unit = busbyte[byte];
+		if (!unit)
+			continue;
+		if (unit <= PM_STS)
+			ttm = (unitmap[unit] >> 2) & 1;
+		else if (unit == PM_LSU0)
+			ttm = 2;
+		else {
+			ttm = 3;
+			if (unit == PM_LSU1L && byte >= 2)
+				mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
+		}
+		mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
+	}
+
+	/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
+	memset(pmcsel, 0x8, sizeof(pmcsel));	/* 8 means don't count */
+	for (i = 0; i < n_ev; ++i) {
+		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+		unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+		byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+		psel = event[i] & PM_PMCSEL_MSK;
+		if (!pmc) {
+			/* Bus event or any-PMC direct event */
+			if (unit)
+				psel |= 0x10 | ((byte & 2) << 2);
+			else
+				psel |= 8;
+			for (pmc = 0; pmc < 8; ++pmc) {
+				if (pmc_inuse & (1 << pmc))
+					continue;
+				grp = (pmc >> 1) & 1;
+				if (unit) {
+					if (grp == (byte & 1))
+						break;
+				} else if (pmc_grp_use[grp] < 4) {
+					++pmc_grp_use[grp];
+					break;
+				}
+			}
+			pmc_inuse |= 1 << pmc;
+		} else {
+			/* Direct event */
+			--pmc;
+			if (psel == 0 && (byte & 2))
+				/* add events on higher-numbered bus */
+				mmcr1 |= 1ull << mmcr1_adder_bits[pmc];
+		}
+		pmcsel[pmc] = psel;
+		hwc[i] = pmc;
+	}
+	for (pmc = 0; pmc < 2; ++pmc)
+		mmcr0 |= pmcsel[pmc] << (MMCR0_PMC1SEL_SH - 7 * pmc);
+	for (; pmc < 8; ++pmc)
+		mmcr1 |= (u64)pmcsel[pmc] << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2));
+	if (pmc_inuse & 1)
+		mmcr0 |= MMCR0_PMC1CE;
+	if (pmc_inuse & 0xfe)
+		mmcr0 |= MMCR0_PMCjCE;
+
+	mmcra |= 0x2000;	/* mark only one IOP per PPC instruction */
+
+	/* Return MMCRx values */
+	mmcr[0] = mmcr0;
+	mmcr[1] = mmcr1;
+	mmcr[2] = mmcra;
+	return 0;
+}
+
+static void p970_disable_pmc(unsigned int pmc, u64 mmcr[])
+{
+	int shift, i;
+
+	if (pmc <= 1) {
+		shift = MMCR0_PMC1SEL_SH - 7 * pmc;
+		i = 0;
+	} else {
+		shift = MMCR1_PMC3SEL_SH - 5 * (pmc - 2);
+		i = 1;
+	}
+	/*
+	 * Setting the PMCxSEL field to 0x08 disables PMC x.
+	 */
+	mmcr[i] = (mmcr[i] & ~(0x1fUL << shift)) | (0x08UL << shift);
+}
+
+static int ppc970_generic_events[] = {
+	[PERF_COUNT_CPU_CYCLES] = 7,
+	[PERF_COUNT_INSTRUCTIONS] = 1,
+	[PERF_COUNT_CACHE_REFERENCES] = 0x8810,		/* PM_LD_REF_L1 */
+	[PERF_COUNT_CACHE_MISSES] = 0x3810,		/* PM_LD_MISS_L1 */
+	[PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x431,	/* PM_BR_ISSUED */
+	[PERF_COUNT_BRANCH_MISSES] = 0x327,		/* PM_GRP_BR_MPRED */
+};
+
+struct power_pmu ppc970_pmu = {
+	.n_counter = 8,
+	.max_alternatives = 2,
+	.add_fields = 0x001100005555ull,
+	.test_adder = 0x013300000000ull,
+	.compute_mmcr = p970_compute_mmcr,
+	.get_constraint = p970_get_constraint,
+	.get_alternatives = p970_get_alternatives,
+	.disable_pmc = p970_disable_pmc,
+	.n_generic = ARRAY_SIZE(ppc970_generic_events),
+	.generic_events = ppc970_generic_events,
+};
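
Aside: the PPC970 code above schedules the eight PMCs as two groups of four, PMC1/2/5/6 and PMC3/4/7/8, using the expression ((pmc - 1) >> 1) & 1; bytes 0/2 of the event bus feed the first group and bytes 1/3 the second. A quick stand-alone check of that mapping (illustrative only):

#include <stdio.h>

int main(void)
{
	int pmc;

	for (pmc = 1; pmc <= 8; ++pmc)
		printf("PMC%d -> group %d\n", pmc, ((pmc - 1) >> 1) & 1);
	/* prints 0 0 1 1 0 0 1 1, matching the PMC1/2/5/6 vs PMC3/4/7/8
	   split described in the p970_get_constraint() comment */
	return 0;
}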