Diffstat (limited to 'arch/powerpc')
 arch/powerpc/include/asm/hw_irq.h       |  39
 arch/powerpc/include/asm/paca.h         |   1
 arch/powerpc/include/asm/perf_counter.h |  72
 arch/powerpc/include/asm/systbl.h       |   2
 arch/powerpc/include/asm/unistd.h       |   1
 arch/powerpc/kernel/Makefile            |   2
 arch/powerpc/kernel/asm-offsets.c       |   1
 arch/powerpc/kernel/entry_64.S          |   9
 arch/powerpc/kernel/irq.c               |   5
 arch/powerpc/kernel/perf_counter.c      | 846
 arch/powerpc/kernel/power4-pmu.c        | 557
 arch/powerpc/kernel/power5+-pmu.c       | 452
 arch/powerpc/kernel/power5-pmu.c        | 475
 arch/powerpc/kernel/power6-pmu.c        | 283
 arch/powerpc/kernel/ppc970-pmu.c        | 375
 arch/powerpc/mm/fault.c                 |   8
 arch/powerpc/platforms/Kconfig.cputype  |   1
 17 files changed, 3127 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index b7e034b0a6dd..20a44d0c9fdd 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -131,5 +131,44 @@ static inline int irqs_disabled_flags(unsigned long flags)
131 | */ | 131 | */ |
132 | struct irq_chip; | 132 | struct irq_chip; |
133 | 133 | ||
134 | #ifdef CONFIG_PERF_COUNTERS | ||
135 | static inline unsigned long test_perf_counter_pending(void) | ||
136 | { | ||
137 | unsigned long x; | ||
138 | |||
139 | asm volatile("lbz %0,%1(13)" | ||
140 | : "=r" (x) | ||
141 | : "i" (offsetof(struct paca_struct, perf_counter_pending))); | ||
142 | return x; | ||
143 | } | ||
144 | |||
145 | static inline void set_perf_counter_pending(void) | ||
146 | { | ||
147 | asm volatile("stb %0,%1(13)" : : | ||
148 | "r" (1), | ||
149 | "i" (offsetof(struct paca_struct, perf_counter_pending))); | ||
150 | } | ||
151 | |||
152 | static inline void clear_perf_counter_pending(void) | ||
153 | { | ||
154 | asm volatile("stb %0,%1(13)" : : | ||
155 | "r" (0), | ||
156 | "i" (offsetof(struct paca_struct, perf_counter_pending))); | ||
157 | } | ||
158 | |||
159 | extern void perf_counter_do_pending(void); | ||
160 | |||
161 | #else | ||
162 | |||
163 | static inline unsigned long test_perf_counter_pending(void) | ||
164 | { | ||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | static inline void set_perf_counter_pending(void) {} | ||
169 | static inline void clear_perf_counter_pending(void) {} | ||
170 | static inline void perf_counter_do_pending(void) {} | ||
171 | #endif /* CONFIG_PERF_COUNTERS */ | ||
172 | |||
134 | #endif /* __KERNEL__ */ | 173 | #endif /* __KERNEL__ */ |
135 | #endif /* _ASM_POWERPC_HW_IRQ_H */ | 174 | #endif /* _ASM_POWERPC_HW_IRQ_H */ |
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 082b3aedf145..6ef055723019 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -99,6 +99,7 @@ struct paca_struct {
99 | u8 soft_enabled; /* irq soft-enable flag */ | 99 | u8 soft_enabled; /* irq soft-enable flag */ |
100 | u8 hard_enabled; /* set if irqs are enabled in MSR */ | 100 | u8 hard_enabled; /* set if irqs are enabled in MSR */ |
101 | u8 io_sync; /* writel() needs spin_unlock sync */ | 101 | u8 io_sync; /* writel() needs spin_unlock sync */ |
102 | u8 perf_counter_pending; /* PM interrupt while soft-disabled */ | ||
102 | 103 | ||
103 | /* Stuff for accurate time accounting */ | 104 | /* Stuff for accurate time accounting */ |
104 | u64 user_time; /* accumulated usermode TB ticks */ | 105 | u64 user_time; /* accumulated usermode TB ticks */ |
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h
new file mode 100644
index 000000000000..9d7ff6d7fb56
--- /dev/null
+++ b/arch/powerpc/include/asm/perf_counter.h
@@ -0,0 +1,72 @@
1 | /* | ||
2 | * Performance counter support - PowerPC-specific definitions. | ||
3 | * | ||
4 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/types.h> | ||
12 | |||
13 | #define MAX_HWCOUNTERS 8 | ||
14 | #define MAX_EVENT_ALTERNATIVES 8 | ||
15 | |||
16 | /* | ||
17 | * This struct provides the constants and functions needed to | ||
18 | * describe the PMU on a particular POWER-family CPU. | ||
19 | */ | ||
20 | struct power_pmu { | ||
21 | int n_counter; | ||
22 | int max_alternatives; | ||
23 | u64 add_fields; | ||
24 | u64 test_adder; | ||
25 | int (*compute_mmcr)(unsigned int events[], int n_ev, | ||
26 | unsigned int hwc[], u64 mmcr[]); | ||
27 | int (*get_constraint)(unsigned int event, u64 *mskp, u64 *valp); | ||
28 | int (*get_alternatives)(unsigned int event, unsigned int alt[]); | ||
29 | void (*disable_pmc)(unsigned int pmc, u64 mmcr[]); | ||
30 | int n_generic; | ||
31 | int *generic_events; | ||
32 | }; | ||
33 | |||
34 | extern struct power_pmu *ppmu; | ||
35 | |||
36 | /* | ||
37 | * The power_pmu.get_constraint function returns a 64-bit value and | ||
38 | * a 64-bit mask that express the constraints between this event and | ||
39 | * other events. | ||
40 | * | ||
41 | * The value and mask are divided up into (non-overlapping) bitfields | ||
42 | * of three different types: | ||
43 | * | ||
44 | * Select field: this expresses the constraint that some set of bits | ||
45 | * in MMCR* needs to be set to a specific value for this event. For a | ||
46 | * select field, the mask contains 1s in every bit of the field, and | ||
47 | * the value contains a unique value for each possible setting of the | ||
48 | * MMCR* bits. The constraint checking code will ensure that two events | ||
49 | * that set the same field in their masks have the same value in their | ||
50 | * value dwords. | ||
51 | * | ||
52 | * Add field: this expresses the constraint that there can be at most | ||
53 | * N events in a particular class. A field of k bits can be used for | ||
54 | * N <= 2^(k-1) - 1. The mask has the most significant bit of the field | ||
55 | * set (and the other bits 0), and the value has only the least significant | ||
56 | * bit of the field set. In addition, the 'add_fields' and 'test_adder' | ||
57 | * in the struct power_pmu for this processor come into play. The | ||
58 | * add_fields value contains 1 in the LSB of the field, and the | ||
59 | * test_adder contains 2^(k-1) - 1 - N in the field. | ||
60 | * | ||
61 | * NAND field: this expresses the constraint that you may not have events | ||
62 | * in all of a set of classes. (For example, on PPC970, you can't select | ||
63 | * events from the FPU, ISU and IDU simultaneously, although any two are | ||
64 | * possible.) For N classes, the field is N+1 bits wide, and each class | ||
65 | * is assigned one bit from the least-significant N bits. The mask has | ||
66 | * only the most-significant bit set, and the value has only the bit | ||
67 | * for the event's class set. The test_adder has the least significant | ||
68 | * bit set in the field. | ||
69 | * | ||
70 | * If an event is not subject to the constraint expressed by a particular | ||
71 | * field, then it will have 0 in both the mask and value for that field. | ||
72 | */ | ||
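
As an illustration of the field types described in the comment above (not part of the patch; the bit positions and the event-code layout here are invented), a get_constraint() implementation along these lines might encode a select field and a NAND field like this:

	#include <linux/types.h>

	/*
	 * Invented layout, for illustration only:
	 *  - bits 0-3:   select field holding a 4-bit mux code
	 *  - bits 60-63: NAND field for three unit classes; bits 60-62
	 *    are the per-class bits, bit 63 is the bit the mask watches
	 */
	static int example_get_constraint(unsigned int event, u64 *mskp, u64 *valp)
	{
		u64 mask = 0, value = 0;
		int mux  = event & 0xf;		/* hypothetical mux code in the event */
		int unit = (event >> 4) & 0x3;	/* hypothetical unit class, 0..2 */

		/* Select field: all bits of the field in the mask, the
		 * required mux code in the value, so any two events that
		 * use this field must agree on the code. */
		mask  |= 0xf;
		value |= mux;

		/* NAND field: only the most-significant bit in the mask,
		 * the event's class bit in the value.  With test_adder
		 * carrying a 1 at bit 60, selecting all three classes at
		 * once carries into bit 63 and the mask catches it. */
		mask  |= 1ull << 63;
		value |= 1ull << (60 + unit);

		*mskp = mask;
		*valp = value;
		return 0;
	}

An add field works the same way, except that add_fields contributes a 1 at the field's low bit so repeated events of the class genuinely count up; the search loop in power_check_constraints() in perf_counter.c below relies on exactly that arithmetic. The real encodings live in the per-CPU files (power4-pmu.c, ppc970-pmu.c, etc.) added by this commit.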
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index d98a30dfd41c..a0b92de51c7e 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -322,6 +322,6 @@ SYSCALL_SPU(epoll_create1)
322 | SYSCALL_SPU(dup3) | 322 | SYSCALL_SPU(dup3) |
323 | SYSCALL_SPU(pipe2) | 323 | SYSCALL_SPU(pipe2) |
324 | SYSCALL(inotify_init1) | 324 | SYSCALL(inotify_init1) |
325 | SYSCALL(ni_syscall) | 325 | SYSCALL_SPU(perf_counter_open) |
326 | COMPAT_SYS_SPU(preadv) | 326 | COMPAT_SYS_SPU(preadv) |
327 | COMPAT_SYS_SPU(pwritev) | 327 | COMPAT_SYS_SPU(pwritev) |
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 3f06f8ec81c5..4badac2d11d1 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -341,6 +341,7 @@
341 | #define __NR_dup3 316 | 341 | #define __NR_dup3 316 |
342 | #define __NR_pipe2 317 | 342 | #define __NR_pipe2 317 |
343 | #define __NR_inotify_init1 318 | 343 | #define __NR_inotify_init1 318 |
344 | #define __NR_perf_counter_open 319 | ||
344 | #define __NR_preadv 320 | 345 | #define __NR_preadv 320 |
345 | #define __NR_pwritev 321 | 346 | #define __NR_pwritev 321 |
346 | 347 | ||
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 71901fbda4a5..9ba1bb731fcc 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -94,6 +94,8 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o
94 | 94 | ||
95 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 95 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
96 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | 96 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o |
97 | obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o power4-pmu.o ppc970-pmu.o \ | ||
98 | power5-pmu.o power5+-pmu.o power6-pmu.o | ||
97 | 99 | ||
98 | obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o | 100 | obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o |
99 | 101 | ||
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 1e40bc053946..e981d1ce1914 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -131,6 +131,7 @@ int main(void)
131 | DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr)); | 131 | DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr)); |
132 | DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); | 132 | DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); |
133 | DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled)); | 133 | DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled)); |
134 | DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending)); | ||
134 | DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); | 135 | DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); |
135 | DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); | 136 | DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); |
136 | DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); | 137 | DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index abfc32330479..43e073477c34 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -526,6 +526,15 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
526 | 2: | 526 | 2: |
527 | TRACE_AND_RESTORE_IRQ(r5); | 527 | TRACE_AND_RESTORE_IRQ(r5); |
528 | 528 | ||
529 | #ifdef CONFIG_PERF_COUNTERS | ||
530 | /* check paca->perf_counter_pending if we're enabling ints */ | ||
531 | lbz r3,PACAPERFPEND(r13) | ||
532 | and. r3,r3,r5 | ||
533 | beq 27f | ||
534 | bl .perf_counter_do_pending | ||
535 | 27: | ||
536 | #endif /* CONFIG_PERF_COUNTERS */ | ||
537 | |||
529 | /* extract EE bit and use it to restore paca->hard_enabled */ | 538 | /* extract EE bit and use it to restore paca->hard_enabled */ |
530 | ld r3,_MSR(r1) | 539 | ld r3,_MSR(r1) |
531 | rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */ | 540 | rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */ |
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 8c1a4966867e..feff792ed0f9 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -135,6 +135,11 @@ notrace void raw_local_irq_restore(unsigned long en)
135 | iseries_handle_interrupts(); | 135 | iseries_handle_interrupts(); |
136 | } | 136 | } |
137 | 137 | ||
138 | if (test_perf_counter_pending()) { | ||
139 | clear_perf_counter_pending(); | ||
140 | perf_counter_do_pending(); | ||
141 | } | ||
142 | |||
138 | /* | 143 | /* |
139 | * if (get_paca()->hard_enabled) return; | 144 | * if (get_paca()->hard_enabled) return; |
140 | * But again we need to take care that gcc gets hard_enabled directly | 145 | * But again we need to take care that gcc gets hard_enabled directly |
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
new file mode 100644
index 000000000000..f88c35d0710a
--- /dev/null
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -0,0 +1,846 @@
1 | /* | ||
2 | * Performance counter support - powerpc architecture code | ||
3 | * | ||
4 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/perf_counter.h> | ||
14 | #include <linux/percpu.h> | ||
15 | #include <linux/hardirq.h> | ||
16 | #include <asm/reg.h> | ||
17 | #include <asm/pmc.h> | ||
18 | #include <asm/machdep.h> | ||
19 | #include <asm/firmware.h> | ||
20 | |||
21 | struct cpu_hw_counters { | ||
22 | int n_counters; | ||
23 | int n_percpu; | ||
24 | int disabled; | ||
25 | int n_added; | ||
26 | struct perf_counter *counter[MAX_HWCOUNTERS]; | ||
27 | unsigned int events[MAX_HWCOUNTERS]; | ||
28 | u64 mmcr[3]; | ||
29 | u8 pmcs_enabled; | ||
30 | }; | ||
31 | DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters); | ||
32 | |||
33 | struct power_pmu *ppmu; | ||
34 | |||
35 | /* | ||
36 | * Normally, to ignore kernel events we set the FCS (freeze counters | ||
37 | * in supervisor mode) bit in MMCR0, but if the kernel runs with the | ||
38 | * hypervisor bit set in the MSR, or if we are running on a processor | ||
39 | * where the hypervisor bit is forced to 1 (as on Apple G5 processors), | ||
40 | * then we need to use the FCHV bit to ignore kernel events. | ||
41 | */ | ||
42 | static unsigned int freeze_counters_kernel = MMCR0_FCS; | ||
43 | |||
44 | static void perf_counter_interrupt(struct pt_regs *regs); | ||
45 | |||
46 | void perf_counter_print_debug(void) | ||
47 | { | ||
48 | } | ||
49 | |||
50 | /* | ||
51 | * Read one performance monitor counter (PMC). | ||
52 | */ | ||
53 | static unsigned long read_pmc(int idx) | ||
54 | { | ||
55 | unsigned long val; | ||
56 | |||
57 | switch (idx) { | ||
58 | case 1: | ||
59 | val = mfspr(SPRN_PMC1); | ||
60 | break; | ||
61 | case 2: | ||
62 | val = mfspr(SPRN_PMC2); | ||
63 | break; | ||
64 | case 3: | ||
65 | val = mfspr(SPRN_PMC3); | ||
66 | break; | ||
67 | case 4: | ||
68 | val = mfspr(SPRN_PMC4); | ||
69 | break; | ||
70 | case 5: | ||
71 | val = mfspr(SPRN_PMC5); | ||
72 | break; | ||
73 | case 6: | ||
74 | val = mfspr(SPRN_PMC6); | ||
75 | break; | ||
76 | case 7: | ||
77 | val = mfspr(SPRN_PMC7); | ||
78 | break; | ||
79 | case 8: | ||
80 | val = mfspr(SPRN_PMC8); | ||
81 | break; | ||
82 | default: | ||
83 | printk(KERN_ERR "oops trying to read PMC%d\n", idx); | ||
84 | val = 0; | ||
85 | } | ||
86 | return val; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * Write one PMC. | ||
91 | */ | ||
92 | static void write_pmc(int idx, unsigned long val) | ||
93 | { | ||
94 | switch (idx) { | ||
95 | case 1: | ||
96 | mtspr(SPRN_PMC1, val); | ||
97 | break; | ||
98 | case 2: | ||
99 | mtspr(SPRN_PMC2, val); | ||
100 | break; | ||
101 | case 3: | ||
102 | mtspr(SPRN_PMC3, val); | ||
103 | break; | ||
104 | case 4: | ||
105 | mtspr(SPRN_PMC4, val); | ||
106 | break; | ||
107 | case 5: | ||
108 | mtspr(SPRN_PMC5, val); | ||
109 | break; | ||
110 | case 6: | ||
111 | mtspr(SPRN_PMC6, val); | ||
112 | break; | ||
113 | case 7: | ||
114 | mtspr(SPRN_PMC7, val); | ||
115 | break; | ||
116 | case 8: | ||
117 | mtspr(SPRN_PMC8, val); | ||
118 | break; | ||
119 | default: | ||
120 | printk(KERN_ERR "oops trying to write PMC%d\n", idx); | ||
121 | } | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * Check if a set of events can all go on the PMU at once. | ||
126 | * If they can't, this will look at alternative codes for the events | ||
127 | * and see if any combination of alternative codes is feasible. | ||
128 | * The feasible set is returned in event[]. | ||
129 | */ | ||
130 | static int power_check_constraints(unsigned int event[], int n_ev) | ||
131 | { | ||
132 | u64 mask, value, nv; | ||
133 | unsigned int alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; | ||
134 | u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; | ||
135 | u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; | ||
136 | u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS]; | ||
137 | int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS]; | ||
138 | int i, j; | ||
139 | u64 addf = ppmu->add_fields; | ||
140 | u64 tadd = ppmu->test_adder; | ||
141 | |||
142 | if (n_ev > ppmu->n_counter) | ||
143 | return -1; | ||
144 | |||
145 | /* First see if the events will go on as-is */ | ||
146 | for (i = 0; i < n_ev; ++i) { | ||
147 | alternatives[i][0] = event[i]; | ||
148 | if (ppmu->get_constraint(event[i], &amasks[i][0], | ||
149 | &avalues[i][0])) | ||
150 | return -1; | ||
151 | choice[i] = 0; | ||
152 | } | ||
153 | value = mask = 0; | ||
154 | for (i = 0; i < n_ev; ++i) { | ||
155 | nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf); | ||
156 | if ((((nv + tadd) ^ value) & mask) != 0 || | ||
157 | (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0) | ||
158 | break; | ||
159 | value = nv; | ||
160 | mask |= amasks[i][0]; | ||
161 | } | ||
162 | if (i == n_ev) | ||
163 | return 0; /* all OK */ | ||
164 | |||
165 | /* doesn't work, gather alternatives... */ | ||
166 | if (!ppmu->get_alternatives) | ||
167 | return -1; | ||
168 | for (i = 0; i < n_ev; ++i) { | ||
169 | n_alt[i] = ppmu->get_alternatives(event[i], alternatives[i]); | ||
170 | for (j = 1; j < n_alt[i]; ++j) | ||
171 | ppmu->get_constraint(alternatives[i][j], | ||
172 | &amasks[i][j], &avalues[i][j]); | ||
173 | } | ||
174 | |||
175 | /* enumerate all possibilities and see if any will work */ | ||
176 | i = 0; | ||
177 | j = -1; | ||
178 | value = mask = nv = 0; | ||
179 | while (i < n_ev) { | ||
180 | if (j >= 0) { | ||
181 | /* we're backtracking, restore context */ | ||
182 | value = svalues[i]; | ||
183 | mask = smasks[i]; | ||
184 | j = choice[i]; | ||
185 | } | ||
186 | /* | ||
187 | * See if any alternative k for event i, | ||
188 | * where k > j, will satisfy the constraints. | ||
189 | */ | ||
190 | while (++j < n_alt[i]) { | ||
191 | nv = (value | avalues[i][j]) + | ||
192 | (value & avalues[i][j] & addf); | ||
193 | if ((((nv + tadd) ^ value) & mask) == 0 && | ||
194 | (((nv + tadd) ^ avalues[i][j]) | ||
195 | & amasks[i][j]) == 0) | ||
196 | break; | ||
197 | } | ||
198 | if (j >= n_alt[i]) { | ||
199 | /* | ||
200 | * No feasible alternative, backtrack | ||
201 | * to event i-1 and continue enumerating its | ||
202 | * alternatives from where we got up to. | ||
203 | */ | ||
204 | if (--i < 0) | ||
205 | return -1; | ||
206 | } else { | ||
207 | /* | ||
208 | * Found a feasible alternative for event i, | ||
209 | * remember where we got up to with this event, | ||
210 | * go on to the next event, and start with | ||
211 | * the first alternative for it. | ||
212 | */ | ||
213 | choice[i] = j; | ||
214 | svalues[i] = value; | ||
215 | smasks[i] = mask; | ||
216 | value = nv; | ||
217 | mask |= amasks[i][j]; | ||
218 | ++i; | ||
219 | j = -1; | ||
220 | } | ||
221 | } | ||
222 | |||
223 | /* OK, we have a feasible combination, tell the caller the solution */ | ||
224 | for (i = 0; i < n_ev; ++i) | ||
225 | event[i] = alternatives[i][choice[i]]; | ||
226 | return 0; | ||
227 | } | ||
228 | |||
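
The feasibility test used in the loops above can be exercised on its own. The sketch below (a standalone user-space illustration, not part of the patch) applies the same (value | v) + (value & v & addf) accumulation and (nv + test_adder) checks to an invented 3-bit add field that allows at most two events of one class:

	#include <stdio.h>
	#include <stdint.h>

	/* Invented PMU constraint: one 3-bit add field at bits 8-10,
	 * allowing at most N = 2 events of the class (N <= 2^(k-1) - 1). */
	#define ADD_FIELDS	0x100ull	/* 1 at the LSB of the field */
	#define TEST_ADDER	0x100ull	/* 2^(k-1) - 1 - N = 1, at the LSB */

	/* Accumulate one event with constraint (m, v); returns 0 on
	 * success, -1 if the combination is infeasible. */
	static int add_event(uint64_t *value, uint64_t *mask, uint64_t m, uint64_t v)
	{
		uint64_t nv = (*value | v) + (*value & v & ADD_FIELDS);

		if ((((nv + TEST_ADDER) ^ *value) & *mask) != 0 ||
		    (((nv + TEST_ADDER) ^ v) & m) != 0)
			return -1;
		*value = nv;
		*mask |= m;
		return 0;
	}

	int main(void)
	{
		uint64_t value = 0, mask = 0;
		int i;

		/* Each event of the class has mask = field MSB, value = field LSB. */
		for (i = 1; i <= 3; ++i)
			printf("event %d: %s\n", i,
			       add_event(&value, &mask, 0x400, 0x100) ? "rejected" : "ok");
		return 0;	/* prints: ok, ok, rejected */
	}

The third event carries a 1 into the field's most-significant bit once test_adder is added, which the accumulated mask detects; this is the mechanism power_check_constraints() backtracks over when it tries alternative event codes.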
229 | /* | ||
230 | * Check if newly-added counters have consistent settings for | ||
231 | * exclude_{user,kernel,hv} with each other and any previously | ||
232 | * added counters. | ||
233 | */ | ||
234 | static int check_excludes(struct perf_counter **ctrs, int n_prev, int n_new) | ||
235 | { | ||
236 | int eu, ek, eh; | ||
237 | int i, n; | ||
238 | struct perf_counter *counter; | ||
239 | |||
240 | n = n_prev + n_new; | ||
241 | if (n <= 1) | ||
242 | return 0; | ||
243 | |||
244 | eu = ctrs[0]->hw_event.exclude_user; | ||
245 | ek = ctrs[0]->hw_event.exclude_kernel; | ||
246 | eh = ctrs[0]->hw_event.exclude_hv; | ||
247 | if (n_prev == 0) | ||
248 | n_prev = 1; | ||
249 | for (i = n_prev; i < n; ++i) { | ||
250 | counter = ctrs[i]; | ||
251 | if (counter->hw_event.exclude_user != eu || | ||
252 | counter->hw_event.exclude_kernel != ek || | ||
253 | counter->hw_event.exclude_hv != eh) | ||
254 | return -EAGAIN; | ||
255 | } | ||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | static void power_perf_read(struct perf_counter *counter) | ||
260 | { | ||
261 | long val, delta, prev; | ||
262 | |||
263 | if (!counter->hw.idx) | ||
264 | return; | ||
265 | /* | ||
266 | * Performance monitor interrupts come even when interrupts | ||
267 | * are soft-disabled, as long as interrupts are hard-enabled. | ||
268 | * Therefore we treat them like NMIs. | ||
269 | */ | ||
270 | do { | ||
271 | prev = atomic64_read(&counter->hw.prev_count); | ||
272 | barrier(); | ||
273 | val = read_pmc(counter->hw.idx); | ||
274 | } while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev); | ||
275 | |||
276 | /* The counters are only 32 bits wide */ | ||
277 | delta = (val - prev) & 0xfffffffful; | ||
278 | atomic64_add(delta, &counter->count); | ||
279 | atomic64_sub(delta, &counter->hw.period_left); | ||
280 | } | ||
281 | |||
282 | /* | ||
283 | * Disable all counters to prevent PMU interrupts and to allow | ||
284 | * counters to be added or removed. | ||
285 | */ | ||
286 | u64 hw_perf_save_disable(void) | ||
287 | { | ||
288 | struct cpu_hw_counters *cpuhw; | ||
289 | unsigned long ret; | ||
290 | unsigned long flags; | ||
291 | |||
292 | local_irq_save(flags); | ||
293 | cpuhw = &__get_cpu_var(cpu_hw_counters); | ||
294 | |||
295 | ret = cpuhw->disabled; | ||
296 | if (!ret) { | ||
297 | cpuhw->disabled = 1; | ||
298 | cpuhw->n_added = 0; | ||
299 | |||
300 | /* | ||
301 | * Check if we ever enabled the PMU on this cpu. | ||
302 | */ | ||
303 | if (!cpuhw->pmcs_enabled) { | ||
304 | if (ppc_md.enable_pmcs) | ||
305 | ppc_md.enable_pmcs(); | ||
306 | cpuhw->pmcs_enabled = 1; | ||
307 | } | ||
308 | |||
309 | /* | ||
310 | * Set the 'freeze counters' bit. | ||
311 | * The barrier is to make sure the mtspr has been | ||
312 | * executed and the PMU has frozen the counters | ||
313 | * before we return. | ||
314 | */ | ||
315 | mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC); | ||
316 | mb(); | ||
317 | } | ||
318 | local_irq_restore(flags); | ||
319 | return ret; | ||
320 | } | ||
321 | |||
322 | /* | ||
323 | * Re-enable all counters if disable == 0. | ||
324 | * If we were previously disabled and counters were added, then | ||
325 | * put the new config on the PMU. | ||
326 | */ | ||
327 | void hw_perf_restore(u64 disable) | ||
328 | { | ||
329 | struct perf_counter *counter; | ||
330 | struct cpu_hw_counters *cpuhw; | ||
331 | unsigned long flags; | ||
332 | long i; | ||
333 | unsigned long val; | ||
334 | s64 left; | ||
335 | unsigned int hwc_index[MAX_HWCOUNTERS]; | ||
336 | |||
337 | if (disable) | ||
338 | return; | ||
339 | local_irq_save(flags); | ||
340 | cpuhw = &__get_cpu_var(cpu_hw_counters); | ||
341 | cpuhw->disabled = 0; | ||
342 | |||
343 | /* | ||
344 | * If we didn't change anything, or only removed counters, | ||
345 | * no need to recalculate MMCR* settings and reset the PMCs. | ||
346 | * Just reenable the PMU with the current MMCR* settings | ||
347 | * (possibly updated for removal of counters). | ||
348 | */ | ||
349 | if (!cpuhw->n_added) { | ||
350 | mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); | ||
351 | mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); | ||
352 | mtspr(SPRN_MMCR0, cpuhw->mmcr[0]); | ||
353 | if (cpuhw->n_counters == 0) | ||
354 | get_lppaca()->pmcregs_in_use = 0; | ||
355 | goto out; | ||
356 | } | ||
357 | |||
358 | /* | ||
359 | * Compute MMCR* values for the new set of counters | ||
360 | */ | ||
361 | if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index, | ||
362 | cpuhw->mmcr)) { | ||
363 | /* shouldn't ever get here */ | ||
364 | printk(KERN_ERR "oops compute_mmcr failed\n"); | ||
365 | goto out; | ||
366 | } | ||
367 | |||
368 | /* | ||
369 | * Add in MMCR0 freeze bits corresponding to the | ||
370 | * hw_event.exclude_* bits for the first counter. | ||
371 | * We have already checked that all counters have the | ||
372 | * same values for these bits as the first counter. | ||
373 | */ | ||
374 | counter = cpuhw->counter[0]; | ||
375 | if (counter->hw_event.exclude_user) | ||
376 | cpuhw->mmcr[0] |= MMCR0_FCP; | ||
377 | if (counter->hw_event.exclude_kernel) | ||
378 | cpuhw->mmcr[0] |= freeze_counters_kernel; | ||
379 | if (counter->hw_event.exclude_hv) | ||
380 | cpuhw->mmcr[0] |= MMCR0_FCHV; | ||
381 | |||
382 | /* | ||
383 | * Write the new configuration to MMCR* with the freeze | ||
384 | * bit set and set the hardware counters to their initial values. | ||
385 | * Then unfreeze the counters. | ||
386 | */ | ||
387 | get_lppaca()->pmcregs_in_use = 1; | ||
388 | mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); | ||
389 | mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); | ||
390 | mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) | ||
391 | | MMCR0_FC); | ||
392 | |||
393 | /* | ||
394 | * Read off any pre-existing counters that need to move | ||
395 | * to another PMC. | ||
396 | */ | ||
397 | for (i = 0; i < cpuhw->n_counters; ++i) { | ||
398 | counter = cpuhw->counter[i]; | ||
399 | if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) { | ||
400 | power_perf_read(counter); | ||
401 | write_pmc(counter->hw.idx, 0); | ||
402 | counter->hw.idx = 0; | ||
403 | } | ||
404 | } | ||
405 | |||
406 | /* | ||
407 | * Initialize the PMCs for all the new and moved counters. | ||
408 | */ | ||
409 | for (i = 0; i < cpuhw->n_counters; ++i) { | ||
410 | counter = cpuhw->counter[i]; | ||
411 | if (counter->hw.idx) | ||
412 | continue; | ||
413 | val = 0; | ||
414 | if (counter->hw_event.irq_period) { | ||
415 | left = atomic64_read(&counter->hw.period_left); | ||
416 | if (left < 0x80000000L) | ||
417 | val = 0x80000000L - left; | ||
418 | } | ||
419 | atomic64_set(&counter->hw.prev_count, val); | ||
420 | counter->hw.idx = hwc_index[i] + 1; | ||
421 | write_pmc(counter->hw.idx, val); | ||
422 | perf_counter_update_userpage(counter); | ||
423 | } | ||
424 | mb(); | ||
425 | cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; | ||
426 | mtspr(SPRN_MMCR0, cpuhw->mmcr[0]); | ||
427 | |||
428 | out: | ||
429 | local_irq_restore(flags); | ||
430 | } | ||
431 | |||
432 | static int collect_events(struct perf_counter *group, int max_count, | ||
433 | struct perf_counter *ctrs[], unsigned int *events) | ||
434 | { | ||
435 | int n = 0; | ||
436 | struct perf_counter *counter; | ||
437 | |||
438 | if (!is_software_counter(group)) { | ||
439 | if (n >= max_count) | ||
440 | return -1; | ||
441 | ctrs[n] = group; | ||
442 | events[n++] = group->hw.config; | ||
443 | } | ||
444 | list_for_each_entry(counter, &group->sibling_list, list_entry) { | ||
445 | if (!is_software_counter(counter) && | ||
446 | counter->state != PERF_COUNTER_STATE_OFF) { | ||
447 | if (n >= max_count) | ||
448 | return -1; | ||
449 | ctrs[n] = counter; | ||
450 | events[n++] = counter->hw.config; | ||
451 | } | ||
452 | } | ||
453 | return n; | ||
454 | } | ||
455 | |||
456 | static void counter_sched_in(struct perf_counter *counter, int cpu) | ||
457 | { | ||
458 | counter->state = PERF_COUNTER_STATE_ACTIVE; | ||
459 | counter->oncpu = cpu; | ||
460 | counter->tstamp_running += counter->ctx->time_now - | ||
461 | counter->tstamp_stopped; | ||
462 | if (is_software_counter(counter)) | ||
463 | counter->hw_ops->enable(counter); | ||
464 | } | ||
465 | |||
466 | /* | ||
467 | * Called to enable a whole group of counters. | ||
468 | * Returns 1 if the group was enabled, or -EAGAIN if it could not be. | ||
469 | * Assumes the caller has disabled interrupts and has | ||
470 | * frozen the PMU with hw_perf_save_disable. | ||
471 | */ | ||
472 | int hw_perf_group_sched_in(struct perf_counter *group_leader, | ||
473 | struct perf_cpu_context *cpuctx, | ||
474 | struct perf_counter_context *ctx, int cpu) | ||
475 | { | ||
476 | struct cpu_hw_counters *cpuhw; | ||
477 | long i, n, n0; | ||
478 | struct perf_counter *sub; | ||
479 | |||
480 | cpuhw = &__get_cpu_var(cpu_hw_counters); | ||
481 | n0 = cpuhw->n_counters; | ||
482 | n = collect_events(group_leader, ppmu->n_counter - n0, | ||
483 | &cpuhw->counter[n0], &cpuhw->events[n0]); | ||
484 | if (n < 0) | ||
485 | return -EAGAIN; | ||
486 | if (check_excludes(cpuhw->counter, n0, n)) | ||
487 | return -EAGAIN; | ||
488 | if (power_check_constraints(cpuhw->events, n + n0)) | ||
489 | return -EAGAIN; | ||
490 | cpuhw->n_counters = n0 + n; | ||
491 | cpuhw->n_added += n; | ||
492 | |||
493 | /* | ||
494 | * OK, this group can go on; update counter states etc., | ||
495 | * and enable any software counters | ||
496 | */ | ||
497 | for (i = n0; i < n0 + n; ++i) | ||
498 | cpuhw->counter[i]->hw.config = cpuhw->events[i]; | ||
499 | cpuctx->active_oncpu += n; | ||
500 | n = 1; | ||
501 | counter_sched_in(group_leader, cpu); | ||
502 | list_for_each_entry(sub, &group_leader->sibling_list, list_entry) { | ||
503 | if (sub->state != PERF_COUNTER_STATE_OFF) { | ||
504 | counter_sched_in(sub, cpu); | ||
505 | ++n; | ||
506 | } | ||
507 | } | ||
508 | ctx->nr_active += n; | ||
509 | |||
510 | return 1; | ||
511 | } | ||
512 | |||
513 | /* | ||
514 | * Add a counter to the PMU. | ||
515 | * If all counters are not already frozen, then we disable and | ||
516 | * re-enable the PMU in order to get hw_perf_restore to do the | ||
517 | * actual work of reconfiguring the PMU. | ||
518 | */ | ||
519 | static int power_perf_enable(struct perf_counter *counter) | ||
520 | { | ||
521 | struct cpu_hw_counters *cpuhw; | ||
522 | unsigned long flags; | ||
523 | u64 pmudis; | ||
524 | int n0; | ||
525 | int ret = -EAGAIN; | ||
526 | |||
527 | local_irq_save(flags); | ||
528 | pmudis = hw_perf_save_disable(); | ||
529 | |||
530 | /* | ||
531 | * Add the counter to the list (if there is room) | ||
532 | * and check whether the total set is still feasible. | ||
533 | */ | ||
534 | cpuhw = &__get_cpu_var(cpu_hw_counters); | ||
535 | n0 = cpuhw->n_counters; | ||
536 | if (n0 >= ppmu->n_counter) | ||
537 | goto out; | ||
538 | cpuhw->counter[n0] = counter; | ||
539 | cpuhw->events[n0] = counter->hw.config; | ||
540 | if (check_excludes(cpuhw->counter, n0, 1)) | ||
541 | goto out; | ||
542 | if (power_check_constraints(cpuhw->events, n0 + 1)) | ||
543 | goto out; | ||
544 | |||
545 | counter->hw.config = cpuhw->events[n0]; | ||
546 | ++cpuhw->n_counters; | ||
547 | ++cpuhw->n_added; | ||
548 | |||
549 | ret = 0; | ||
550 | out: | ||
551 | hw_perf_restore(pmudis); | ||
552 | local_irq_restore(flags); | ||
553 | return ret; | ||
554 | } | ||
555 | |||
556 | /* | ||
557 | * Remove a counter from the PMU. | ||
558 | */ | ||
559 | static void power_perf_disable(struct perf_counter *counter) | ||
560 | { | ||
561 | struct cpu_hw_counters *cpuhw; | ||
562 | long i; | ||
563 | u64 pmudis; | ||
564 | unsigned long flags; | ||
565 | |||
566 | local_irq_save(flags); | ||
567 | pmudis = hw_perf_save_disable(); | ||
568 | |||
569 | power_perf_read(counter); | ||
570 | |||
571 | cpuhw = &__get_cpu_var(cpu_hw_counters); | ||
572 | for (i = 0; i < cpuhw->n_counters; ++i) { | ||
573 | if (counter == cpuhw->counter[i]) { | ||
574 | while (++i < cpuhw->n_counters) | ||
575 | cpuhw->counter[i-1] = cpuhw->counter[i]; | ||
576 | --cpuhw->n_counters; | ||
577 | ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr); | ||
578 | write_pmc(counter->hw.idx, 0); | ||
579 | counter->hw.idx = 0; | ||
580 | perf_counter_update_userpage(counter); | ||
581 | break; | ||
582 | } | ||
583 | } | ||
584 | if (cpuhw->n_counters == 0) { | ||
585 | /* disable exceptions if no counters are running */ | ||
586 | cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); | ||
587 | } | ||
588 | |||
589 | hw_perf_restore(pmudis); | ||
590 | local_irq_restore(flags); | ||
591 | } | ||
592 | |||
593 | struct hw_perf_counter_ops power_perf_ops = { | ||
594 | .enable = power_perf_enable, | ||
595 | .disable = power_perf_disable, | ||
596 | .read = power_perf_read | ||
597 | }; | ||
598 | |||
599 | /* Number of perf_counters counting hardware events */ | ||
600 | static atomic_t num_counters; | ||
601 | /* Used to avoid races in calling reserve/release_pmc_hardware */ | ||
602 | static DEFINE_MUTEX(pmc_reserve_mutex); | ||
603 | |||
604 | /* | ||
605 | * Release the PMU if this is the last perf_counter. | ||
606 | */ | ||
607 | static void hw_perf_counter_destroy(struct perf_counter *counter) | ||
608 | { | ||
609 | if (!atomic_add_unless(&num_counters, -1, 1)) { | ||
610 | mutex_lock(&pmc_reserve_mutex); | ||
611 | if (atomic_dec_return(&num_counters) == 0) | ||
612 | release_pmc_hardware(); | ||
613 | mutex_unlock(&pmc_reserve_mutex); | ||
614 | } | ||
615 | } | ||
616 | |||
617 | const struct hw_perf_counter_ops * | ||
618 | hw_perf_counter_init(struct perf_counter *counter) | ||
619 | { | ||
620 | unsigned long ev; | ||
621 | struct perf_counter *ctrs[MAX_HWCOUNTERS]; | ||
622 | unsigned int events[MAX_HWCOUNTERS]; | ||
623 | int n; | ||
624 | int err; | ||
625 | |||
626 | if (!ppmu) | ||
627 | return ERR_PTR(-ENXIO); | ||
628 | if ((s64)counter->hw_event.irq_period < 0) | ||
629 | return ERR_PTR(-EINVAL); | ||
630 | if (!perf_event_raw(&counter->hw_event)) { | ||
631 | ev = perf_event_id(&counter->hw_event); | ||
632 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) | ||
633 | return ERR_PTR(-EOPNOTSUPP); | ||
634 | ev = ppmu->generic_events[ev]; | ||
635 | } else { | ||
636 | ev = perf_event_config(&counter->hw_event); | ||
637 | } | ||
638 | counter->hw.config_base = ev; | ||
639 | counter->hw.idx = 0; | ||
640 | |||
641 | /* | ||
642 | * If we are not running on a hypervisor, force the | ||
643 | * exclude_hv bit to 0 so that we don't care what | ||
644 | * the user set it to. | ||
645 | */ | ||
646 | if (!firmware_has_feature(FW_FEATURE_LPAR)) | ||
647 | counter->hw_event.exclude_hv = 0; | ||
648 | |||
649 | /* | ||
650 | * If this is in a group, check if it can go on with all the | ||
651 | * other hardware counters in the group. We assume the counter | ||
652 | * hasn't been linked into its leader's sibling list at this point. | ||
653 | */ | ||
654 | n = 0; | ||
655 | if (counter->group_leader != counter) { | ||
656 | n = collect_events(counter->group_leader, ppmu->n_counter - 1, | ||
657 | ctrs, events); | ||
658 | if (n < 0) | ||
659 | return ERR_PTR(-EINVAL); | ||
660 | } | ||
661 | events[n] = ev; | ||
662 | ctrs[n] = counter; | ||
663 | if (check_excludes(ctrs, n, 1)) | ||
664 | return ERR_PTR(-EINVAL); | ||
665 | if (power_check_constraints(events, n + 1)) | ||
666 | return ERR_PTR(-EINVAL); | ||
667 | |||
668 | counter->hw.config = events[n]; | ||
669 | atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period); | ||
670 | |||
671 | /* | ||
672 | * See if we need to reserve the PMU. | ||
673 | * If no counters are currently in use, then we have to take a | ||
674 | * mutex to ensure that we don't race with another task doing | ||
675 | * reserve_pmc_hardware or release_pmc_hardware. | ||
676 | */ | ||
677 | err = 0; | ||
678 | if (!atomic_inc_not_zero(&num_counters)) { | ||
679 | mutex_lock(&pmc_reserve_mutex); | ||
680 | if (atomic_read(&num_counters) == 0 && | ||
681 | reserve_pmc_hardware(perf_counter_interrupt)) | ||
682 | err = -EBUSY; | ||
683 | else | ||
684 | atomic_inc(&num_counters); | ||
685 | mutex_unlock(&pmc_reserve_mutex); | ||
686 | } | ||
687 | counter->destroy = hw_perf_counter_destroy; | ||
688 | |||
689 | if (err) | ||
690 | return ERR_PTR(err); | ||
691 | return &power_perf_ops; | ||
692 | } | ||
693 | |||
694 | /* | ||
695 | * A counter has overflowed; update its count and record | ||
696 | * things if requested. Note that interrupts are hard-disabled | ||
697 | * here so there is no possibility of being interrupted. | ||
698 | */ | ||
699 | static void record_and_restart(struct perf_counter *counter, long val, | ||
700 | struct pt_regs *regs) | ||
701 | { | ||
702 | s64 prev, delta, left; | ||
703 | int record = 0; | ||
704 | |||
705 | /* we don't have to worry about interrupts here */ | ||
706 | prev = atomic64_read(&counter->hw.prev_count); | ||
707 | delta = (val - prev) & 0xfffffffful; | ||
708 | atomic64_add(delta, &counter->count); | ||
709 | |||
710 | /* | ||
711 | * See if the total period for this counter has expired, | ||
712 | * and update for the next period. | ||
713 | */ | ||
714 | val = 0; | ||
715 | left = atomic64_read(&counter->hw.period_left) - delta; | ||
716 | if (counter->hw_event.irq_period) { | ||
717 | if (left <= 0) { | ||
718 | left += counter->hw_event.irq_period; | ||
719 | if (left <= 0) | ||
720 | left = counter->hw_event.irq_period; | ||
721 | record = 1; | ||
722 | } | ||
723 | if (left < 0x80000000L) | ||
724 | val = 0x80000000L - left; | ||
725 | } | ||
726 | write_pmc(counter->hw.idx, val); | ||
727 | atomic64_set(&counter->hw.prev_count, val); | ||
728 | atomic64_set(&counter->hw.period_left, left); | ||
729 | perf_counter_update_userpage(counter); | ||
730 | |||
731 | /* | ||
732 | * Finally record data if requested. | ||
733 | */ | ||
734 | if (record) | ||
735 | perf_counter_overflow(counter, 1, regs); | ||
736 | } | ||
737 | |||
738 | /* | ||
739 | * Performance monitor interrupt stuff | ||
740 | */ | ||
741 | static void perf_counter_interrupt(struct pt_regs *regs) | ||
742 | { | ||
743 | int i; | ||
744 | struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters); | ||
745 | struct perf_counter *counter; | ||
746 | long val; | ||
747 | int found = 0; | ||
748 | |||
749 | for (i = 0; i < cpuhw->n_counters; ++i) { | ||
750 | counter = cpuhw->counter[i]; | ||
751 | val = read_pmc(counter->hw.idx); | ||
752 | if ((int)val < 0) { | ||
753 | /* counter has overflowed */ | ||
754 | found = 1; | ||
755 | record_and_restart(counter, val, regs); | ||
756 | } | ||
757 | } | ||
758 | |||
759 | /* | ||
760 | * In case we didn't find and reset the counter that caused | ||
761 | * the interrupt, scan all counters and reset any that are | ||
762 | * negative, to avoid getting continual interrupts. | ||
763 | * Any that we processed in the previous loop will not be negative. | ||
764 | */ | ||
765 | if (!found) { | ||
766 | for (i = 0; i < ppmu->n_counter; ++i) { | ||
767 | val = read_pmc(i + 1); | ||
768 | if ((int)val < 0) | ||
769 | write_pmc(i + 1, 0); | ||
770 | } | ||
771 | } | ||
772 | |||
773 | /* | ||
774 | * Reset MMCR0 to its normal value. This will set PMXE and | ||
775 | * clear FC (freeze counters) and PMAO (perf mon alert occurred) | ||
776 | * and thus allow interrupts to occur again. | ||
777 | * XXX might want to use MSR.PM to keep the counters frozen until | ||
778 | * we get back out of this interrupt. | ||
779 | */ | ||
780 | mtspr(SPRN_MMCR0, cpuhw->mmcr[0]); | ||
781 | |||
782 | /* | ||
783 | * If we need a wakeup, check whether interrupts were soft-enabled | ||
784 | * when we took the interrupt. If they were, we can wake stuff up | ||
785 | * immediately; otherwise we'll have do the wakeup when interrupts | ||
786 | * get soft-enabled. | ||
787 | */ | ||
788 | if (test_perf_counter_pending() && regs->softe) { | ||
789 | irq_enter(); | ||
790 | clear_perf_counter_pending(); | ||
791 | perf_counter_do_pending(); | ||
792 | irq_exit(); | ||
793 | } | ||
794 | } | ||
795 | |||
796 | void hw_perf_counter_setup(int cpu) | ||
797 | { | ||
798 | struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu); | ||
799 | |||
800 | memset(cpuhw, 0, sizeof(*cpuhw)); | ||
801 | cpuhw->mmcr[0] = MMCR0_FC; | ||
802 | } | ||
803 | |||
804 | extern struct power_pmu power4_pmu; | ||
805 | extern struct power_pmu ppc970_pmu; | ||
806 | extern struct power_pmu power5_pmu; | ||
807 | extern struct power_pmu power5p_pmu; | ||
808 | extern struct power_pmu power6_pmu; | ||
809 | |||
810 | static int init_perf_counters(void) | ||
811 | { | ||
812 | unsigned long pvr; | ||
813 | |||
814 | /* XXX should get this from cputable */ | ||
815 | pvr = mfspr(SPRN_PVR); | ||
816 | switch (PVR_VER(pvr)) { | ||
817 | case PV_POWER4: | ||
818 | case PV_POWER4p: | ||
819 | ppmu = &power4_pmu; | ||
820 | break; | ||
821 | case PV_970: | ||
822 | case PV_970FX: | ||
823 | case PV_970MP: | ||
824 | ppmu = &ppc970_pmu; | ||
825 | break; | ||
826 | case PV_POWER5: | ||
827 | ppmu = &power5_pmu; | ||
828 | break; | ||
829 | case PV_POWER5p: | ||
830 | ppmu = &power5p_pmu; | ||
831 | break; | ||
832 | case 0x3e: | ||
833 | ppmu = &power6_pmu; | ||
834 | break; | ||
835 | } | ||
836 | |||
837 | /* | ||
838 | * Use FCHV to ignore kernel events if MSR.HV is set. | ||
839 | */ | ||
840 | if (mfmsr() & MSR_HV) | ||
841 | freeze_counters_kernel = MMCR0_FCHV; | ||
842 | |||
843 | return 0; | ||
844 | } | ||
845 | |||
846 | arch_initcall(init_perf_counters); | ||
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
new file mode 100644
index 000000000000..1407b19ab619
--- /dev/null
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -0,0 +1,557 @@
1 | /* | ||
2 | * Performance counter support for POWER4 (GP) and POWER4+ (GQ) processors. | ||
3 | * | ||
4 | * Copyright 2009 Paul Mackerras, IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/perf_counter.h> | ||
13 | #include <asm/reg.h> | ||
14 | |||
15 | /* | ||
16 | * Bits in event code for POWER4 | ||
17 | */ | ||
18 | #define PM_PMC_SH 12 /* PMC number (1-based) for direct events */ | ||
19 | #define PM_PMC_MSK 0xf | ||
20 | #define PM_UNIT_SH 8 /* TTMMUX number and setting - unit select */ | ||
21 | #define PM_UNIT_MSK 0xf | ||
22 | #define PM_LOWER_SH 6 | ||
23 | #define PM_LOWER_MSK 1 | ||
24 | #define PM_LOWER_MSKS 0x40 | ||
25 | #define PM_BYTE_SH 4 /* Byte number of event bus to use */ | ||
26 | #define PM_BYTE_MSK 3 | ||
27 | #define PM_PMCSEL_MSK 7 | ||
28 | |||
29 | /* | ||
30 | * Unit code values | ||
31 | */ | ||
32 | #define PM_FPU 1 | ||
33 | #define PM_ISU1 2 | ||
34 | #define PM_IFU 3 | ||
35 | #define PM_IDU0 4 | ||
36 | #define PM_ISU1_ALT 6 | ||
37 | #define PM_ISU2 7 | ||
38 | #define PM_IFU_ALT 8 | ||
39 | #define PM_LSU0 9 | ||
40 | #define PM_LSU1 0xc | ||
41 | #define PM_GPS 0xf | ||
42 | |||
43 | /* | ||
44 | * Bits in MMCR0 for POWER4 | ||
45 | */ | ||
46 | #define MMCR0_PMC1SEL_SH 8 | ||
47 | #define MMCR0_PMC2SEL_SH 1 | ||
48 | #define MMCR_PMCSEL_MSK 0x1f | ||
49 | |||
50 | /* | ||
51 | * Bits in MMCR1 for POWER4 | ||
52 | */ | ||
53 | #define MMCR1_TTM0SEL_SH 62 | ||
54 | #define MMCR1_TTC0SEL_SH 61 | ||
55 | #define MMCR1_TTM1SEL_SH 59 | ||
56 | #define MMCR1_TTC1SEL_SH 58 | ||
57 | #define MMCR1_TTM2SEL_SH 56 | ||
58 | #define MMCR1_TTC2SEL_SH 55 | ||
59 | #define MMCR1_TTM3SEL_SH 53 | ||
60 | #define MMCR1_TTC3SEL_SH 52 | ||
61 | #define MMCR1_TTMSEL_MSK 3 | ||
62 | #define MMCR1_TD_CP_DBG0SEL_SH 50 | ||
63 | #define MMCR1_TD_CP_DBG1SEL_SH 48 | ||
64 | #define MMCR1_TD_CP_DBG2SEL_SH 46 | ||
65 | #define MMCR1_TD_CP_DBG3SEL_SH 44 | ||
66 | #define MMCR1_DEBUG0SEL_SH 43 | ||
67 | #define MMCR1_DEBUG1SEL_SH 42 | ||
68 | #define MMCR1_DEBUG2SEL_SH 41 | ||
69 | #define MMCR1_DEBUG3SEL_SH 40 | ||
70 | #define MMCR1_PMC1_ADDER_SEL_SH 39 | ||
71 | #define MMCR1_PMC2_ADDER_SEL_SH 38 | ||
72 | #define MMCR1_PMC6_ADDER_SEL_SH 37 | ||
73 | #define MMCR1_PMC5_ADDER_SEL_SH 36 | ||
74 | #define MMCR1_PMC8_ADDER_SEL_SH 35 | ||
75 | #define MMCR1_PMC7_ADDER_SEL_SH 34 | ||
76 | #define MMCR1_PMC3_ADDER_SEL_SH 33 | ||
77 | #define MMCR1_PMC4_ADDER_SEL_SH 32 | ||
78 | #define MMCR1_PMC3SEL_SH 27 | ||
79 | #define MMCR1_PMC4SEL_SH 22 | ||
80 | #define MMCR1_PMC5SEL_SH 17 | ||
81 | #define MMCR1_PMC6SEL_SH 12 | ||
82 | #define MMCR1_PMC7SEL_SH 7 | ||
83 | #define MMCR1_PMC8SEL_SH 2 /* note bit 0 is in MMCRA for GP */ | ||
84 | |||
85 | static short mmcr1_adder_bits[8] = { | ||
86 | MMCR1_PMC1_ADDER_SEL_SH, | ||
87 | MMCR1_PMC2_ADDER_SEL_SH, | ||
88 | MMCR1_PMC3_ADDER_SEL_SH, | ||
89 | MMCR1_PMC4_ADDER_SEL_SH, | ||
90 | MMCR1_PMC5_ADDER_SEL_SH, | ||
91 | MMCR1_PMC6_ADDER_SEL_SH, | ||
92 | MMCR1_PMC7_ADDER_SEL_SH, | ||
93 | MMCR1_PMC8_ADDER_SEL_SH | ||
94 | }; | ||
95 | |||
96 | /* | ||
97 | * Bits in MMCRA | ||
98 | */ | ||
99 | #define MMCRA_PMC8SEL0_SH 17 /* PMC8SEL bit 0 for GP */ | ||
100 | |||
101 | /* | ||
102 | * Layout of constraint bits: | ||
103 | * 6666555555555544444444443333333333222222222211111111110000000000 | ||
104 | * 3210987654321098765432109876543210987654321098765432109876543210 | ||
105 | * |[ >[ >[ >|||[ >[ >< >< >< >< ><><><><><><><><> | ||
106 | * | UC1 UC2 UC3 ||| PS1 PS2 B0 B1 B2 B3 P1P2P3P4P5P6P7P8 | ||
107 | * \SMPL ||\TTC3SEL | ||
108 | * |\TTC_IFU_SEL | ||
109 | * \TTM2SEL0 | ||
110 | * | ||
111 | * SMPL - SAMPLE_ENABLE constraint | ||
112 | * 56: SAMPLE_ENABLE value 0x0100_0000_0000_0000 | ||
113 | * | ||
114 | * UC1 - unit constraint 1: can't have all three of FPU/ISU1/IDU0|ISU2 | ||
115 | * 55: UC1 error 0x0080_0000_0000_0000 | ||
116 | * 54: FPU events needed 0x0040_0000_0000_0000 | ||
117 | * 53: ISU1 events needed 0x0020_0000_0000_0000 | ||
118 | * 52: IDU0|ISU2 events needed 0x0010_0000_0000_0000 | ||
119 | * | ||
120 | * UC2 - unit constraint 2: can't have all three of FPU/IFU/LSU0 | ||
121 | * 51: UC2 error 0x0008_0000_0000_0000 | ||
122 | * 50: FPU events needed 0x0004_0000_0000_0000 | ||
123 | * 49: IFU events needed 0x0002_0000_0000_0000 | ||
124 | * 48: LSU0 events needed 0x0001_0000_0000_0000 | ||
125 | * | ||
126 | * UC3 - unit constraint 3: can't have all four of LSU0/IFU/IDU0|ISU2/ISU1 | ||
127 | * 47: UC3 error 0x8000_0000_0000 | ||
128 | * 46: LSU0 events needed 0x4000_0000_0000 | ||
129 | * 45: IFU events needed 0x2000_0000_0000 | ||
130 | * 44: IDU0|ISU2 events needed 0x1000_0000_0000 | ||
131 | * 43: ISU1 events needed 0x0800_0000_0000 | ||
132 | * | ||
133 | * TTM2SEL0 | ||
134 | * 42: 0 = IDU0 events needed | ||
135 | * 1 = ISU2 events needed 0x0400_0000_0000 | ||
136 | * | ||
137 | * TTC_IFU_SEL | ||
138 | * 41: 0 = IFU.U events needed | ||
139 | * 1 = IFU.L events needed 0x0200_0000_0000 | ||
140 | * | ||
141 | * TTC3SEL | ||
142 | * 40: 0 = LSU1.U events needed | ||
143 | * 1 = LSU1.L events needed 0x0100_0000_0000 | ||
144 | * | ||
145 | * PS1 | ||
146 | * 39: PS1 error 0x0080_0000_0000 | ||
147 | * 36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000 | ||
148 | * | ||
149 | * PS2 | ||
150 | * 35: PS2 error 0x0008_0000_0000 | ||
151 | * 32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000 | ||
152 | * | ||
153 | * B0 | ||
154 | * 28-31: Byte 0 event source 0xf000_0000 | ||
155 | * 1 = FPU | ||
156 | * 2 = ISU1 | ||
157 | * 3 = IFU | ||
158 | * 4 = IDU0 | ||
159 | * 7 = ISU2 | ||
160 | * 9 = LSU0 | ||
161 | * c = LSU1 | ||
162 | * f = GPS | ||
163 | * | ||
164 | * B1, B2, B3 | ||
165 | * 24-27, 20-23, 16-19: Byte 1, 2, 3 event sources | ||
166 | * | ||
167 | * P8 | ||
168 | * 15: P8 error 0x8000 | ||
169 | * 14-15: Count of events needing PMC8 | ||
170 | * | ||
171 | * P1..P7 | ||
172 | * 0-13: Count of events needing PMC1..PMC7 | ||
173 | * | ||
174 | * Note: this doesn't allow events using IFU.U to be combined with events | ||
175 | * using IFU.L, though that is feasible (using TTM0 and TTM2). However | ||
176 | * there are no listed events for IFU.L (they are debug events not | ||
177 | * verified for performance monitoring) so this shouldn't cause a | ||
178 | * problem. | ||
179 | */ | ||
180 | |||
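
As a reading aid (not part of the patch), the first entry of the p4_unitinfo[] table below can be decoded against the layout comment above; every bit named here comes from that comment:

	/* PM_FPU entry: value 0x0044000000000000, mask 0x0088000000000000 */
	#define UC1_ERR	(1ull << 55)	/* UC1 error bit (watched by the mask) */
	#define UC1_FPU	(1ull << 54)	/* UC1: FPU events needed */
	#define UC2_ERR	(1ull << 51)	/* UC2 error bit (watched by the mask) */
	#define UC2_FPU	(1ull << 50)	/* UC2: FPU events needed */

	/* value = UC1_FPU | UC2_FPU: an FPU event flags "FPU needed" in
	 * unit constraints 1 and 2.  mask = UC1_ERR | UC2_ERR: if the
	 * other two units of either constraint are also in use, the
	 * test_adder arithmetic carries into the watched error bit and
	 * the combination is rejected. */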
181 | static struct unitinfo { | ||
182 | u64 value, mask; | ||
183 | int unit; | ||
184 | int lowerbit; | ||
185 | } p4_unitinfo[16] = { | ||
186 | [PM_FPU] = { 0x44000000000000ull, 0x88000000000000ull, PM_FPU, 0 }, | ||
187 | [PM_ISU1] = { 0x20080000000000ull, 0x88000000000000ull, PM_ISU1, 0 }, | ||
188 | [PM_ISU1_ALT] = | ||
189 | { 0x20080000000000ull, 0x88000000000000ull, PM_ISU1, 0 }, | ||
190 | [PM_IFU] = { 0x02200000000000ull, 0x08820000000000ull, PM_IFU, 41 }, | ||
191 | [PM_IFU_ALT] = | ||
192 | { 0x02200000000000ull, 0x08820000000000ull, PM_IFU, 41 }, | ||
193 | [PM_IDU0] = { 0x10100000000000ull, 0x80840000000000ull, PM_IDU0, 1 }, | ||
194 | [PM_ISU2] = { 0x10140000000000ull, 0x80840000000000ull, PM_ISU2, 0 }, | ||
195 | [PM_LSU0] = { 0x01400000000000ull, 0x08800000000000ull, PM_LSU0, 0 }, | ||
196 | [PM_LSU1] = { 0x00000000000000ull, 0x00010000000000ull, PM_LSU1, 40 }, | ||
197 | [PM_GPS] = { 0x00000000000000ull, 0x00000000000000ull, PM_GPS, 0 } | ||
198 | }; | ||
199 | |||
200 | static unsigned char direct_marked_event[8] = { | ||
201 | (1<<2) | (1<<3), /* PMC1: PM_MRK_GRP_DISP, PM_MRK_ST_CMPL */ | ||
202 | (1<<3) | (1<<5), /* PMC2: PM_THRESH_TIMEO, PM_MRK_BRU_FIN */ | ||
203 | (1<<3), /* PMC3: PM_MRK_ST_CMPL_INT */ | ||
204 | (1<<4) | (1<<5), /* PMC4: PM_MRK_GRP_CMPL, PM_MRK_CRU_FIN */ | ||
205 | (1<<4) | (1<<5), /* PMC5: PM_MRK_GRP_TIMEO */ | ||
206 | (1<<3) | (1<<4) | (1<<5), | ||
207 | /* PMC6: PM_MRK_ST_GPS, PM_MRK_FXU_FIN, PM_MRK_GRP_ISSUED */ | ||
208 | (1<<4) | (1<<5), /* PMC7: PM_MRK_FPU_FIN, PM_MRK_INST_FIN */ | ||
209 | (1<<4), /* PMC8: PM_MRK_LSU_FIN */ | ||
210 | }; | ||
211 | |||
212 | /* | ||
213 | * Returns 1 if event counts things relating to marked instructions | ||
214 | * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. | ||
215 | */ | ||
216 | static int p4_marked_instr_event(unsigned int event) | ||
217 | { | ||
218 | int pmc, psel, unit, byte, bit; | ||
219 | unsigned int mask; | ||
220 | |||
221 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | ||
222 | psel = event & PM_PMCSEL_MSK; | ||
223 | if (pmc) { | ||
224 | if (direct_marked_event[pmc - 1] & (1 << psel)) | ||
225 | return 1; | ||
226 | if (psel == 0) /* add events */ | ||
227 | bit = (pmc <= 4)? pmc - 1: 8 - pmc; | ||
228 | else if (psel == 6) /* decode events */ | ||
229 | bit = 4; | ||
230 | else | ||
231 | return 0; | ||
232 | } else | ||
233 | bit = psel; | ||
234 | |||
235 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; | ||
236 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; | ||
237 | mask = 0; | ||
238 | switch (unit) { | ||
239 | case PM_LSU1: | ||
240 | if (event & PM_LOWER_MSKS) | ||
241 | mask = 1 << 28; /* byte 7 bit 4 */ | ||
242 | else | ||
243 | mask = 6 << 24; /* byte 3 bits 1 and 2 */ | ||
244 | break; | ||
245 | case PM_LSU0: | ||
246 | /* byte 3, bit 3; byte 2 bits 0,2,3,4,5; byte 1 */ | ||
247 | mask = 0x083dff00; | ||
248 | } | ||
249 | return (mask >> (byte * 8 + bit)) & 1; | ||
250 | } | ||
251 | |||
252 | static int p4_get_constraint(unsigned int event, u64 *maskp, u64 *valp) | ||
253 | { | ||
254 | int pmc, byte, unit, lower, sh; | ||
255 | u64 mask = 0, value = 0; | ||
256 | int grp = -1; | ||
257 | |||
258 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | ||
259 | if (pmc) { | ||
260 | if (pmc > 8) | ||
261 | return -1; | ||
262 | sh = (pmc - 1) * 2; | ||
263 | mask |= 2 << sh; | ||
264 | value |= 1 << sh; | ||
265 | grp = ((pmc - 1) >> 1) & 1; | ||
266 | } | ||
267 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; | ||
268 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; | ||
269 | if (unit) { | ||
270 | lower = (event >> PM_LOWER_SH) & PM_LOWER_MSK; | ||
271 | |||
272 | /* | ||
273 | * Bus events on bytes 0 and 2 can be counted | ||
274 | * on PMC1/2/5/6; bytes 1 and 3 on PMC3/4/7/8. | ||
275 | */ | ||
276 | if (!pmc) | ||
277 | grp = byte & 1; | ||
278 | |||
279 | if (!p4_unitinfo[unit].unit) | ||
280 | return -1; | ||
281 | mask |= p4_unitinfo[unit].mask; | ||
282 | value |= p4_unitinfo[unit].value; | ||
283 | sh = p4_unitinfo[unit].lowerbit; | ||
284 | if (sh > 1) | ||
285 | value |= (u64)lower << sh; | ||
286 | else if (lower != sh) | ||
287 | return -1; | ||
288 | unit = p4_unitinfo[unit].unit; | ||
289 | |||
290 | /* Set byte lane select field */ | ||
291 | mask |= 0xfULL << (28 - 4 * byte); | ||
292 | value |= (u64)unit << (28 - 4 * byte); | ||
293 | } | ||
294 | if (grp == 0) { | ||
295 | /* increment PMC1/2/5/6 field */ | ||
296 | mask |= 0x8000000000ull; | ||
297 | value |= 0x1000000000ull; | ||
298 | } else { | ||
299 | /* increment PMC3/4/7/8 field */ | ||
300 | mask |= 0x800000000ull; | ||
301 | value |= 0x100000000ull; | ||
302 | } | ||
303 | |||
304 | /* Marked instruction events need sample_enable set */ | ||
305 | if (p4_marked_instr_event(event)) { | ||
306 | mask |= 1ull << 56; | ||
307 | value |= 1ull << 56; | ||
308 | } | ||
309 | |||
310 | /* PMCSEL=6 decode events on byte 2 need sample_enable clear */ | ||
311 | if (pmc && (event & PM_PMCSEL_MSK) == 6 && byte == 2) | ||
312 | mask |= 1ull << 56; | ||
313 | |||
314 | *maskp = mask; | ||
315 | *valp = value; | ||
316 | return 0; | ||
317 | } | ||
318 | |||
319 | static unsigned int ppc_inst_cmpl[] = { | ||
320 | 0x1001, 0x4001, 0x6001, 0x7001, 0x8001 | ||
321 | }; | ||
322 | |||
323 | static int p4_get_alternatives(unsigned int event, unsigned int alt[]) | ||
324 | { | ||
325 | int i, j, na; | ||
326 | |||
327 | alt[0] = event; | ||
328 | na = 1; | ||
329 | |||
330 | /* 2 possibilities for PM_GRP_DISP_REJECT */ | ||
331 | if (event == 0x8003 || event == 0x0224) { | ||
332 | alt[1] = event ^ (0x8003 ^ 0x0224); | ||
333 | return 2; | ||
334 | } | ||
335 | |||
336 | /* 2 possibilities for PM_ST_MISS_L1 */ | ||
337 | if (event == 0x0c13 || event == 0x0c23) { | ||
338 | alt[1] = event ^ (0x0c13 ^ 0x0c23); | ||
339 | return 2; | ||
340 | } | ||
341 | |||
342 | /* several possibilities for PM_INST_CMPL */ | ||
343 | for (i = 0; i < ARRAY_SIZE(ppc_inst_cmpl); ++i) { | ||
344 | if (event == ppc_inst_cmpl[i]) { | ||
345 | for (j = 0; j < ARRAY_SIZE(ppc_inst_cmpl); ++j) | ||
346 | if (j != i) | ||
347 | alt[na++] = ppc_inst_cmpl[j]; | ||
348 | break; | ||
349 | } | ||
350 | } | ||
351 | |||
352 | return na; | ||
353 | } | ||
354 | |||
355 | static int p4_compute_mmcr(unsigned int event[], int n_ev, | ||
356 | unsigned int hwc[], u64 mmcr[]) | ||
357 | { | ||
358 | u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0; | ||
359 | unsigned int pmc, unit, byte, psel, lower; | ||
360 | unsigned int ttm, grp; | ||
361 | unsigned int pmc_inuse = 0; | ||
362 | unsigned int pmc_grp_use[2]; | ||
363 | unsigned char busbyte[4]; | ||
364 | unsigned char unituse[16]; | ||
365 | unsigned int unitlower = 0; | ||
366 | int i; | ||
367 | |||
368 | if (n_ev > 8) | ||
369 | return -1; | ||
370 | |||
371 | /* First pass to count resource use */ | ||
372 | pmc_grp_use[0] = pmc_grp_use[1] = 0; | ||
373 | memset(busbyte, 0, sizeof(busbyte)); | ||
374 | memset(unituse, 0, sizeof(unituse)); | ||
375 | for (i = 0; i < n_ev; ++i) { | ||
376 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | ||
377 | if (pmc) { | ||
378 | if (pmc_inuse & (1 << (pmc - 1))) | ||
379 | return -1; | ||
380 | pmc_inuse |= 1 << (pmc - 1); | ||
381 | /* count 1/2/5/6 vs 3/4/7/8 use */ | ||
382 | ++pmc_grp_use[((pmc - 1) >> 1) & 1]; | ||
383 | } | ||
384 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; | ||
385 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; | ||
386 | lower = (event[i] >> PM_LOWER_SH) & PM_LOWER_MSK; | ||
387 | if (unit) { | ||
388 | if (!pmc) | ||
389 | ++pmc_grp_use[byte & 1]; | ||
390 | if (unit == 6 || unit == 8) | ||
391 | /* map alt ISU1/IFU codes: 6->2, 8->3 */ | ||
392 | unit = (unit >> 1) - 1; | ||
393 | if (busbyte[byte] && busbyte[byte] != unit) | ||
394 | return -1; | ||
395 | busbyte[byte] = unit; | ||
396 | lower <<= unit; | ||
397 | if (unituse[unit] && lower != (unitlower & lower)) | ||
398 | return -1; | ||
399 | unituse[unit] = 1; | ||
400 | unitlower |= lower; | ||
401 | } | ||
402 | } | ||
403 | if (pmc_grp_use[0] > 4 || pmc_grp_use[1] > 4) | ||
404 | return -1; | ||
405 | |||
406 | /* | ||
407 | * Assign resources and set multiplexer selects. | ||
408 | * | ||
409 | * Units 1,2,3 are on TTM0, 4,6,7 on TTM1, 8,10 on TTM2. | ||
410 | * Each TTMx can only select one unit, but since | ||
411 | * units 2 and 6 are both ISU1, and 3 and 8 are both IFU, | ||
412 | * we have some choices. | ||
413 | */ | ||
414 | if (unituse[2] & (unituse[1] | (unituse[3] & unituse[9]))) { | ||
415 | unituse[6] = 1; /* Move 2 to 6 */ | ||
416 | unituse[2] = 0; | ||
417 | } | ||
418 | if (unituse[3] & (unituse[1] | unituse[2])) { | ||
419 | unituse[8] = 1; /* Move 3 to 8 */ | ||
420 | unituse[3] = 0; | ||
421 | unitlower = (unitlower & ~8) | ((unitlower & 8) << 5); | ||
422 | } | ||
423 | /* Check only one unit per TTMx */ | ||
424 | if (unituse[1] + unituse[2] + unituse[3] > 1 || | ||
425 | unituse[4] + unituse[6] + unituse[7] > 1 || | ||
426 | unituse[8] + unituse[9] > 1 || | ||
427 | (unituse[5] | unituse[10] | unituse[11] | | ||
428 | unituse[13] | unituse[14])) | ||
429 | return -1; | ||
430 | |||
431 | /* Set TTMxSEL fields. Note, units 1-3 => TTM0SEL codes 0-2 */ | ||
432 | mmcr1 |= (u64)(unituse[3] * 2 + unituse[2]) << MMCR1_TTM0SEL_SH; | ||
433 | mmcr1 |= (u64)(unituse[7] * 3 + unituse[6] * 2) << MMCR1_TTM1SEL_SH; | ||
434 | mmcr1 |= (u64)unituse[9] << MMCR1_TTM2SEL_SH; | ||
435 | |||
436 | /* Set TTCxSEL fields. */ | ||
437 | if (unitlower & 0xe) | ||
438 | mmcr1 |= 1ull << MMCR1_TTC0SEL_SH; | ||
439 | if (unitlower & 0xf0) | ||
440 | mmcr1 |= 1ull << MMCR1_TTC1SEL_SH; | ||
441 | if (unitlower & 0xf00) | ||
442 | mmcr1 |= 1ull << MMCR1_TTC2SEL_SH; | ||
443 | if (unitlower & 0x7000) | ||
444 | mmcr1 |= 1ull << MMCR1_TTC3SEL_SH; | ||
445 | |||
446 | /* Set byte lane select fields. */ | ||
447 | for (byte = 0; byte < 4; ++byte) { | ||
448 | unit = busbyte[byte]; | ||
449 | if (!unit) | ||
450 | continue; | ||
451 | if (unit == 0xf) { | ||
452 | /* special case for GPS */ | ||
453 | mmcr1 |= 1ull << (MMCR1_DEBUG0SEL_SH - byte); | ||
454 | } else { | ||
455 | if (!unituse[unit]) | ||
456 | ttm = unit - 1; /* 2->1, 3->2 */ | ||
457 | else | ||
458 | ttm = unit >> 2; | ||
459 | mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2*byte); | ||
460 | } | ||
461 | } | ||
462 | |||
463 | /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ | ||
464 | for (i = 0; i < n_ev; ++i) { | ||
465 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | ||
466 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; | ||
467 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; | ||
468 | psel = event[i] & PM_PMCSEL_MSK; | ||
469 | if (!pmc) { | ||
470 | /* Bus event or 00xxx direct event (off or cycles) */ | ||
471 | if (unit) | ||
472 | psel |= 0x10 | ((byte & 2) << 2); | ||
473 | for (pmc = 0; pmc < 8; ++pmc) { | ||
474 | if (pmc_inuse & (1 << pmc)) | ||
475 | continue; | ||
476 | grp = (pmc >> 1) & 1; | ||
477 | if (unit) { | ||
478 | if (grp == (byte & 1)) | ||
479 | break; | ||
480 | } else if (pmc_grp_use[grp] < 4) { | ||
481 | ++pmc_grp_use[grp]; | ||
482 | break; | ||
483 | } | ||
484 | } | ||
485 | pmc_inuse |= 1 << pmc; | ||
486 | } else { | ||
487 | /* Direct event */ | ||
488 | --pmc; | ||
489 | if (psel == 0 && (byte & 2)) | ||
490 | /* add events on higher-numbered bus */ | ||
491 | mmcr1 |= 1ull << mmcr1_adder_bits[pmc]; | ||
492 | else if (psel == 6 && byte == 3) | ||
493 | /* seem to need to set sample_enable here */ | ||
494 | mmcra |= MMCRA_SAMPLE_ENABLE; | ||
495 | psel |= 8; | ||
496 | } | ||
497 | if (pmc <= 1) | ||
498 | mmcr0 |= psel << (MMCR0_PMC1SEL_SH - 7 * pmc); | ||
499 | else | ||
500 | mmcr1 |= psel << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2)); | ||
501 | if (pmc == 7) /* PMC8 */ | ||
502 | mmcra |= (psel & 1) << MMCRA_PMC8SEL0_SH; | ||
503 | hwc[i] = pmc; | ||
504 | if (p4_marked_instr_event(event[i])) | ||
505 | mmcra |= MMCRA_SAMPLE_ENABLE; | ||
506 | } | ||
507 | |||
508 | if (pmc_inuse & 1) | ||
509 | mmcr0 |= MMCR0_PMC1CE; | ||
510 | if (pmc_inuse & 0xfe) | ||
511 | mmcr0 |= MMCR0_PMCjCE; | ||
512 | |||
513 | mmcra |= 0x2000; /* mark only one IOP per PPC instruction */ | ||
514 | |||
515 | /* Return MMCRx values */ | ||
516 | mmcr[0] = mmcr0; | ||
517 | mmcr[1] = mmcr1; | ||
518 | mmcr[2] = mmcra; | ||
519 | return 0; | ||
520 | } | ||
521 | |||
522 | static void p4_disable_pmc(unsigned int pmc, u64 mmcr[]) | ||
523 | { | ||
524 | /* | ||
525 | * Setting the PMCxSEL field to 0 disables PMC x. | ||
526 | * (Note that pmc is 0-based here, not 1-based.) | ||
527 | */ | ||
528 | if (pmc <= 1) { | ||
529 | mmcr[0] &= ~(0x1fUL << (MMCR0_PMC1SEL_SH - 7 * pmc)); | ||
530 | } else { | ||
531 | mmcr[1] &= ~(0x1fUL << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2))); | ||
532 | if (pmc == 7) | ||
533 | mmcr[2] &= ~(1UL << MMCRA_PMC8SEL0_SH); | ||
534 | } | ||
535 | } | ||
536 | |||
537 | static int p4_generic_events[] = { | ||
538 | [PERF_COUNT_CPU_CYCLES] = 7, | ||
539 | [PERF_COUNT_INSTRUCTIONS] = 0x1001, | ||
540 | [PERF_COUNT_CACHE_REFERENCES] = 0x8c10, /* PM_LD_REF_L1 */ | ||
541 | [PERF_COUNT_CACHE_MISSES] = 0x3c10, /* PM_LD_MISS_L1 */ | ||
542 | [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x330, /* PM_BR_ISSUED */ | ||
543 | [PERF_COUNT_BRANCH_MISSES] = 0x331, /* PM_BR_MPRED_CR */ | ||
544 | }; | ||
545 | |||
546 | struct power_pmu power4_pmu = { | ||
547 | .n_counter = 8, | ||
548 | .max_alternatives = 5, | ||
549 | .add_fields = 0x0000001100005555ull, | ||
550 | .test_adder = 0x0011083300000000ull, | ||
551 | .compute_mmcr = p4_compute_mmcr, | ||
552 | .get_constraint = p4_get_constraint, | ||
553 | .get_alternatives = p4_get_alternatives, | ||
554 | .disable_pmc = p4_disable_pmc, | ||
555 | .n_generic = ARRAY_SIZE(p4_generic_events), | ||
556 | .generic_events = p4_generic_events, | ||
557 | }; | ||
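
As a concrete illustration of the alternative-event handling above (a standalone sketch, not part of the patch; the table is a copy of ppc_inst_cmpl[] and the main() wrapper exists only for demonstration), this is how one encoding of PM_INST_CMPL expands into the full set of equivalent codes that p4_get_alternatives() would return:

    #include <stdio.h>

    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

    /* copied from power4-pmu.c above */
    static unsigned int ppc_inst_cmpl[] = {
        0x1001, 0x4001, 0x6001, 0x7001, 0x8001
    };

    int main(void)
    {
        unsigned int event = 0x1001;    /* one encoding of PM_INST_CMPL */
        unsigned int alt[ARRAY_SIZE(ppc_inst_cmpl)];
        int i, j, na = 1;

        alt[0] = event;
        for (i = 0; i < (int)ARRAY_SIZE(ppc_inst_cmpl); ++i) {
            if (event == ppc_inst_cmpl[i]) {
                for (j = 0; j < (int)ARRAY_SIZE(ppc_inst_cmpl); ++j)
                    if (j != i)
                        alt[na++] = ppc_inst_cmpl[j];
                break;
            }
        }
        /* prints: 0x1001 0x4001 0x6001 0x7001 0x8001 */
        for (i = 0; i < na; ++i)
            printf("%#x ", alt[i]);
        printf("\n");
        return 0;
    }

All five codes count the same thing, presumably so the event can be moved to a different PMC when its first choice is already taken; note that five is exactly the max_alternatives value declared in power4_pmu.
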
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c new file mode 100644 index 000000000000..cec21ea65b0e --- /dev/null +++ b/arch/powerpc/kernel/power5+-pmu.c | |||
@@ -0,0 +1,452 @@ | |||
1 | /* | ||
2 | * Performance counter support for POWER5+/++ (not POWER5) processors. | ||
3 | * | ||
4 | * Copyright 2009 Paul Mackerras, IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/perf_counter.h> | ||
13 | #include <asm/reg.h> | ||
14 | |||
15 | /* | ||
16 | * Bits in event code for POWER5+ (POWER5 GS) and POWER5++ (POWER5 GS DD3) | ||
17 | */ | ||
18 | #define PM_PMC_SH 20 /* PMC number (1-based) for direct events */ | ||
19 | #define PM_PMC_MSK 0xf | ||
20 | #define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) | ||
21 | #define PM_UNIT_SH 16 /* TTMMUX number and setting - unit select */ | ||
22 | #define PM_UNIT_MSK 0xf | ||
23 | #define PM_BYTE_SH 12 /* Byte number of event bus to use */ | ||
24 | #define PM_BYTE_MSK 7 | ||
25 | #define PM_GRS_SH 8 /* Storage subsystem mux select */ | ||
26 | #define PM_GRS_MSK 7 | ||
27 | #define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */ | ||
28 | #define PM_PMCSEL_MSK 0x7f | ||
29 | |||
30 | /* Values in PM_UNIT field */ | ||
31 | #define PM_FPU 0 | ||
32 | #define PM_ISU0 1 | ||
33 | #define PM_IFU 2 | ||
34 | #define PM_ISU1 3 | ||
35 | #define PM_IDU 4 | ||
36 | #define PM_ISU0_ALT 6 | ||
37 | #define PM_GRS 7 | ||
38 | #define PM_LSU0 8 | ||
39 | #define PM_LSU1 0xc | ||
40 | #define PM_LASTUNIT 0xc | ||
41 | |||
42 | /* | ||
43 | * Bits in MMCR1 for POWER5+ | ||
44 | */ | ||
45 | #define MMCR1_TTM0SEL_SH 62 | ||
46 | #define MMCR1_TTM1SEL_SH 60 | ||
47 | #define MMCR1_TTM2SEL_SH 58 | ||
48 | #define MMCR1_TTM3SEL_SH 56 | ||
49 | #define MMCR1_TTMSEL_MSK 3 | ||
50 | #define MMCR1_TD_CP_DBG0SEL_SH 54 | ||
51 | #define MMCR1_TD_CP_DBG1SEL_SH 52 | ||
52 | #define MMCR1_TD_CP_DBG2SEL_SH 50 | ||
53 | #define MMCR1_TD_CP_DBG3SEL_SH 48 | ||
54 | #define MMCR1_GRS_L2SEL_SH 46 | ||
55 | #define MMCR1_GRS_L2SEL_MSK 3 | ||
56 | #define MMCR1_GRS_L3SEL_SH 44 | ||
57 | #define MMCR1_GRS_L3SEL_MSK 3 | ||
58 | #define MMCR1_GRS_MCSEL_SH 41 | ||
59 | #define MMCR1_GRS_MCSEL_MSK 7 | ||
60 | #define MMCR1_GRS_FABSEL_SH 39 | ||
61 | #define MMCR1_GRS_FABSEL_MSK 3 | ||
62 | #define MMCR1_PMC1_ADDER_SEL_SH 35 | ||
63 | #define MMCR1_PMC2_ADDER_SEL_SH 34 | ||
64 | #define MMCR1_PMC3_ADDER_SEL_SH 33 | ||
65 | #define MMCR1_PMC4_ADDER_SEL_SH 32 | ||
66 | #define MMCR1_PMC1SEL_SH 25 | ||
67 | #define MMCR1_PMC2SEL_SH 17 | ||
68 | #define MMCR1_PMC3SEL_SH 9 | ||
69 | #define MMCR1_PMC4SEL_SH 1 | ||
70 | #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) | ||
71 | #define MMCR1_PMCSEL_MSK 0x7f | ||
72 | |||
73 | /* | ||
74 | * Bits in MMCRA | ||
75 | */ | ||
76 | |||
77 | /* | ||
78 | * Layout of constraint bits: | ||
79 | * 6666555555555544444444443333333333222222222211111111110000000000 | ||
80 | * 3210987654321098765432109876543210987654321098765432109876543210 | ||
81 | * [ ><><>< ><> <><>[ > < >< >< >< ><><><><> | ||
82 | * NC G0G1G2 G3 T0T1 UC B0 B1 B2 B3 P4P3P2P1 | ||
83 | * | ||
84 | * NC - number of counters | ||
85 | * 51: NC error 0x0008_0000_0000_0000 | ||
86 | * 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000 | ||
87 | * | ||
88 | * G0..G3 - GRS mux constraints | ||
89 | * 46-47: GRS_L2SEL value | ||
90 | * 44-45: GRS_L3SEL value | ||
91 | * 41-43: GRS_MCSEL value | ||
92 | * 39-40: GRS_FABSEL value | ||
93 | * Note that these match up with their bit positions in MMCR1 | ||
94 | * | ||
95 | * T0 - TTM0 constraint | ||
96 | * 36-37: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0x30_0000_0000 | ||
97 | * | ||
98 | * T1 - TTM1 constraint | ||
99 | * 34-35: TTM1SEL value (0=IDU, 3=GRS) 0x0c_0000_0000 | ||
100 | * | ||
101 | * UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS | ||
102 | * 33: UC3 error 0x02_0000_0000 | ||
103 | * 32: FPU|IFU|ISU1 events needed 0x01_0000_0000 | ||
104 | * 31: ISU0 events needed 0x00_8000_0000 | ||
105 | * 30: IDU|GRS events needed 0x00_4000_0000 | ||
106 | * | ||
107 | * B0 | ||
108 | * 20-23: Byte 0 event source 0x00f0_0000 | ||
109 | * Encoding as for the event code | ||
110 | * | ||
111 | * B1, B2, B3 | ||
112 | * 16-19, 12-15, 8-11: Byte 1, 2, 3 event sources | ||
113 | * | ||
114 | * P4 | ||
115 | * 7: P4 error 0x80 | ||
116 | * 6-7: Count of events needing PMC4 | ||
117 | * | ||
118 | * P1..P3 | ||
119 | * 0-5: Count of events needing PMC1..PMC3 | ||
120 | */ | ||
121 | |||
122 | static const int grsel_shift[8] = { | ||
123 | MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, | ||
124 | MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, | ||
125 | MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH | ||
126 | }; | ||
127 | |||
128 | /* Masks and values for using events from the various units */ | ||
129 | static u64 unit_cons[PM_LASTUNIT+1][2] = { | ||
130 | [PM_FPU] = { 0x3200000000ull, 0x0100000000ull }, | ||
131 | [PM_ISU0] = { 0x0200000000ull, 0x0080000000ull }, | ||
132 | [PM_ISU1] = { 0x3200000000ull, 0x3100000000ull }, | ||
133 | [PM_IFU] = { 0x3200000000ull, 0x2100000000ull }, | ||
134 | [PM_IDU] = { 0x0e00000000ull, 0x0040000000ull }, | ||
135 | [PM_GRS] = { 0x0e00000000ull, 0x0c40000000ull }, | ||
136 | }; | ||
137 | |||
138 | static int power5p_get_constraint(unsigned int event, u64 *maskp, u64 *valp) | ||
139 | { | ||
140 | int pmc, byte, unit, sh; | ||
141 | int bit, fmask; | ||
142 | u64 mask = 0, value = 0; | ||
143 | |||
144 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | ||
145 | if (pmc) { | ||
146 | if (pmc > 4) | ||
147 | return -1; | ||
148 | sh = (pmc - 1) * 2; | ||
149 | mask |= 2 << sh; | ||
150 | value |= 1 << sh; | ||
151 | } | ||
152 | if (event & PM_BUSEVENT_MSK) { | ||
153 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; | ||
154 | if (unit > PM_LASTUNIT) | ||
155 | return -1; | ||
156 | if (unit == PM_ISU0_ALT) | ||
157 | unit = PM_ISU0; | ||
158 | mask |= unit_cons[unit][0]; | ||
159 | value |= unit_cons[unit][1]; | ||
160 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; | ||
161 | if (byte >= 4) { | ||
162 | if (unit != PM_LSU1) | ||
163 | return -1; | ||
164 | /* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */ | ||
165 | ++unit; | ||
166 | byte &= 3; | ||
167 | } | ||
168 | if (unit == PM_GRS) { | ||
169 | bit = event & 7; | ||
170 | fmask = (bit == 6)? 7: 3; | ||
171 | sh = grsel_shift[bit]; | ||
172 | mask |= (u64)fmask << sh; | ||
173 | value |= (u64)((event >> PM_GRS_SH) & fmask) << sh; | ||
174 | } | ||
175 | /* Set byte lane select field */ | ||
176 | mask |= 0xfULL << (20 - 4 * byte); | ||
177 | value |= (u64)unit << (20 - 4 * byte); | ||
178 | } | ||
179 | mask |= 0x8000000000000ull; | ||
180 | value |= 0x1000000000000ull; | ||
181 | *maskp = mask; | ||
182 | *valp = value; | ||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | #define MAX_ALT 3 /* at most 3 alternatives for any event */ | ||
187 | |||
188 | static const unsigned int event_alternatives[][MAX_ALT] = { | ||
189 | { 0x100c0, 0x40001f }, /* PM_GCT_FULL_CYC */ | ||
190 | { 0x120e4, 0x400002 }, /* PM_GRP_DISP_REJECT */ | ||
191 | { 0x230e2, 0x323087 }, /* PM_BR_PRED_CR */ | ||
192 | { 0x230e3, 0x223087, 0x3230a0 }, /* PM_BR_PRED_TA */ | ||
193 | { 0x410c7, 0x441084 }, /* PM_THRD_L2MISS_BOTH_CYC */ | ||
194 | { 0x800c4, 0xc20e0 }, /* PM_DTLB_MISS */ | ||
195 | { 0xc50c6, 0xc60e0 }, /* PM_MRK_DTLB_MISS */ | ||
196 | { 0x100009, 0x200009 }, /* PM_INST_CMPL */ | ||
197 | { 0x200015, 0x300015 }, /* PM_LSU_LMQ_SRQ_EMPTY_CYC */ | ||
198 | { 0x300009, 0x400009 }, /* PM_INST_DISP */ | ||
199 | }; | ||
200 | |||
201 | /* | ||
202 | * Scan the alternatives table for a match and return the | ||
203 | * index into the alternatives table if found, else -1. | ||
204 | */ | ||
205 | static int find_alternative(unsigned int event) | ||
206 | { | ||
207 | int i, j; | ||
208 | |||
209 | for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { | ||
210 | if (event < event_alternatives[i][0]) | ||
211 | break; | ||
212 | for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j) | ||
213 | if (event == event_alternatives[i][j]) | ||
214 | return i; | ||
215 | } | ||
216 | return -1; | ||
217 | } | ||
218 | |||
219 | static const unsigned char bytedecode_alternatives[4][4] = { | ||
220 | /* PMC 1 */ { 0x21, 0x23, 0x25, 0x27 }, | ||
221 | /* PMC 2 */ { 0x07, 0x17, 0x0e, 0x1e }, | ||
222 | /* PMC 3 */ { 0x20, 0x22, 0x24, 0x26 }, | ||
223 | /* PMC 4 */ { 0x07, 0x17, 0x0e, 0x1e } | ||
224 | }; | ||
225 | |||
226 | /* | ||
227 | * Some direct events for decodes of event bus byte 3 have alternative | ||
228 | * PMCSEL values on other counters. This returns the alternative | ||
229 | * event code for those that do, or -1 otherwise. This also handles | ||
230 | * alternative PMCSEL values for add events. | ||
231 | */ | ||
232 | static int find_alternative_bdecode(unsigned int event) | ||
233 | { | ||
234 | int pmc, altpmc, pp, j; | ||
235 | |||
236 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | ||
237 | if (pmc == 0 || pmc > 4) | ||
238 | return -1; | ||
239 | altpmc = 5 - pmc; /* 1 <-> 4, 2 <-> 3 */ | ||
240 | pp = event & PM_PMCSEL_MSK; | ||
241 | for (j = 0; j < 4; ++j) { | ||
242 | if (bytedecode_alternatives[pmc - 1][j] == pp) { | ||
243 | return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) | | ||
244 | (altpmc << PM_PMC_SH) | | ||
245 | bytedecode_alternatives[altpmc - 1][j]; | ||
246 | } | ||
247 | } | ||
248 | |||
249 | /* new decode alternatives for power5+ */ | ||
250 | if (pmc == 1 && (pp == 0x0d || pp == 0x0e)) | ||
251 | return event + (2 << PM_PMC_SH) + (0x2e - 0x0d); | ||
252 | if (pmc == 3 && (pp == 0x2e || pp == 0x2f)) | ||
253 | return event - (2 << PM_PMC_SH) - (0x2e - 0x0d); | ||
254 | |||
255 | /* alternative add event encodings */ | ||
256 | if (pp == 0x10 || pp == 0x28) | ||
257 | return ((event ^ (0x10 ^ 0x28)) & ~PM_PMC_MSKS) | | ||
258 | (altpmc << PM_PMC_SH); | ||
259 | |||
260 | return -1; | ||
261 | } | ||
262 | |||
263 | static int power5p_get_alternatives(unsigned int event, unsigned int alt[]) | ||
264 | { | ||
265 | int i, j, ae, nalt = 1; | ||
266 | |||
267 | alt[0] = event; | ||
268 | nalt = 1; | ||
269 | i = find_alternative(event); | ||
270 | if (i >= 0) { | ||
271 | for (j = 0; j < MAX_ALT; ++j) { | ||
272 | ae = event_alternatives[i][j]; | ||
273 | if (ae && ae != event) | ||
274 | alt[nalt++] = ae; | ||
275 | } | ||
276 | } else { | ||
277 | ae = find_alternative_bdecode(event); | ||
278 | if (ae > 0) | ||
279 | alt[nalt++] = ae; | ||
280 | } | ||
281 | return nalt; | ||
282 | } | ||
283 | |||
284 | static int power5p_compute_mmcr(unsigned int event[], int n_ev, | ||
285 | unsigned int hwc[], u64 mmcr[]) | ||
286 | { | ||
287 | u64 mmcr1 = 0; | ||
288 | unsigned int pmc, unit, byte, psel; | ||
289 | unsigned int ttm; | ||
290 | int i, isbus, bit, grsel; | ||
291 | unsigned int pmc_inuse = 0; | ||
292 | unsigned char busbyte[4]; | ||
293 | unsigned char unituse[16]; | ||
294 | int ttmuse; | ||
295 | |||
296 | if (n_ev > 4) | ||
297 | return -1; | ||
298 | |||
299 | /* First pass to count resource use */ | ||
300 | memset(busbyte, 0, sizeof(busbyte)); | ||
301 | memset(unituse, 0, sizeof(unituse)); | ||
302 | for (i = 0; i < n_ev; ++i) { | ||
303 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | ||
304 | if (pmc) { | ||
305 | if (pmc > 4) | ||
306 | return -1; | ||
307 | if (pmc_inuse & (1 << (pmc - 1))) | ||
308 | return -1; | ||
309 | pmc_inuse |= 1 << (pmc - 1); | ||
310 | } | ||
311 | if (event[i] & PM_BUSEVENT_MSK) { | ||
312 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; | ||
313 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; | ||
314 | if (unit > PM_LASTUNIT) | ||
315 | return -1; | ||
316 | if (unit == PM_ISU0_ALT) | ||
317 | unit = PM_ISU0; | ||
318 | if (byte >= 4) { | ||
319 | if (unit != PM_LSU1) | ||
320 | return -1; | ||
321 | ++unit; | ||
322 | byte &= 3; | ||
323 | } | ||
324 | if (busbyte[byte] && busbyte[byte] != unit) | ||
325 | return -1; | ||
326 | busbyte[byte] = unit; | ||
327 | unituse[unit] = 1; | ||
328 | } | ||
329 | } | ||
330 | |||
331 | /* | ||
332 | * Assign resources and set multiplexer selects. | ||
333 | * | ||
334 | * PM_ISU0 can go either on TTM0 or TTM1, but that's the only | ||
335 | * choice we have to deal with. | ||
336 | */ | ||
337 | if (unituse[PM_ISU0] & | ||
338 | (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) { | ||
339 | unituse[PM_ISU0_ALT] = 1; /* move ISU to TTM1 */ | ||
340 | unituse[PM_ISU0] = 0; | ||
341 | } | ||
342 | /* Set TTM[01]SEL fields. */ | ||
343 | ttmuse = 0; | ||
344 | for (i = PM_FPU; i <= PM_ISU1; ++i) { | ||
345 | if (!unituse[i]) | ||
346 | continue; | ||
347 | if (ttmuse++) | ||
348 | return -1; | ||
349 | mmcr1 |= (u64)i << MMCR1_TTM0SEL_SH; | ||
350 | } | ||
351 | ttmuse = 0; | ||
352 | for (; i <= PM_GRS; ++i) { | ||
353 | if (!unituse[i]) | ||
354 | continue; | ||
355 | if (ttmuse++) | ||
356 | return -1; | ||
357 | mmcr1 |= (u64)(i & 3) << MMCR1_TTM1SEL_SH; | ||
358 | } | ||
359 | if (ttmuse > 1) | ||
360 | return -1; | ||
361 | |||
362 | /* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. */ | ||
363 | for (byte = 0; byte < 4; ++byte) { | ||
364 | unit = busbyte[byte]; | ||
365 | if (!unit) | ||
366 | continue; | ||
367 | if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) { | ||
368 | /* get ISU0 through TTM1 rather than TTM0 */ | ||
369 | unit = PM_ISU0_ALT; | ||
370 | } else if (unit == PM_LSU1 + 1) { | ||
371 | /* select lower word of LSU1 for this byte */ | ||
372 | mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte); | ||
373 | } | ||
374 | ttm = unit >> 2; | ||
375 | mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); | ||
376 | } | ||
377 | |||
378 | /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ | ||
379 | for (i = 0; i < n_ev; ++i) { | ||
380 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | ||
381 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; | ||
382 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; | ||
383 | psel = event[i] & PM_PMCSEL_MSK; | ||
384 | isbus = event[i] & PM_BUSEVENT_MSK; | ||
385 | if (!pmc) { | ||
386 | /* Bus event or any-PMC direct event */ | ||
387 | for (pmc = 0; pmc < 4; ++pmc) { | ||
388 | if (!(pmc_inuse & (1 << pmc))) | ||
389 | break; | ||
390 | } | ||
391 | if (pmc >= 4) | ||
392 | return -1; | ||
393 | pmc_inuse |= 1 << pmc; | ||
394 | } else { | ||
395 | /* Direct event */ | ||
396 | --pmc; | ||
397 | if (isbus && (byte & 2) && | ||
398 | (psel == 8 || psel == 0x10 || psel == 0x28)) | ||
399 | /* add events on higher-numbered bus */ | ||
400 | mmcr1 |= 1ull << (MMCR1_PMC1_ADDER_SEL_SH - pmc); | ||
401 | } | ||
402 | if (isbus && unit == PM_GRS) { | ||
403 | bit = psel & 7; | ||
404 | grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; | ||
405 | mmcr1 |= (u64)grsel << grsel_shift[bit]; | ||
406 | } | ||
407 | if ((psel & 0x58) == 0x40 && (byte & 1) != ((pmc >> 1) & 1)) | ||
408 | /* select alternate byte lane */ | ||
409 | psel |= 0x10; | ||
410 | if (pmc <= 3) | ||
411 | mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc); | ||
412 | hwc[i] = pmc; | ||
413 | } | ||
414 | |||
415 | /* Return MMCRx values */ | ||
416 | mmcr[0] = 0; | ||
417 | if (pmc_inuse & 1) | ||
418 | mmcr[0] = MMCR0_PMC1CE; | ||
419 | if (pmc_inuse & 0x3e) | ||
420 | mmcr[0] |= MMCR0_PMCjCE; | ||
421 | mmcr[1] = mmcr1; | ||
422 | mmcr[2] = 0; | ||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | static void power5p_disable_pmc(unsigned int pmc, u64 mmcr[]) | ||
427 | { | ||
428 | if (pmc <= 3) | ||
429 | mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc)); | ||
430 | } | ||
431 | |||
432 | static int power5p_generic_events[] = { | ||
433 | [PERF_COUNT_CPU_CYCLES] = 0xf, | ||
434 | [PERF_COUNT_INSTRUCTIONS] = 0x100009, | ||
435 | [PERF_COUNT_CACHE_REFERENCES] = 0x1c10a8, /* LD_REF_L1 */ | ||
436 | [PERF_COUNT_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */ | ||
437 | [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */ | ||
438 | [PERF_COUNT_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */ | ||
439 | }; | ||
440 | |||
441 | struct power_pmu power5p_pmu = { | ||
442 | .n_counter = 4, | ||
443 | .max_alternatives = MAX_ALT, | ||
444 | .add_fields = 0x7000000000055ull, | ||
445 | .test_adder = 0x3000040000000ull, | ||
446 | .compute_mmcr = power5p_compute_mmcr, | ||
447 | .get_constraint = power5p_get_constraint, | ||
448 | .get_alternatives = power5p_get_alternatives, | ||
449 | .disable_pmc = power5p_disable_pmc, | ||
450 | .n_generic = ARRAY_SIZE(power5p_generic_events), | ||
451 | .generic_events = power5p_generic_events, | ||
452 | }; | ||
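
To make the constraint layout above more concrete, here is a standalone sketch (not part of the patch; the macros and mask constants are copied from the file) that packs the constraint word for PM_INST_CMPL, event code 0x100009, the same way power5p_get_constraint() does for a direct event on PMC1:

    #include <stdio.h>

    /* copied from power5+-pmu.c above */
    #define PM_PMC_SH       20
    #define PM_PMC_MSK      0xf
    #define PM_BUSEVENT_MSK 0x80

    int main(void)
    {
        unsigned int event = 0x100009;          /* PM_INST_CMPL */
        unsigned long long mask = 0, value = 0;
        int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;

        if (pmc) {                              /* PMC1: two-bit field at bits 0-1 */
            mask  |= 2ull << ((pmc - 1) * 2);
            value |= 1ull << ((pmc - 1) * 2);
        }
        /* 0x100009 is not a bus event (its PM_BUSEVENT_MSK bit is clear),
         * so no byte-lane or GRS fields are added; only NC remains. */
        mask  |= 0x8000000000000ull;            /* NC error bit (51) */
        value |= 0x1000000000000ull;            /* one counter needed (NC field, bit 48) */

        /* prints: mask=0x8000000000002 value=0x1000000000001 */
        printf("mask=%#llx value=%#llx\n", mask, value);
        return 0;
    }

The value puts a 1 in the PMC1 field (bit 0) and a 1 in the NC field (bit 48), while the mask exposes the corresponding error bits 1 and 51, matching the layout comment in the file.
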
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c new file mode 100644 index 000000000000..379ed1087cca --- /dev/null +++ b/arch/powerpc/kernel/power5-pmu.c | |||
@@ -0,0 +1,475 @@ | |||
1 | /* | ||
2 | * Performance counter support for POWER5 (not POWER5++) processors. | ||
3 | * | ||
4 | * Copyright 2009 Paul Mackerras, IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/perf_counter.h> | ||
13 | #include <asm/reg.h> | ||
14 | |||
15 | /* | ||
16 | * Bits in event code for POWER5 (not POWER5++) | ||
17 | */ | ||
18 | #define PM_PMC_SH 20 /* PMC number (1-based) for direct events */ | ||
19 | #define PM_PMC_MSK 0xf | ||
20 | #define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) | ||
21 | #define PM_UNIT_SH 16 /* TTMMUX number and setting - unit select */ | ||
22 | #define PM_UNIT_MSK 0xf | ||
23 | #define PM_BYTE_SH 12 /* Byte number of event bus to use */ | ||
24 | #define PM_BYTE_MSK 7 | ||
25 | #define PM_GRS_SH 8 /* Storage subsystem mux select */ | ||
26 | #define PM_GRS_MSK 7 | ||
27 | #define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */ | ||
28 | #define PM_PMCSEL_MSK 0x7f | ||
29 | |||
30 | /* Values in PM_UNIT field */ | ||
31 | #define PM_FPU 0 | ||
32 | #define PM_ISU0 1 | ||
33 | #define PM_IFU 2 | ||
34 | #define PM_ISU1 3 | ||
35 | #define PM_IDU 4 | ||
36 | #define PM_ISU0_ALT 6 | ||
37 | #define PM_GRS 7 | ||
38 | #define PM_LSU0 8 | ||
39 | #define PM_LSU1 0xc | ||
40 | #define PM_LASTUNIT 0xc | ||
41 | |||
42 | /* | ||
43 | * Bits in MMCR1 for POWER5 | ||
44 | */ | ||
45 | #define MMCR1_TTM0SEL_SH 62 | ||
46 | #define MMCR1_TTM1SEL_SH 60 | ||
47 | #define MMCR1_TTM2SEL_SH 58 | ||
48 | #define MMCR1_TTM3SEL_SH 56 | ||
49 | #define MMCR1_TTMSEL_MSK 3 | ||
50 | #define MMCR1_TD_CP_DBG0SEL_SH 54 | ||
51 | #define MMCR1_TD_CP_DBG1SEL_SH 52 | ||
52 | #define MMCR1_TD_CP_DBG2SEL_SH 50 | ||
53 | #define MMCR1_TD_CP_DBG3SEL_SH 48 | ||
54 | #define MMCR1_GRS_L2SEL_SH 46 | ||
55 | #define MMCR1_GRS_L2SEL_MSK 3 | ||
56 | #define MMCR1_GRS_L3SEL_SH 44 | ||
57 | #define MMCR1_GRS_L3SEL_MSK 3 | ||
58 | #define MMCR1_GRS_MCSEL_SH 41 | ||
59 | #define MMCR1_GRS_MCSEL_MSK 7 | ||
60 | #define MMCR1_GRS_FABSEL_SH 39 | ||
61 | #define MMCR1_GRS_FABSEL_MSK 3 | ||
62 | #define MMCR1_PMC1_ADDER_SEL_SH 35 | ||
63 | #define MMCR1_PMC2_ADDER_SEL_SH 34 | ||
64 | #define MMCR1_PMC3_ADDER_SEL_SH 33 | ||
65 | #define MMCR1_PMC4_ADDER_SEL_SH 32 | ||
66 | #define MMCR1_PMC1SEL_SH 25 | ||
67 | #define MMCR1_PMC2SEL_SH 17 | ||
68 | #define MMCR1_PMC3SEL_SH 9 | ||
69 | #define MMCR1_PMC4SEL_SH 1 | ||
70 | #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) | ||
71 | #define MMCR1_PMCSEL_MSK 0x7f | ||
72 | |||
73 | /* | ||
74 | * Bits in MMCRA | ||
75 | */ | ||
76 | |||
77 | /* | ||
78 | * Layout of constraint bits: | ||
79 | * 6666555555555544444444443333333333222222222211111111110000000000 | ||
80 | * 3210987654321098765432109876543210987654321098765432109876543210 | ||
81 | * <><>[ ><><>< ><> [ >[ >[ >< >< >< >< ><><><><><><> | ||
82 | * T0T1 NC G0G1G2 G3 UC PS1PS2 B0 B1 B2 B3 P6P5P4P3P2P1 | ||
83 | * | ||
84 | * T0 - TTM0 constraint | ||
85 | * 54-55: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0xc0_0000_0000_0000 | ||
86 | * | ||
87 | * T1 - TTM1 constraint | ||
88 | * 52-53: TTM1SEL value (0=IDU, 3=GRS) 0x30_0000_0000_0000 | ||
89 | * | ||
90 | * NC - number of counters | ||
91 | * 51: NC error 0x0008_0000_0000_0000 | ||
92 | * 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000 | ||
93 | * | ||
94 | * G0..G3 - GRS mux constraints | ||
95 | * 46-47: GRS_L2SEL value | ||
96 | * 44-45: GRS_L3SEL value | ||
97 | * 41-43: GRS_MCSEL value | ||
98 | * 39-40: GRS_FABSEL value | ||
99 | * Note that these match up with their bit positions in MMCR1 | ||
100 | * | ||
101 | * UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS | ||
102 | * 37: UC3 error 0x20_0000_0000 | ||
103 | * 36: FPU|IFU|ISU1 events needed 0x10_0000_0000 | ||
104 | * 35: ISU0 events needed 0x08_0000_0000 | ||
105 | * 34: IDU|GRS events needed 0x04_0000_0000 | ||
106 | * | ||
107 | * PS1 | ||
108 | * 33: PS1 error 0x2_0000_0000 | ||
109 | * 31-32: count of events needing PMC1/2 0x1_8000_0000 | ||
110 | * | ||
111 | * PS2 | ||
112 | * 30: PS2 error 0x4000_0000 | ||
113 | * 28-29: count of events needing PMC3/4 0x3000_0000 | ||
114 | * | ||
115 | * B0 | ||
116 | * 24-27: Byte 0 event source 0x0f00_0000 | ||
117 | * Encoding as for the event code | ||
118 | * | ||
119 | * B1, B2, B3 | ||
120 | * 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources | ||
121 | * | ||
122 | * P1..P6 | ||
123 | * 0-11: Count of events needing PMC1..PMC6 | ||
124 | */ | ||
125 | |||
126 | static const int grsel_shift[8] = { | ||
127 | MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, | ||
128 | MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, | ||
129 | MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH | ||
130 | }; | ||
131 | |||
132 | /* Masks and values for using events from the various units */ | ||
133 | static u64 unit_cons[PM_LASTUNIT+1][2] = { | ||
134 | [PM_FPU] = { 0xc0002000000000ull, 0x00001000000000ull }, | ||
135 | [PM_ISU0] = { 0x00002000000000ull, 0x00000800000000ull }, | ||
136 | [PM_ISU1] = { 0xc0002000000000ull, 0xc0001000000000ull }, | ||
137 | [PM_IFU] = { 0xc0002000000000ull, 0x80001000000000ull }, | ||
138 | [PM_IDU] = { 0x30002000000000ull, 0x00000400000000ull }, | ||
139 | [PM_GRS] = { 0x30002000000000ull, 0x30000400000000ull }, | ||
140 | }; | ||
141 | |||
142 | static int power5_get_constraint(unsigned int event, u64 *maskp, u64 *valp) | ||
143 | { | ||
144 | int pmc, byte, unit, sh; | ||
145 | int bit, fmask; | ||
146 | u64 mask = 0, value = 0; | ||
147 | int grp = -1; | ||
148 | |||
149 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | ||
150 | if (pmc) { | ||
151 | if (pmc > 6) | ||
152 | return -1; | ||
153 | sh = (pmc - 1) * 2; | ||
154 | mask |= 2 << sh; | ||
155 | value |= 1 << sh; | ||
156 | if (pmc <= 4) | ||
157 | grp = (pmc - 1) >> 1; | ||
158 | else if (event != 0x500009 && event != 0x600005) | ||
159 | return -1; | ||
160 | } | ||
161 | if (event & PM_BUSEVENT_MSK) { | ||
162 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; | ||
163 | if (unit > PM_LASTUNIT) | ||
164 | return -1; | ||
165 | if (unit == PM_ISU0_ALT) | ||
166 | unit = PM_ISU0; | ||
167 | mask |= unit_cons[unit][0]; | ||
168 | value |= unit_cons[unit][1]; | ||
169 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; | ||
170 | if (byte >= 4) { | ||
171 | if (unit != PM_LSU1) | ||
172 | return -1; | ||
173 | /* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */ | ||
174 | ++unit; | ||
175 | byte &= 3; | ||
176 | } | ||
177 | if (unit == PM_GRS) { | ||
178 | bit = event & 7; | ||
179 | fmask = (bit == 6)? 7: 3; | ||
180 | sh = grsel_shift[bit]; | ||
181 | mask |= (u64)fmask << sh; | ||
182 | value |= (u64)((event >> PM_GRS_SH) & fmask) << sh; | ||
183 | } | ||
184 | /* | ||
185 | * Bus events on bytes 0 and 2 can be counted | ||
186 | * on PMC1/2; bytes 1 and 3 on PMC3/4. | ||
187 | */ | ||
188 | if (!pmc) | ||
189 | grp = byte & 1; | ||
190 | /* Set byte lane select field */ | ||
191 | mask |= 0xfULL << (24 - 4 * byte); | ||
192 | value |= (u64)unit << (24 - 4 * byte); | ||
193 | } | ||
194 | if (grp == 0) { | ||
195 | /* increment PMC1/2 field */ | ||
196 | mask |= 0x200000000ull; | ||
197 | value |= 0x080000000ull; | ||
198 | } else if (grp == 1) { | ||
199 | /* increment PMC3/4 field */ | ||
200 | mask |= 0x40000000ull; | ||
201 | value |= 0x10000000ull; | ||
202 | } | ||
203 | if (pmc < 5) { | ||
204 | /* need a counter from PMC1-4 set */ | ||
205 | mask |= 0x8000000000000ull; | ||
206 | value |= 0x1000000000000ull; | ||
207 | } | ||
208 | *maskp = mask; | ||
209 | *valp = value; | ||
210 | return 0; | ||
211 | } | ||
212 | |||
213 | #define MAX_ALT 3 /* at most 3 alternatives for any event */ | ||
214 | |||
215 | static const unsigned int event_alternatives[][MAX_ALT] = { | ||
216 | { 0x120e4, 0x400002 }, /* PM_GRP_DISP_REJECT */ | ||
217 | { 0x410c7, 0x441084 }, /* PM_THRD_L2MISS_BOTH_CYC */ | ||
218 | { 0x100005, 0x600005 }, /* PM_RUN_CYC */ | ||
219 | { 0x100009, 0x200009, 0x500009 }, /* PM_INST_CMPL */ | ||
220 | { 0x300009, 0x400009 }, /* PM_INST_DISP */ | ||
221 | }; | ||
222 | |||
223 | /* | ||
224 | * Scan the alternatives table for a match and return the | ||
225 | * index into the alternatives table if found, else -1. | ||
226 | */ | ||
227 | static int find_alternative(unsigned int event) | ||
228 | { | ||
229 | int i, j; | ||
230 | |||
231 | for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { | ||
232 | if (event < event_alternatives[i][0]) | ||
233 | break; | ||
234 | for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j) | ||
235 | if (event == event_alternatives[i][j]) | ||
236 | return i; | ||
237 | } | ||
238 | return -1; | ||
239 | } | ||
240 | |||
241 | static const unsigned char bytedecode_alternatives[4][4] = { | ||
242 | /* PMC 1 */ { 0x21, 0x23, 0x25, 0x27 }, | ||
243 | /* PMC 2 */ { 0x07, 0x17, 0x0e, 0x1e }, | ||
244 | /* PMC 3 */ { 0x20, 0x22, 0x24, 0x26 }, | ||
245 | /* PMC 4 */ { 0x07, 0x17, 0x0e, 0x1e } | ||
246 | }; | ||
247 | |||
248 | /* | ||
249 | * Some direct events for decodes of event bus byte 3 have alternative | ||
250 | * PMCSEL values on other counters. This returns the alternative | ||
251 | * event code for those that do, or -1 otherwise. | ||
252 | */ | ||
253 | static int find_alternative_bdecode(unsigned int event) | ||
254 | { | ||
255 | int pmc, altpmc, pp, j; | ||
256 | |||
257 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | ||
258 | if (pmc == 0 || pmc > 4) | ||
259 | return -1; | ||
260 | altpmc = 5 - pmc; /* 1 <-> 4, 2 <-> 3 */ | ||
261 | pp = event & PM_PMCSEL_MSK; | ||
262 | for (j = 0; j < 4; ++j) { | ||
263 | if (bytedecode_alternatives[pmc - 1][j] == pp) { | ||
264 | return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) | | ||
265 | (altpmc << PM_PMC_SH) | | ||
266 | bytedecode_alternatives[altpmc - 1][j]; | ||
267 | } | ||
268 | } | ||
269 | return -1; | ||
270 | } | ||
271 | |||
272 | static int power5_get_alternatives(unsigned int event, unsigned int alt[]) | ||
273 | { | ||
274 | int i, j, ae, nalt = 1; | ||
275 | |||
276 | alt[0] = event; | ||
277 | nalt = 1; | ||
278 | i = find_alternative(event); | ||
279 | if (i >= 0) { | ||
280 | for (j = 0; j < MAX_ALT; ++j) { | ||
281 | ae = event_alternatives[i][j]; | ||
282 | if (ae && ae != event) | ||
283 | alt[nalt++] = ae; | ||
284 | } | ||
285 | } else { | ||
286 | ae = find_alternative_bdecode(event); | ||
287 | if (ae > 0) | ||
288 | alt[nalt++] = ae; | ||
289 | } | ||
290 | return nalt; | ||
291 | } | ||
292 | |||
293 | static int power5_compute_mmcr(unsigned int event[], int n_ev, | ||
294 | unsigned int hwc[], u64 mmcr[]) | ||
295 | { | ||
296 | u64 mmcr1 = 0; | ||
297 | unsigned int pmc, unit, byte, psel; | ||
298 | unsigned int ttm, grp; | ||
299 | int i, isbus, bit, grsel; | ||
300 | unsigned int pmc_inuse = 0; | ||
301 | unsigned int pmc_grp_use[2]; | ||
302 | unsigned char busbyte[4]; | ||
303 | unsigned char unituse[16]; | ||
304 | int ttmuse; | ||
305 | |||
306 | if (n_ev > 6) | ||
307 | return -1; | ||
308 | |||
309 | /* First pass to count resource use */ | ||
310 | pmc_grp_use[0] = pmc_grp_use[1] = 0; | ||
311 | memset(busbyte, 0, sizeof(busbyte)); | ||
312 | memset(unituse, 0, sizeof(unituse)); | ||
313 | for (i = 0; i < n_ev; ++i) { | ||
314 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | ||
315 | if (pmc) { | ||
316 | if (pmc > 6) | ||
317 | return -1; | ||
318 | if (pmc_inuse & (1 << (pmc - 1))) | ||
319 | return -1; | ||
320 | pmc_inuse |= 1 << (pmc - 1); | ||
321 | /* count 1/2 vs 3/4 use */ | ||
322 | if (pmc <= 4) | ||
323 | ++pmc_grp_use[(pmc - 1) >> 1]; | ||
324 | } | ||
325 | if (event[i] & PM_BUSEVENT_MSK) { | ||
326 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; | ||
327 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; | ||
328 | if (unit > PM_LASTUNIT) | ||
329 | return -1; | ||
330 | if (unit == PM_ISU0_ALT) | ||
331 | unit = PM_ISU0; | ||
332 | if (byte >= 4) { | ||
333 | if (unit != PM_LSU1) | ||
334 | return -1; | ||
335 | ++unit; | ||
336 | byte &= 3; | ||
337 | } | ||
338 | if (!pmc) | ||
339 | ++pmc_grp_use[byte & 1]; | ||
340 | if (busbyte[byte] && busbyte[byte] != unit) | ||
341 | return -1; | ||
342 | busbyte[byte] = unit; | ||
343 | unituse[unit] = 1; | ||
344 | } | ||
345 | } | ||
346 | if (pmc_grp_use[0] > 2 || pmc_grp_use[1] > 2) | ||
347 | return -1; | ||
348 | |||
349 | /* | ||
350 | * Assign resources and set multiplexer selects. | ||
351 | * | ||
352 | * PM_ISU0 can go either on TTM0 or TTM1, but that's the only | ||
353 | * choice we have to deal with. | ||
354 | */ | ||
355 | if (unituse[PM_ISU0] & | ||
356 | (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) { | ||
357 | unituse[PM_ISU0_ALT] = 1; /* move ISU to TTM1 */ | ||
358 | unituse[PM_ISU0] = 0; | ||
359 | } | ||
360 | /* Set TTM[01]SEL fields. */ | ||
361 | ttmuse = 0; | ||
362 | for (i = PM_FPU; i <= PM_ISU1; ++i) { | ||
363 | if (!unituse[i]) | ||
364 | continue; | ||
365 | if (ttmuse++) | ||
366 | return -1; | ||
367 | mmcr1 |= (u64)i << MMCR1_TTM0SEL_SH; | ||
368 | } | ||
369 | ttmuse = 0; | ||
370 | for (; i <= PM_GRS; ++i) { | ||
371 | if (!unituse[i]) | ||
372 | continue; | ||
373 | if (ttmuse++) | ||
374 | return -1; | ||
375 | mmcr1 |= (u64)(i & 3) << MMCR1_TTM1SEL_SH; | ||
376 | } | ||
377 | if (ttmuse > 1) | ||
378 | return -1; | ||
379 | |||
380 | /* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. */ | ||
381 | for (byte = 0; byte < 4; ++byte) { | ||
382 | unit = busbyte[byte]; | ||
383 | if (!unit) | ||
384 | continue; | ||
385 | if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) { | ||
386 | /* get ISU0 through TTM1 rather than TTM0 */ | ||
387 | unit = PM_ISU0_ALT; | ||
388 | } else if (unit == PM_LSU1 + 1) { | ||
389 | /* select lower word of LSU1 for this byte */ | ||
390 | mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte); | ||
391 | } | ||
392 | ttm = unit >> 2; | ||
393 | mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); | ||
394 | } | ||
395 | |||
396 | /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ | ||
397 | for (i = 0; i < n_ev; ++i) { | ||
398 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | ||
399 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; | ||
400 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; | ||
401 | psel = event[i] & PM_PMCSEL_MSK; | ||
402 | isbus = event[i] & PM_BUSEVENT_MSK; | ||
403 | if (!pmc) { | ||
404 | /* Bus event or any-PMC direct event */ | ||
405 | for (pmc = 0; pmc < 4; ++pmc) { | ||
406 | if (pmc_inuse & (1 << pmc)) | ||
407 | continue; | ||
408 | grp = (pmc >> 1) & 1; | ||
409 | if (isbus) { | ||
410 | if (grp == (byte & 1)) | ||
411 | break; | ||
412 | } else if (pmc_grp_use[grp] < 2) { | ||
413 | ++pmc_grp_use[grp]; | ||
414 | break; | ||
415 | } | ||
416 | } | ||
417 | pmc_inuse |= 1 << pmc; | ||
418 | } else if (pmc <= 4) { | ||
419 | /* Direct event */ | ||
420 | --pmc; | ||
421 | if ((psel == 8 || psel == 0x10) && isbus && (byte & 2)) | ||
422 | /* add events on higher-numbered bus */ | ||
423 | mmcr1 |= 1ull << (MMCR1_PMC1_ADDER_SEL_SH - pmc); | ||
424 | } else { | ||
425 | /* Instructions or run cycles on PMC5/6 */ | ||
426 | --pmc; | ||
427 | } | ||
428 | if (isbus && unit == PM_GRS) { | ||
429 | bit = psel & 7; | ||
430 | grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; | ||
431 | mmcr1 |= (u64)grsel << grsel_shift[bit]; | ||
432 | } | ||
433 | if (pmc <= 3) | ||
434 | mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc); | ||
435 | hwc[i] = pmc; | ||
436 | } | ||
437 | |||
438 | /* Return MMCRx values */ | ||
439 | mmcr[0] = 0; | ||
440 | if (pmc_inuse & 1) | ||
441 | mmcr[0] = MMCR0_PMC1CE; | ||
442 | if (pmc_inuse & 0x3e) | ||
443 | mmcr[0] |= MMCR0_PMCjCE; | ||
444 | mmcr[1] = mmcr1; | ||
445 | mmcr[2] = 0; | ||
446 | return 0; | ||
447 | } | ||
448 | |||
449 | static void power5_disable_pmc(unsigned int pmc, u64 mmcr[]) | ||
450 | { | ||
451 | if (pmc <= 3) | ||
452 | mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc)); | ||
453 | } | ||
454 | |||
455 | static int power5_generic_events[] = { | ||
456 | [PERF_COUNT_CPU_CYCLES] = 0xf, | ||
457 | [PERF_COUNT_INSTRUCTIONS] = 0x100009, | ||
458 | [PERF_COUNT_CACHE_REFERENCES] = 0x4c1090, /* LD_REF_L1 */ | ||
459 | [PERF_COUNT_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */ | ||
460 | [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */ | ||
461 | [PERF_COUNT_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */ | ||
462 | }; | ||
463 | |||
464 | struct power_pmu power5_pmu = { | ||
465 | .n_counter = 6, | ||
466 | .max_alternatives = MAX_ALT, | ||
467 | .add_fields = 0x7000090000555ull, | ||
468 | .test_adder = 0x3000490000000ull, | ||
469 | .compute_mmcr = power5_compute_mmcr, | ||
470 | .get_constraint = power5_get_constraint, | ||
471 | .get_alternatives = power5_get_alternatives, | ||
472 | .disable_pmc = power5_disable_pmc, | ||
473 | .n_generic = ARRAY_SIZE(power5_generic_events), | ||
474 | .generic_events = power5_generic_events, | ||
475 | }; | ||
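
For example, the event_alternatives[] row for PM_INST_CMPL above lets power5_get_alternatives() return three equivalent encodings (0x100009, 0x200009, 0x500009). The small standalone sketch below (not part of the patch; the two macros are copied from the file) just decodes which PMC each alternative would land on:

    #include <stdio.h>

    /* copied from power5-pmu.c above */
    #define PM_PMC_SH  20
    #define PM_PMC_MSK 0xf

    int main(void)
    {
        /* the event_alternatives[] row for PM_INST_CMPL */
        unsigned int alts[] = { 0x100009, 0x200009, 0x500009 };
        int i;

        /* prints "PMC1 PMC2 PMC5": the same count on three different counters */
        for (i = 0; i < 3; ++i)
            printf("PMC%u ", (alts[i] >> PM_PMC_SH) & PM_PMC_MSK);
        printf("\n");
        return 0;
    }
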
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c new file mode 100644 index 000000000000..b1f61f3c97bb --- /dev/null +++ b/arch/powerpc/kernel/power6-pmu.c | |||
@@ -0,0 +1,283 @@ | |||
1 | /* | ||
2 | * Performance counter support for POWER6 processors. | ||
3 | * | ||
4 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/perf_counter.h> | ||
13 | #include <asm/reg.h> | ||
14 | |||
15 | /* | ||
16 | * Bits in event code for POWER6 | ||
17 | */ | ||
18 | #define PM_PMC_SH 20 /* PMC number (1-based) for direct events */ | ||
19 | #define PM_PMC_MSK 0x7 | ||
20 | #define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) | ||
21 | #define PM_UNIT_SH 16 /* Unit the event comes from (TTMxSEL encoding) */ | ||
22 | #define PM_UNIT_MSK 0xf | ||
23 | #define PM_UNIT_MSKS (PM_UNIT_MSK << PM_UNIT_SH) | ||
24 | #define PM_LLAV 0x8000 /* Load lookahead match value */ | ||
25 | #define PM_LLA 0x4000 /* Load lookahead match enable */ | ||
26 | #define PM_BYTE_SH 12 /* Byte of event bus to use */ | ||
27 | #define PM_BYTE_MSK 3 | ||
28 | #define PM_SUBUNIT_SH 8 /* Subunit event comes from (NEST_SEL enc.) */ | ||
29 | #define PM_SUBUNIT_MSK 7 | ||
30 | #define PM_SUBUNIT_MSKS (PM_SUBUNIT_MSK << PM_SUBUNIT_SH) | ||
31 | #define PM_PMCSEL_MSK 0xff /* PMCxSEL value */ | ||
32 | #define PM_BUSEVENT_MSK 0xf3700 | ||
33 | |||
34 | /* | ||
35 | * Bits in MMCR1 for POWER6 | ||
36 | */ | ||
37 | #define MMCR1_TTM0SEL_SH 60 | ||
38 | #define MMCR1_TTMSEL_SH(n) (MMCR1_TTM0SEL_SH - (n) * 4) | ||
39 | #define MMCR1_TTMSEL_MSK 0xf | ||
40 | #define MMCR1_TTMSEL(m, n) (((m) >> MMCR1_TTMSEL_SH(n)) & MMCR1_TTMSEL_MSK) | ||
41 | #define MMCR1_NESTSEL_SH 45 | ||
42 | #define MMCR1_NESTSEL_MSK 0x7 | ||
43 | #define MMCR1_NESTSEL(m) (((m) >> MMCR1_NESTSEL_SH) & MMCR1_NESTSEL_MSK) | ||
44 | #define MMCR1_PMC1_LLA ((u64)1 << 44) | ||
45 | #define MMCR1_PMC1_LLA_VALUE ((u64)1 << 39) | ||
46 | #define MMCR1_PMC1_ADDR_SEL ((u64)1 << 35) | ||
47 | #define MMCR1_PMC1SEL_SH 24 | ||
48 | #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) | ||
49 | #define MMCR1_PMCSEL_MSK 0xff | ||
50 | |||
51 | /* | ||
52 | * Assign PMC numbers and compute MMCR1 value for a set of events | ||
53 | */ | ||
54 | static int p6_compute_mmcr(unsigned int event[], int n_ev, | ||
55 | unsigned int hwc[], u64 mmcr[]) | ||
56 | { | ||
57 | u64 mmcr1 = 0; | ||
58 | int i; | ||
59 | unsigned int pmc, ev, b, u, s, psel; | ||
60 | unsigned int ttmset = 0; | ||
61 | unsigned int pmc_inuse = 0; | ||
62 | |||
63 | if (n_ev > 4) | ||
64 | return -1; | ||
65 | for (i = 0; i < n_ev; ++i) { | ||
66 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | ||
67 | if (pmc) { | ||
68 | if (pmc_inuse & (1 << (pmc - 1))) | ||
69 | return -1; /* collision! */ | ||
70 | pmc_inuse |= 1 << (pmc - 1); | ||
71 | } | ||
72 | } | ||
73 | for (i = 0; i < n_ev; ++i) { | ||
74 | ev = event[i]; | ||
75 | pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK; | ||
76 | if (pmc) { | ||
77 | --pmc; | ||
78 | } else { | ||
79 | /* can go on any PMC; find a free one */ | ||
80 | for (pmc = 0; pmc < 4; ++pmc) | ||
81 | if (!(pmc_inuse & (1 << pmc))) | ||
82 | break; | ||
83 | pmc_inuse |= 1 << pmc; | ||
84 | } | ||
85 | hwc[i] = pmc; | ||
86 | psel = ev & PM_PMCSEL_MSK; | ||
87 | if (ev & PM_BUSEVENT_MSK) { | ||
88 | /* this event uses the event bus */ | ||
89 | b = (ev >> PM_BYTE_SH) & PM_BYTE_MSK; | ||
90 | u = (ev >> PM_UNIT_SH) & PM_UNIT_MSK; | ||
91 | /* check for conflict on this byte of event bus */ | ||
92 | if ((ttmset & (1 << b)) && MMCR1_TTMSEL(mmcr1, b) != u) | ||
93 | return -1; | ||
94 | mmcr1 |= (u64)u << MMCR1_TTMSEL_SH(b); | ||
95 | ttmset |= 1 << b; | ||
96 | if (u == 5) { | ||
97 | /* Nest events have a further mux */ | ||
98 | s = (ev >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK; | ||
99 | if ((ttmset & 0x10) && | ||
100 | MMCR1_NESTSEL(mmcr1) != s) | ||
101 | return -1; | ||
102 | ttmset |= 0x10; | ||
103 | mmcr1 |= (u64)s << MMCR1_NESTSEL_SH; | ||
104 | } | ||
105 | if (0x30 <= psel && psel <= 0x3d) { | ||
106 | /* these need the PMCx_ADDR_SEL bits */ | ||
107 | if (b >= 2) | ||
108 | mmcr1 |= MMCR1_PMC1_ADDR_SEL >> pmc; | ||
109 | } | ||
110 | /* bus select values are different for PMC3/4 */ | ||
111 | if (pmc >= 2 && (psel & 0x90) == 0x80) | ||
112 | psel ^= 0x20; | ||
113 | } | ||
114 | if (ev & PM_LLA) { | ||
115 | mmcr1 |= MMCR1_PMC1_LLA >> pmc; | ||
116 | if (ev & PM_LLAV) | ||
117 | mmcr1 |= MMCR1_PMC1_LLA_VALUE >> pmc; | ||
118 | } | ||
119 | mmcr1 |= (u64)psel << MMCR1_PMCSEL_SH(pmc); | ||
120 | } | ||
121 | mmcr[0] = 0; | ||
122 | if (pmc_inuse & 1) | ||
123 | mmcr[0] = MMCR0_PMC1CE; | ||
124 | if (pmc_inuse & 0xe) | ||
125 | mmcr[0] |= MMCR0_PMCjCE; | ||
126 | mmcr[1] = mmcr1; | ||
127 | mmcr[2] = 0; | ||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * Layout of constraint bits: | ||
133 | * | ||
134 | * 0-1 add field: number of uses of PMC1 (max 1) | ||
135 | * 2-3, 4-5, 6-7: ditto for PMC2, 3, 4 | ||
136 | * 8-10 select field: nest (subunit) event selector | ||
137 | * 16-19 select field: unit on byte 0 of event bus | ||
138 | * 20-23, 24-27, 28-31 ditto for bytes 1, 2, 3 | ||
139 | */ | ||
140 | static int p6_get_constraint(unsigned int event, u64 *maskp, u64 *valp) | ||
141 | { | ||
142 | int pmc, byte, sh; | ||
143 | unsigned int mask = 0, value = 0; | ||
144 | |||
145 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | ||
146 | if (pmc) { | ||
147 | if (pmc > 4) | ||
148 | return -1; | ||
149 | sh = (pmc - 1) * 2; | ||
150 | mask |= 2 << sh; | ||
151 | value |= 1 << sh; | ||
152 | } | ||
153 | if (event & PM_BUSEVENT_MSK) { | ||
154 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; | ||
155 | sh = byte * 4; | ||
156 | mask |= PM_UNIT_MSKS << sh; | ||
157 | value |= (event & PM_UNIT_MSKS) << sh; | ||
158 | if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) { | ||
159 | mask |= PM_SUBUNIT_MSKS; | ||
160 | value |= event & PM_SUBUNIT_MSKS; | ||
161 | } | ||
162 | } | ||
163 | *maskp = mask; | ||
164 | *valp = value; | ||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | #define MAX_ALT 4 /* at most 4 alternatives for any event */ | ||
169 | |||
170 | static const unsigned int event_alternatives[][MAX_ALT] = { | ||
171 | { 0x0130e8, 0x2000f6, 0x3000fc }, /* PM_PTEG_RELOAD_VALID */ | ||
172 | { 0x080080, 0x10000d, 0x30000c, 0x4000f0 }, /* PM_LD_MISS_L1 */ | ||
173 | { 0x080088, 0x200054, 0x3000f0 }, /* PM_ST_MISS_L1 */ | ||
174 | { 0x10000a, 0x2000f4 }, /* PM_RUN_CYC */ | ||
175 | { 0x10000b, 0x2000f5 }, /* PM_RUN_COUNT */ | ||
176 | { 0x10000e, 0x400010 }, /* PM_PURR */ | ||
177 | { 0x100010, 0x4000f8 }, /* PM_FLUSH */ | ||
178 | { 0x10001a, 0x200010 }, /* PM_MRK_INST_DISP */ | ||
179 | { 0x100026, 0x3000f8 }, /* PM_TB_BIT_TRANS */ | ||
180 | { 0x100054, 0x2000f0 }, /* PM_ST_FIN */ | ||
181 | { 0x100056, 0x2000fc }, /* PM_L1_ICACHE_MISS */ | ||
182 | { 0x1000f0, 0x40000a }, /* PM_INST_IMC_MATCH_CMPL */ | ||
183 | { 0x1000f8, 0x200008 }, /* PM_GCT_EMPTY_CYC */ | ||
184 | { 0x1000fc, 0x400006 }, /* PM_LSU_DERAT_MISS_CYC */ | ||
185 | { 0x20000e, 0x400007 }, /* PM_LSU_DERAT_MISS */ | ||
186 | { 0x200012, 0x300012 }, /* PM_INST_DISP */ | ||
187 | { 0x2000f2, 0x3000f2 }, /* PM_INST_DISP */ | ||
188 | { 0x2000f8, 0x300010 }, /* PM_EXT_INT */ | ||
189 | { 0x2000fe, 0x300056 }, /* PM_DATA_FROM_L2MISS */ | ||
190 | { 0x2d0030, 0x30001a }, /* PM_MRK_FPU_FIN */ | ||
191 | { 0x30000a, 0x400018 }, /* PM_MRK_INST_FIN */ | ||
192 | { 0x3000f6, 0x40000e }, /* PM_L1_DCACHE_RELOAD_VALID */ | ||
193 | { 0x3000fe, 0x400056 }, /* PM_DATA_FROM_L3MISS */ | ||
194 | }; | ||
195 | |||
196 | /* | ||
197 | * This could be made more efficient with a binary search on | ||
198 | * a presorted list, if necessary | ||
199 | */ | ||
200 | static int find_alternatives_list(unsigned int event) | ||
201 | { | ||
202 | int i, j; | ||
203 | unsigned int alt; | ||
204 | |||
205 | for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { | ||
206 | if (event < event_alternatives[i][0]) | ||
207 | return -1; | ||
208 | for (j = 0; j < MAX_ALT; ++j) { | ||
209 | alt = event_alternatives[i][j]; | ||
210 | if (!alt || event < alt) | ||
211 | break; | ||
212 | if (event == alt) | ||
213 | return i; | ||
214 | } | ||
215 | } | ||
216 | return -1; | ||
217 | } | ||
218 | |||
219 | static int p6_get_alternatives(unsigned int event, unsigned int alt[]) | ||
220 | { | ||
221 | int i, j; | ||
222 | unsigned int aevent, psel, pmc; | ||
223 | unsigned int nalt = 1; | ||
224 | |||
225 | alt[0] = event; | ||
226 | |||
227 | /* check the alternatives table */ | ||
228 | i = find_alternatives_list(event); | ||
229 | if (i >= 0) { | ||
230 | /* copy out alternatives from list */ | ||
231 | for (j = 0; j < MAX_ALT; ++j) { | ||
232 | aevent = event_alternatives[i][j]; | ||
233 | if (!aevent) | ||
234 | break; | ||
235 | if (aevent != event) | ||
236 | alt[nalt++] = aevent; | ||
237 | } | ||
238 | |||
239 | } else { | ||
240 | /* Check for alternative ways of computing sum events */ | ||
241 | /* PMCSEL 0x32 counter N == PMCSEL 0x34 counter 5-N */ | ||
242 | psel = event & (PM_PMCSEL_MSK & ~1); /* ignore edge bit */ | ||
243 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | ||
244 | if (pmc && (psel == 0x32 || psel == 0x34)) | ||
245 | alt[nalt++] = ((event ^ 0x6) & ~PM_PMC_MSKS) | | ||
246 | ((5 - pmc) << PM_PMC_SH); | ||
247 | |||
248 | /* PMCSEL 0x38 counter N == PMCSEL 0x3a counter N+/-2 */ | ||
249 | if (pmc && (psel == 0x38 || psel == 0x3a)) | ||
250 | alt[nalt++] = ((event ^ 0x2) & ~PM_PMC_MSKS) | | ||
251 | ((pmc > 2? pmc - 2: pmc + 2) << PM_PMC_SH); | ||
252 | } | ||
253 | |||
254 | return nalt; | ||
255 | } | ||
256 | |||
257 | static void p6_disable_pmc(unsigned int pmc, u64 mmcr[]) | ||
258 | { | ||
259 | /* Set PMCxSEL to 0 to disable PMCx */ | ||
260 | mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc)); | ||
261 | } | ||
262 | |||
263 | static int power6_generic_events[] = { | ||
264 | [PERF_COUNT_CPU_CYCLES] = 0x1e, | ||
265 | [PERF_COUNT_INSTRUCTIONS] = 2, | ||
266 | [PERF_COUNT_CACHE_REFERENCES] = 0x280030, /* LD_REF_L1 */ | ||
267 | [PERF_COUNT_CACHE_MISSES] = 0x30000c, /* LD_MISS_L1 */ | ||
268 | [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x410a0, /* BR_PRED */ | ||
269 | [PERF_COUNT_BRANCH_MISSES] = 0x400052, /* BR_MPRED */ | ||
270 | }; | ||
271 | |||
272 | struct power_pmu power6_pmu = { | ||
273 | .n_counter = 4, | ||
274 | .max_alternatives = MAX_ALT, | ||
275 | .add_fields = 0x55, | ||
276 | .test_adder = 0, | ||
277 | .compute_mmcr = p6_compute_mmcr, | ||
278 | .get_constraint = p6_get_constraint, | ||
279 | .get_alternatives = p6_get_alternatives, | ||
280 | .disable_pmc = p6_disable_pmc, | ||
281 | .n_generic = ARRAY_SIZE(power6_generic_events), | ||
282 | .generic_events = power6_generic_events, | ||
283 | }; | ||
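
The "sum event" rewrite in p6_get_alternatives() can also be shown in isolation. The standalone sketch below (not part of the patch; the macros are copied from the file, and event code 0x100032 is a made-up example with PMC1 and PMCSEL 0x32) applies the PMCSEL 0x32 <-> 0x34, counter N <-> 5-N rule:

    #include <stdio.h>

    /* copied from power6-pmu.c above */
    #define PM_PMC_SH     20
    #define PM_PMC_MSK    0x7
    #define PM_PMC_MSKS   (PM_PMC_MSK << PM_PMC_SH)
    #define PM_PMCSEL_MSK 0xff

    int main(void)
    {
        unsigned int event = 0x100032;   /* hypothetical: PMC1, PMCSEL 0x32 */
        unsigned int pmc  = (event >> PM_PMC_SH) & PM_PMC_MSK;
        unsigned int psel = event & (PM_PMCSEL_MSK & ~1);   /* ignore edge bit */
        unsigned int alt;

        if (pmc && (psel == 0x32 || psel == 0x34)) {
            /* same count is available as PMCSEL 0x34 on counter 5 - N */
            alt = ((event ^ 0x6) & ~PM_PMC_MSKS) | ((5 - pmc) << PM_PMC_SH);
            printf("%#x -> %#x\n", event, alt);   /* prints 0x100032 -> 0x400034 */
        }
        return 0;
    }

The XOR with 0x6 flips PMCSEL between 0x32 and 0x34 while the PMC field is replaced by 5 - N, mirroring the comment in the function.
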
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c new file mode 100644 index 000000000000..c3256580be1a --- /dev/null +++ b/arch/powerpc/kernel/ppc970-pmu.c | |||
@@ -0,0 +1,375 @@ | |||
1 | /* | ||
2 | * Performance counter support for PPC970-family processors. | ||
3 | * | ||
4 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/string.h> | ||
12 | #include <linux/perf_counter.h> | ||
13 | #include <asm/reg.h> | ||
14 | |||
15 | /* | ||
16 | * Bits in event code for PPC970 | ||
17 | */ | ||
18 | #define PM_PMC_SH 12 /* PMC number (1-based) for direct events */ | ||
19 | #define PM_PMC_MSK 0xf | ||
20 | #define PM_UNIT_SH 8 /* TTMMUX number and setting - unit select */ | ||
21 | #define PM_UNIT_MSK 0xf | ||
22 | #define PM_BYTE_SH 4 /* Byte number of event bus to use */ | ||
23 | #define PM_BYTE_MSK 3 | ||
24 | #define PM_PMCSEL_MSK 0xf | ||
25 | |||
26 | /* Values in PM_UNIT field */ | ||
27 | #define PM_NONE 0 | ||
28 | #define PM_FPU 1 | ||
29 | #define PM_VPU 2 | ||
30 | #define PM_ISU 3 | ||
31 | #define PM_IFU 4 | ||
32 | #define PM_IDU 5 | ||
33 | #define PM_STS 6 | ||
34 | #define PM_LSU0 7 | ||
35 | #define PM_LSU1U 8 | ||
36 | #define PM_LSU1L 9 | ||
37 | #define PM_LASTUNIT 9 | ||
38 | |||
39 | /* | ||
40 | * Bits in MMCR0 for PPC970 | ||
41 | */ | ||
42 | #define MMCR0_PMC1SEL_SH 8 | ||
43 | #define MMCR0_PMC2SEL_SH 1 | ||
44 | #define MMCR_PMCSEL_MSK 0x1f | ||
45 | |||
46 | /* | ||
47 | * Bits in MMCR1 for PPC970 | ||
48 | */ | ||
49 | #define MMCR1_TTM0SEL_SH 62 | ||
50 | #define MMCR1_TTM1SEL_SH 59 | ||
51 | #define MMCR1_TTM3SEL_SH 53 | ||
52 | #define MMCR1_TTMSEL_MSK 3 | ||
53 | #define MMCR1_TD_CP_DBG0SEL_SH 50 | ||
54 | #define MMCR1_TD_CP_DBG1SEL_SH 48 | ||
55 | #define MMCR1_TD_CP_DBG2SEL_SH 46 | ||
56 | #define MMCR1_TD_CP_DBG3SEL_SH 44 | ||
57 | #define MMCR1_PMC1_ADDER_SEL_SH 39 | ||
58 | #define MMCR1_PMC2_ADDER_SEL_SH 38 | ||
59 | #define MMCR1_PMC6_ADDER_SEL_SH 37 | ||
60 | #define MMCR1_PMC5_ADDER_SEL_SH 36 | ||
61 | #define MMCR1_PMC8_ADDER_SEL_SH 35 | ||
62 | #define MMCR1_PMC7_ADDER_SEL_SH 34 | ||
63 | #define MMCR1_PMC3_ADDER_SEL_SH 33 | ||
64 | #define MMCR1_PMC4_ADDER_SEL_SH 32 | ||
65 | #define MMCR1_PMC3SEL_SH 27 | ||
66 | #define MMCR1_PMC4SEL_SH 22 | ||
67 | #define MMCR1_PMC5SEL_SH 17 | ||
68 | #define MMCR1_PMC6SEL_SH 12 | ||
69 | #define MMCR1_PMC7SEL_SH 7 | ||
70 | #define MMCR1_PMC8SEL_SH 2 | ||
71 | |||
72 | static short mmcr1_adder_bits[8] = { | ||
73 | MMCR1_PMC1_ADDER_SEL_SH, | ||
74 | MMCR1_PMC2_ADDER_SEL_SH, | ||
75 | MMCR1_PMC3_ADDER_SEL_SH, | ||
76 | MMCR1_PMC4_ADDER_SEL_SH, | ||
77 | MMCR1_PMC5_ADDER_SEL_SH, | ||
78 | MMCR1_PMC6_ADDER_SEL_SH, | ||
79 | MMCR1_PMC7_ADDER_SEL_SH, | ||
80 | MMCR1_PMC8_ADDER_SEL_SH | ||
81 | }; | ||
82 | |||
83 | /* | ||
84 | * Bits in MMCRA | ||
85 | */ | ||
86 | |||
87 | /* | ||
88 | * Layout of constraint bits: | ||
89 | * 6666555555555544444444443333333333222222222211111111110000000000 | ||
90 | * 3210987654321098765432109876543210987654321098765432109876543210 | ||
91 | * <><>[ >[ >[ >< >< >< >< ><><><><><><><><> | ||
92 | * T0T1 UC PS1 PS2 B0 B1 B2 B3 P1P2P3P4P5P6P7P8 | ||
93 | * | ||
94 | * T0 - TTM0 constraint | ||
95 | * 46-47: TTM0SEL value (0=FPU, 2=IFU, 3=VPU) 0xC000_0000_0000 | ||
96 | * | ||
97 | * T1 - TTM1 constraint | ||
98 | * 44-45: TTM1SEL value (0=IDU, 3=STS) 0x3000_0000_0000 | ||
99 | * | ||
100 | * UC - unit constraint: can't have all three of FPU|IFU|VPU, ISU, IDU|STS | ||
101 | * 43: UC3 error 0x0800_0000_0000 | ||
102 | * 42: FPU|IFU|VPU events needed 0x0400_0000_0000 | ||
103 | * 41: ISU events needed 0x0200_0000_0000 | ||
104 | * 40: IDU|STS events needed 0x0100_0000_0000 | ||
105 | * | ||
106 | * PS1 | ||
107 | * 39: PS1 error 0x0080_0000_0000 | ||
108 | * 36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000 | ||
109 | * | ||
110 | * PS2 | ||
111 | * 35: PS2 error 0x0008_0000_0000 | ||
112 | * 32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000 | ||
113 | * | ||
114 | * B0 | ||
115 | * 28-31: Byte 0 event source 0xf000_0000 | ||
116 | * Encoding as for the event code | ||
117 | * | ||
118 | * B1, B2, B3 | ||
119 | * 24-27, 20-23, 16-19: Byte 1, 2, 3 event sources | ||
120 | * | ||
121 | * P1 | ||
122 | * 15: P1 error 0x8000 | ||
123 | * 14-15: Count of events needing PMC1 | ||
124 | * | ||
125 | * P2..P8 | ||
126 | * 0-13: Count of events needing PMC2..PMC8 | ||
127 | */ | ||
128 | |||
129 | /* Masks and values for using events from the various units */ | ||
130 | static u64 unit_cons[PM_LASTUNIT+1][2] = { | ||
131 | [PM_FPU] = { 0xc80000000000ull, 0x040000000000ull }, | ||
132 | [PM_VPU] = { 0xc80000000000ull, 0xc40000000000ull }, | ||
133 | [PM_ISU] = { 0x080000000000ull, 0x020000000000ull }, | ||
134 | [PM_IFU] = { 0xc80000000000ull, 0x840000000000ull }, | ||
135 | [PM_IDU] = { 0x380000000000ull, 0x010000000000ull }, | ||
136 | [PM_STS] = { 0x380000000000ull, 0x310000000000ull }, | ||
137 | }; | ||
138 | |||
139 | static int p970_get_constraint(unsigned int event, u64 *maskp, u64 *valp) | ||
140 | { | ||
141 | int pmc, byte, unit, sh; | ||
142 | u64 mask = 0, value = 0; | ||
143 | int grp = -1; | ||
144 | |||
145 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | ||
146 | if (pmc) { | ||
147 | if (pmc > 8) | ||
148 | return -1; | ||
149 | sh = (pmc - 1) * 2; | ||
150 | mask |= 2 << sh; | ||
151 | value |= 1 << sh; | ||
152 | grp = ((pmc - 1) >> 1) & 1; | ||
153 | } | ||
154 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; | ||
155 | if (unit) { | ||
156 | if (unit > PM_LASTUNIT) | ||
157 | return -1; | ||
158 | mask |= unit_cons[unit][0]; | ||
159 | value |= unit_cons[unit][1]; | ||
160 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; | ||
161 | /* | ||
162 | * Bus events on bytes 0 and 2 can be counted | ||
163 | * on PMC1/2/5/6; bytes 1 and 3 on PMC3/4/7/8. | ||
164 | */ | ||
165 | if (!pmc) | ||
166 | grp = byte & 1; | ||
167 | /* Set byte lane select field */ | ||
168 | mask |= 0xfULL << (28 - 4 * byte); | ||
169 | value |= (u64)unit << (28 - 4 * byte); | ||
170 | } | ||
171 | if (grp == 0) { | ||
172 | /* increment PMC1/2/5/6 field */ | ||
173 | mask |= 0x8000000000ull; | ||
174 | value |= 0x1000000000ull; | ||
175 | } else if (grp == 1) { | ||
176 | /* increment PMC3/4/7/8 field */ | ||
177 | mask |= 0x800000000ull; | ||
178 | value |= 0x100000000ull; | ||
179 | } | ||
180 | *maskp = mask; | ||
181 | *valp = value; | ||
182 | return 0; | ||
183 | } | ||
184 | |||
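As a worked example of the encoding, traced by hand from the code above: for a hypothetical event that is hard-wired to PMC1 and uses no bus unit, p970_get_constraint() returns mask 0x8000000002 and value 0x1000000001 — one count in PMC1's 2-bit field (bits 0-1) and one count in the PMC1/2/5/6 group field (bits 36-38), with the matching error bits (1 and 39) in the mask. A second event claiming PMC1, or a fifth event needing that PMC group, would push one of those counts past its error bit and the combination would be refused.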
185 | static int p970_get_alternatives(unsigned int event, unsigned int alt[]) | ||
186 | { | ||
187 | alt[0] = event; | ||
188 | |||
189 | /* 2 alternatives for LSU empty */ | ||
190 | if (event == 0x2002 || event == 0x3002) { | ||
191 | alt[1] = event ^ 0x1000; | ||
192 | return 2; | ||
193 | } | ||
194 | |||
195 | return 1; | ||
196 | } | ||
197 | |||
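For the two LSU-empty codes this gives the scheduler a real choice: p970_get_alternatives(0x2002, alt) fills alt[] with 0x2002 and 0x3002 (the xor flips what is presumably the PMC-number field, so the same PMCSEL can be counted on a PMC in the other group) and returns 2. Every other event code is returned unchanged as its own single alternative.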
198 | static int p970_compute_mmcr(unsigned int event[], int n_ev, | ||
199 | unsigned int hwc[], u64 mmcr[]) | ||
200 | { | ||
201 | u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0; | ||
202 | unsigned int pmc, unit, byte, psel; | ||
203 | unsigned int ttm, grp; | ||
204 | unsigned int pmc_inuse = 0; | ||
205 | unsigned int pmc_grp_use[2]; | ||
206 | unsigned char busbyte[4]; | ||
207 | unsigned char unituse[16]; | ||
208 | unsigned char unitmap[] = { 0, 0<<3, 3<<3, 1<<3, 2<<3, 0|4, 3|4 }; | ||
209 | unsigned char ttmuse[2]; | ||
210 | unsigned char pmcsel[8]; | ||
211 | int i; | ||
212 | |||
213 | if (n_ev > 8) | ||
214 | return -1; | ||
215 | |||
216 | /* First pass to count resource use */ | ||
217 | pmc_grp_use[0] = pmc_grp_use[1] = 0; | ||
218 | memset(busbyte, 0, sizeof(busbyte)); | ||
219 | memset(unituse, 0, sizeof(unituse)); | ||
220 | for (i = 0; i < n_ev; ++i) { | ||
221 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | ||
222 | if (pmc) { | ||
223 | if (pmc_inuse & (1 << (pmc - 1))) | ||
224 | return -1; | ||
225 | pmc_inuse |= 1 << (pmc - 1); | ||
226 | /* count 1/2/5/6 vs 3/4/7/8 use */ | ||
227 | ++pmc_grp_use[((pmc - 1) >> 1) & 1]; | ||
228 | } | ||
229 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; | ||
230 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; | ||
231 | if (unit) { | ||
232 | if (unit > PM_LASTUNIT) | ||
233 | return -1; | ||
234 | if (!pmc) | ||
235 | ++pmc_grp_use[byte & 1]; | ||
236 | if (busbyte[byte] && busbyte[byte] != unit) | ||
237 | return -1; | ||
238 | busbyte[byte] = unit; | ||
239 | unituse[unit] = 1; | ||
240 | } | ||
241 | } | ||
242 | if (pmc_grp_use[0] > 4 || pmc_grp_use[1] > 4) | ||
243 | return -1; | ||
244 | |||
245 | /* | ||
246 | * Assign resources and set multiplexer selects. | ||
247 | * | ||
248 | * PM_ISU can go either on TTM0 or TTM1, but that's the only | ||
249 | * choice we have to deal with. | ||
250 | */ | ||
251 | if (unituse[PM_ISU] & | ||
252 | (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_VPU])) | ||
253 | unitmap[PM_ISU] = 2 | 4; /* move ISU to TTM1 */ | ||
254 | /* Set TTM[01]SEL fields. */ | ||
255 | ttmuse[0] = ttmuse[1] = 0; | ||
256 | for (i = PM_FPU; i <= PM_STS; ++i) { | ||
257 | if (!unituse[i]) | ||
258 | continue; | ||
259 | ttm = unitmap[i]; | ||
260 | ++ttmuse[(ttm >> 2) & 1]; | ||
261 | mmcr1 |= (u64)(ttm & ~4) << MMCR1_TTM1SEL_SH; | ||
262 | } | ||
263 | /* Check only one unit per TTMx */ | ||
264 | if (ttmuse[0] > 1 || ttmuse[1] > 1) | ||
265 | return -1; | ||
266 | |||
267 | /* Set byte lane select fields and TTM3SEL. */ | ||
268 | for (byte = 0; byte < 4; ++byte) { | ||
269 | unit = busbyte[byte]; | ||
270 | if (!unit) | ||
271 | continue; | ||
272 | if (unit <= PM_STS) | ||
273 | ttm = (unitmap[unit] >> 2) & 1; | ||
274 | else if (unit == PM_LSU0) | ||
275 | ttm = 2; | ||
276 | else { | ||
277 | ttm = 3; | ||
278 | if (unit == PM_LSU1L && byte >= 2) | ||
279 | mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte); | ||
280 | } | ||
281 | mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); | ||
282 | } | ||
283 | |||
284 | /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ | ||
285 | memset(pmcsel, 0x8, sizeof(pmcsel)); /* 8 means don't count */ | ||
286 | for (i = 0; i < n_ev; ++i) { | ||
287 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | ||
288 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; | ||
289 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; | ||
290 | psel = event[i] & PM_PMCSEL_MSK; | ||
291 | if (!pmc) { | ||
292 | /* Bus event or any-PMC direct event */ | ||
293 | if (unit) | ||
294 | psel |= 0x10 | ((byte & 2) << 2); | ||
295 | else | ||
296 | psel |= 8; | ||
297 | for (pmc = 0; pmc < 8; ++pmc) { | ||
298 | if (pmc_inuse & (1 << pmc)) | ||
299 | continue; | ||
300 | grp = (pmc >> 1) & 1; | ||
301 | if (unit) { | ||
302 | if (grp == (byte & 1)) | ||
303 | break; | ||
304 | } else if (pmc_grp_use[grp] < 4) { | ||
305 | ++pmc_grp_use[grp]; | ||
306 | break; | ||
307 | } | ||
308 | } | ||
309 | pmc_inuse |= 1 << pmc; | ||
310 | } else { | ||
311 | /* Direct event */ | ||
312 | --pmc; | ||
313 | if (psel == 0 && (byte & 2)) | ||
314 | /* add events on higher-numbered bus */ | ||
315 | mmcr1 |= 1ull << mmcr1_adder_bits[pmc]; | ||
316 | } | ||
317 | pmcsel[pmc] = psel; | ||
318 | hwc[i] = pmc; | ||
319 | } | ||
320 | for (pmc = 0; pmc < 2; ++pmc) | ||
321 | mmcr0 |= pmcsel[pmc] << (MMCR0_PMC1SEL_SH - 7 * pmc); | ||
322 | for (; pmc < 8; ++pmc) | ||
323 | mmcr1 |= (u64)pmcsel[pmc] << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2)); | ||
324 | if (pmc_inuse & 1) | ||
325 | mmcr0 |= MMCR0_PMC1CE; | ||
326 | if (pmc_inuse & 0xfe) | ||
327 | mmcr0 |= MMCR0_PMCjCE; | ||
328 | |||
329 | mmcra |= 0x2000; /* mark only one IOP per PPC instruction */ | ||
330 | |||
331 | /* Return MMCRx values */ | ||
332 | mmcr[0] = mmcr0; | ||
333 | mmcr[1] = mmcr1; | ||
334 | mmcr[2] = mmcra; | ||
335 | return 0; | ||
336 | } | ||
337 | |||
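The three values handed back in mmcr[] map directly onto the 970's PMU control registers; roughly how the generic powerpc perf_counter code added earlier in this patch applies them (a sketch of the idea, not the exact code — MMCR0 goes last because it carries the freeze and interrupt-enable bits):

	mtspr(SPRN_MMCRA, mmcr[2]);	/* mmcra: IOP marking set up above        */
	mtspr(SPRN_MMCR1, mmcr[1]);	/* TTM, byte-lane and PMC3-8 select fields */
	mtspr(SPRN_MMCR0, mmcr[0]);	/* PMC1/2 selects plus PMC1CE/PMCjCE       */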
338 | static void p970_disable_pmc(unsigned int pmc, u64 mmcr[]) | ||
339 | { | ||
340 | int shift, i; | ||
341 | |||
342 | if (pmc <= 1) { | ||
343 | shift = MMCR0_PMC1SEL_SH - 7 * pmc; | ||
344 | i = 0; | ||
345 | } else { | ||
346 | shift = MMCR1_PMC3SEL_SH - 5 * (pmc - 2); | ||
347 | i = 1; | ||
348 | } | ||
349 | /* | ||
350 | * Setting the PMCxSEL field to 0x08 disables PMC x. | ||
351 | */ | ||
352 | mmcr[i] = (mmcr[i] & ~(0x1fUL << shift)) | (0x08UL << shift); | ||
353 | } | ||
354 | |||
355 | static int ppc970_generic_events[] = { | ||
356 | [PERF_COUNT_CPU_CYCLES] = 7, | ||
357 | [PERF_COUNT_INSTRUCTIONS] = 1, | ||
358 | [PERF_COUNT_CACHE_REFERENCES] = 0x8810, /* PM_LD_REF_L1 */ | ||
359 | [PERF_COUNT_CACHE_MISSES] = 0x3810, /* PM_LD_MISS_L1 */ | ||
360 | [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x431, /* PM_BR_ISSUED */ | ||
361 | [PERF_COUNT_BRANCH_MISSES] = 0x327, /* PM_GRP_BR_MPRED */ | ||
362 | }; | ||
363 | |||
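These are the raw 970 event codes behind the architecture-independent PERF_COUNT_* ids; a sketch of the lookup the powerpc core performs against the ppc970_pmu structure just below (illustrative error handling, not the patch's exact code):

	if (ev >= ppc970_pmu.n_generic || ppc970_pmu.generic_events[ev] == 0)
		return -EINVAL;			/* generic id not mapped on 970 */
	ev = ppc970_pmu.generic_events[ev];	/* e.g. CPU_CYCLES -> 7 */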
364 | struct power_pmu ppc970_pmu = { | ||
365 | .n_counter = 8, | ||
366 | .max_alternatives = 2, | ||
367 | .add_fields = 0x001100005555ull, | ||
368 | .test_adder = 0x013300000000ull, | ||
369 | .compute_mmcr = p970_compute_mmcr, | ||
370 | .get_constraint = p970_get_constraint, | ||
371 | .get_alternatives = p970_get_alternatives, | ||
372 | .disable_pmc = p970_disable_pmc, | ||
373 | .n_generic = ARRAY_SIZE(ppc970_generic_events), | ||
374 | .generic_events = ppc970_generic_events, | ||
375 | }; | ||
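The add_fields and test_adder values encode the limits from the constraint layout above: add_fields marks the low-order bit of every count field (0x5555 for the eight 2-bit PMC fields, bits 32 and 36 for the two group fields), and test_adder is the bias that makes an over-limit count carry into that field's error bit, which the masks then catch. A standalone illustration of the trick, under my reading of the layout (not code from the patch):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t test_adder = 0x013300000000ull;   /* from ppc970_pmu above */
		uint64_t ps1_err = 1ull << 39;             /* "PS1 error" bit       */
		uint64_t uc3_err = 1ull << 43;             /* "UC3 error" bit       */

		/* PS1 counts events on PMC1/2/5/6 in bits 36-38; 4 is the limit */
		printf("4 events in group: %s\n",
		       (((4ull << 36) + test_adder) & ps1_err) ? "clash" : "ok");
		printf("5 events in group: %s\n",
		       (((5ull << 36) + test_adder) & ps1_err) ? "clash" : "ok");

		/* UC: bits 40-42 flag IDU|STS, ISU and FPU|IFU|VPU use;
		 * all three at once carries into bit 43 */
		printf("two unit groups:   %s\n",
		       (((0x3ull << 41) + test_adder) & uc3_err) ? "clash" : "ok");
		printf("three unit groups: %s\n",
		       (((0x7ull << 40) + test_adder) & uc3_err) ? "clash" : "ok");
		return 0;
	}

Roughly speaking, the core adds the constraint words of all requested events and rejects any combination in which a masked error bit changes.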
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 76993941cac9..17bbf6f91fbe 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/kprobes.h> | 30 | #include <linux/kprobes.h> |
31 | #include <linux/kdebug.h> | 31 | #include <linux/kdebug.h> |
32 | #include <linux/perf_counter.h> | ||
32 | 33 | ||
33 | #include <asm/firmware.h> | 34 | #include <asm/firmware.h> |
34 | #include <asm/page.h> | 35 | #include <asm/page.h> |
@@ -170,6 +171,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, | |||
170 | die("Weird page fault", regs, SIGSEGV); | 171 | die("Weird page fault", regs, SIGSEGV); |
171 | } | 172 | } |
172 | 173 | ||
174 | perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs); | ||
175 | |||
173 | /* When running in the kernel we expect faults to occur only to | 176 | /* When running in the kernel we expect faults to occur only to |
174 | * addresses in user space. All other faults represent errors in the | 177 | * addresses in user space. All other faults represent errors in the |
175 | * kernel and should generate an OOPS. Unfortunately, in the case of an | 178 | * kernel and should generate an OOPS. Unfortunately, in the case of an |
@@ -309,6 +312,7 @@ good_area: | |||
309 | } | 312 | } |
310 | if (ret & VM_FAULT_MAJOR) { | 313 | if (ret & VM_FAULT_MAJOR) { |
311 | current->maj_flt++; | 314 | current->maj_flt++; |
315 | perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MAJ, 1, 0, regs); | ||
312 | #ifdef CONFIG_PPC_SMLPAR | 316 | #ifdef CONFIG_PPC_SMLPAR |
313 | if (firmware_has_feature(FW_FEATURE_CMO)) { | 317 | if (firmware_has_feature(FW_FEATURE_CMO)) { |
314 | preempt_disable(); | 318 | preempt_disable(); |
@@ -316,8 +320,10 @@ good_area: | |||
316 | preempt_enable(); | 320 | preempt_enable(); |
317 | } | 321 | } |
318 | #endif | 322 | #endif |
319 | } else | 323 | } else { |
320 | current->min_flt++; | 324 | current->min_flt++; |
325 | perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MIN, 1, 0, regs); | ||
326 | } | ||
321 | up_read(&mm->mmap_sem); | 327 | up_read(&mm->mmap_sem); |
322 | return 0; | 328 | return 0; |
323 | 329 | ||
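Each of the new perf_swcounter_event() calls charges one occurrence (the second argument) to the named software counter; the third argument is, in this version of the API, the "called from NMI" flag (0 here, since do_page_fault runs with normal interrupt semantics), and regs supplies the interrupted register state so sampling counters can record where the fault hit. Every fault bumps PERF_COUNT_PAGE_FAULTS, and exactly one of the MAJ/MIN counters is bumped as well, mirroring the existing maj_flt/min_flt bookkeeping.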
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 9da795e49337..732ee93a8e98 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype | |||
@@ -1,6 +1,7 @@ | |||
1 | config PPC64 | 1 | config PPC64 |
2 | bool "64-bit kernel" | 2 | bool "64-bit kernel" |
3 | default n | 3 | default n |
4 | select HAVE_PERF_COUNTERS | ||
4 | help | 5 | help |
5 | This option selects whether a 32-bit or a 64-bit kernel | 6 | This option selects whether a 32-bit or a 64-bit kernel |
6 | will be built. | 7 | will be built. |