diff options
author | Madhavan Srinivasan <maddy@linux.vnet.ibm.com> | 2016-06-26 13:37:05 -0400 |
---|---|---|
committer | Michael Ellerman <mpe@ellerman.id.au> | 2016-07-05 09:49:47 -0400 |
commit | 7ffd948fae4cd4f0207bece20132edd9afb9abcc (patch) | |
tree | 4d60a6edd40122f05d7d1e48e30dbd7b7be5230a | |
parent | 4d3576b207167bdb7af31408871d1bb1a2c1a5c7 (diff) |
powerpc/perf: factor out power8 pmu functions
Factor out some of the power8 pmu functions
to a new file, "isa207-common.c", to share with
the power9 pmu code. This is only code movement;
there is no logic change.
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r-- | arch/powerpc/perf/Makefile | 2 | ||||
-rw-r--r-- | arch/powerpc/perf/isa207-common.c | 263 | ||||
-rw-r--r-- | arch/powerpc/perf/isa207-common.h | 6 | ||||
-rw-r--r-- | arch/powerpc/perf/power8-pmu.c | 256 |
4 files changed, 273 insertions, 254 deletions
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile index 77b6394a7c50..92f8ea46238b 100644 --- a/arch/powerpc/perf/Makefile +++ b/arch/powerpc/perf/Makefile | |||
@@ -5,7 +5,7 @@ obj-$(CONFIG_PERF_EVENTS) += callchain.o perf_regs.o | |||
5 | obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o | 5 | obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o |
6 | obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ | 6 | obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ |
7 | power5+-pmu.o power6-pmu.o power7-pmu.o \ | 7 | power5+-pmu.o power6-pmu.o power7-pmu.o \ |
8 | power8-pmu.o | 8 | isa207-common.o power8-pmu.o |
9 | obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o | 9 | obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o |
10 | 10 | ||
11 | obj-$(CONFIG_FSL_EMB_PERF_EVENT) += core-fsl-emb.o | 11 | obj-$(CONFIG_FSL_EMB_PERF_EVENT) += core-fsl-emb.o |
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c new file mode 100644 index 000000000000..6143c99f3ec5 --- /dev/null +++ b/arch/powerpc/perf/isa207-common.c | |||
@@ -0,0 +1,263 @@ | |||
1 | /* | ||
2 | * Common Performance counter support functions for PowerISA v2.07 processors. | ||
3 | * | ||
4 | * Copyright 2009 Paul Mackerras, IBM Corporation. | ||
5 | * Copyright 2013 Michael Ellerman, IBM Corporation. | ||
6 | * Copyright 2016 Madhavan Srinivasan, IBM Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * as published by the Free Software Foundation; either version | ||
11 | * 2 of the License, or (at your option) any later version. | ||
12 | */ | ||
13 | #include "isa207-common.h" | ||
14 | |||
15 | static inline bool event_is_fab_match(u64 event) | ||
16 | { | ||
17 | /* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */ | ||
18 | event &= 0xff0fe; | ||
19 | |||
20 | /* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */ | ||
21 | return (event == 0x30056 || event == 0x4f052); | ||
22 | } | ||
23 | |||
24 | int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) | ||
25 | { | ||
26 | unsigned int unit, pmc, cache, ebb; | ||
27 | unsigned long mask, value; | ||
28 | |||
29 | mask = value = 0; | ||
30 | |||
31 | if (event & ~EVENT_VALID_MASK) | ||
32 | return -1; | ||
33 | |||
34 | pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; | ||
35 | unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK; | ||
36 | cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK; | ||
37 | ebb = (event >> EVENT_EBB_SHIFT) & EVENT_EBB_MASK; | ||
38 | |||
39 | if (pmc) { | ||
40 | u64 base_event; | ||
41 | |||
42 | if (pmc > 6) | ||
43 | return -1; | ||
44 | |||
45 | /* Ignore Linux defined bits when checking event below */ | ||
46 | base_event = event & ~EVENT_LINUX_MASK; | ||
47 | |||
48 | if (pmc >= 5 && base_event != 0x500fa && | ||
49 | base_event != 0x600f4) | ||
50 | return -1; | ||
51 | |||
52 | mask |= CNST_PMC_MASK(pmc); | ||
53 | value |= CNST_PMC_VAL(pmc); | ||
54 | } | ||
55 | |||
56 | if (pmc <= 4) { | ||
57 | /* | ||
58 | * Add to number of counters in use. Note this includes events with | ||
59 | * a PMC of 0 - they still need a PMC, it's just assigned later. | ||
60 | * Don't count events on PMC 5 & 6, there is only one valid event | ||
61 | * on each of those counters, and they are handled above. | ||
62 | */ | ||
63 | mask |= CNST_NC_MASK; | ||
64 | value |= CNST_NC_VAL; | ||
65 | } | ||
66 | |||
67 | if (unit >= 6 && unit <= 9) { | ||
68 | /* | ||
69 | * L2/L3 events contain a cache selector field, which is | ||
70 | * supposed to be programmed into MMCRC. However MMCRC is only | ||
71 | * HV writable, and there is no API for guest kernels to modify | ||
72 | * it. The solution is for the hypervisor to initialise the | ||
73 | * field to zeroes, and for us to only ever allow events that | ||
74 | * have a cache selector of zero. The bank selector (bit 3) is | ||
75 | * irrelevant, as long as the rest of the value is 0. | ||
76 | */ | ||
77 | if (cache & 0x7) | ||
78 | return -1; | ||
79 | |||
80 | } else if (event & EVENT_IS_L1) { | ||
81 | mask |= CNST_L1_QUAL_MASK; | ||
82 | value |= CNST_L1_QUAL_VAL(cache); | ||
83 | } | ||
84 | |||
85 | if (event & EVENT_IS_MARKED) { | ||
86 | mask |= CNST_SAMPLE_MASK; | ||
87 | value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT); | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC, | ||
92 | * the threshold control bits are used for the match value. | ||
93 | */ | ||
94 | if (event_is_fab_match(event)) { | ||
95 | mask |= CNST_FAB_MATCH_MASK; | ||
96 | value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT); | ||
97 | } else { | ||
98 | /* | ||
99 | * Check the mantissa upper two bits are not zero, unless the | ||
100 | * exponent is also zero. See the THRESH_CMP_MANTISSA doc. | ||
101 | */ | ||
102 | unsigned int cmp, exp; | ||
103 | |||
104 | cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK; | ||
105 | exp = cmp >> 7; | ||
106 | |||
107 | if (exp && (cmp & 0x60) == 0) | ||
108 | return -1; | ||
109 | |||
110 | mask |= CNST_THRESH_MASK; | ||
111 | value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT); | ||
112 | } | ||
113 | |||
114 | if (!pmc && ebb) | ||
115 | /* EBB events must specify the PMC */ | ||
116 | return -1; | ||
117 | |||
118 | if (event & EVENT_WANTS_BHRB) { | ||
119 | if (!ebb) | ||
120 | /* Only EBB events can request BHRB */ | ||
121 | return -1; | ||
122 | |||
123 | mask |= CNST_IFM_MASK; | ||
124 | value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT); | ||
125 | } | ||
126 | |||
127 | /* | ||
128 | * All events must agree on EBB, either all request it or none. | ||
129 | * EBB events are pinned & exclusive, so this should never actually | ||
130 | * hit, but we leave it as a fallback in case. | ||
131 | */ | ||
132 | mask |= CNST_EBB_VAL(ebb); | ||
133 | value |= CNST_EBB_MASK; | ||
134 | |||
135 | *maskp = mask; | ||
136 | *valp = value; | ||
137 | |||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | int isa207_compute_mmcr(u64 event[], int n_ev, | ||
142 | unsigned int hwc[], unsigned long mmcr[], | ||
143 | struct perf_event *pevents[]) | ||
144 | { | ||
145 | unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val; | ||
146 | unsigned int pmc, pmc_inuse; | ||
147 | int i; | ||
148 | |||
149 | pmc_inuse = 0; | ||
150 | |||
151 | /* First pass to count resource use */ | ||
152 | for (i = 0; i < n_ev; ++i) { | ||
153 | pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; | ||
154 | if (pmc) | ||
155 | pmc_inuse |= 1 << pmc; | ||
156 | } | ||
157 | |||
158 | /* In continuous sampling mode, update SDAR on TLB miss */ | ||
159 | mmcra = MMCRA_SDAR_MODE_TLB; | ||
160 | mmcr1 = mmcr2 = 0; | ||
161 | |||
162 | /* Second pass: assign PMCs, set all MMCR1 fields */ | ||
163 | for (i = 0; i < n_ev; ++i) { | ||
164 | pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; | ||
165 | unit = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK; | ||
166 | combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK; | ||
167 | psel = event[i] & EVENT_PSEL_MASK; | ||
168 | |||
169 | if (!pmc) { | ||
170 | for (pmc = 1; pmc <= 4; ++pmc) { | ||
171 | if (!(pmc_inuse & (1 << pmc))) | ||
172 | break; | ||
173 | } | ||
174 | |||
175 | pmc_inuse |= 1 << pmc; | ||
176 | } | ||
177 | |||
178 | if (pmc <= 4) { | ||
179 | mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc); | ||
180 | mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc); | ||
181 | mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc); | ||
182 | } | ||
183 | |||
184 | if (event[i] & EVENT_IS_L1) { | ||
185 | cache = event[i] >> EVENT_CACHE_SEL_SHIFT; | ||
186 | mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT; | ||
187 | cache >>= 1; | ||
188 | mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT; | ||
189 | } | ||
190 | |||
191 | if (event[i] & EVENT_IS_MARKED) { | ||
192 | mmcra |= MMCRA_SAMPLE_ENABLE; | ||
193 | |||
194 | val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK; | ||
195 | if (val) { | ||
196 | mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT; | ||
197 | mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT; | ||
198 | } | ||
199 | } | ||
200 | |||
201 | /* | ||
202 | * PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC, | ||
203 | * the threshold bits are used for the match value. | ||
204 | */ | ||
205 | if (event_is_fab_match(event[i])) { | ||
206 | mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) & | ||
207 | EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT; | ||
208 | } else { | ||
209 | val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK; | ||
210 | mmcra |= val << MMCRA_THR_CTL_SHIFT; | ||
211 | val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK; | ||
212 | mmcra |= val << MMCRA_THR_SEL_SHIFT; | ||
213 | val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK; | ||
214 | mmcra |= val << MMCRA_THR_CMP_SHIFT; | ||
215 | } | ||
216 | |||
217 | if (event[i] & EVENT_WANTS_BHRB) { | ||
218 | val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK; | ||
219 | mmcra |= val << MMCRA_IFM_SHIFT; | ||
220 | } | ||
221 | |||
222 | if (pevents[i]->attr.exclude_user) | ||
223 | mmcr2 |= MMCR2_FCP(pmc); | ||
224 | |||
225 | if (pevents[i]->attr.exclude_hv) | ||
226 | mmcr2 |= MMCR2_FCH(pmc); | ||
227 | |||
228 | if (pevents[i]->attr.exclude_kernel) { | ||
229 | if (cpu_has_feature(CPU_FTR_HVMODE)) | ||
230 | mmcr2 |= MMCR2_FCH(pmc); | ||
231 | else | ||
232 | mmcr2 |= MMCR2_FCS(pmc); | ||
233 | } | ||
234 | |||
235 | hwc[i] = pmc - 1; | ||
236 | } | ||
237 | |||
238 | /* Return MMCRx values */ | ||
239 | mmcr[0] = 0; | ||
240 | |||
241 | /* pmc_inuse is 1-based */ | ||
242 | if (pmc_inuse & 2) | ||
243 | mmcr[0] = MMCR0_PMC1CE; | ||
244 | |||
245 | if (pmc_inuse & 0x7c) | ||
246 | mmcr[0] |= MMCR0_PMCjCE; | ||
247 | |||
248 | /* If we're not using PMC 5 or 6, freeze them */ | ||
249 | if (!(pmc_inuse & 0x60)) | ||
250 | mmcr[0] |= MMCR0_FC56; | ||
251 | |||
252 | mmcr[1] = mmcr1; | ||
253 | mmcr[2] = mmcra; | ||
254 | mmcr[3] = mmcr2; | ||
255 | |||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[]) | ||
260 | { | ||
261 | if (pmc <= 3) | ||
262 | mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1)); | ||
263 | } | ||
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h index 03205f5354e9..4d0a4e5017c2 100644 --- a/arch/powerpc/perf/isa207-common.h +++ b/arch/powerpc/perf/isa207-common.h | |||
@@ -227,4 +227,10 @@ | |||
227 | #define MAX_ALT 2 | 227 | #define MAX_ALT 2 |
228 | #define MAX_PMU_COUNTERS 6 | 228 | #define MAX_PMU_COUNTERS 6 |
229 | 229 | ||
230 | int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp); | ||
231 | int isa207_compute_mmcr(u64 event[], int n_ev, | ||
232 | unsigned int hwc[], unsigned long mmcr[], | ||
233 | struct perf_event *pevents[]); | ||
234 | void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[]); | ||
235 | |||
230 | #endif | 236 | #endif |
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c index 4303e9b91e43..5fde2b192fec 100644 --- a/arch/powerpc/perf/power8-pmu.c +++ b/arch/powerpc/perf/power8-pmu.c | |||
@@ -30,250 +30,6 @@ enum { | |||
30 | #define POWER8_MMCRA_IFM2 0x0000000080000000UL | 30 | #define POWER8_MMCRA_IFM2 0x0000000080000000UL |
31 | #define POWER8_MMCRA_IFM3 0x00000000C0000000UL | 31 | #define POWER8_MMCRA_IFM3 0x00000000C0000000UL |
32 | 32 | ||
33 | static inline bool event_is_fab_match(u64 event) | ||
34 | { | ||
35 | /* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */ | ||
36 | event &= 0xff0fe; | ||
37 | |||
38 | /* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */ | ||
39 | return (event == 0x30056 || event == 0x4f052); | ||
40 | } | ||
41 | |||
42 | static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) | ||
43 | { | ||
44 | unsigned int unit, pmc, cache, ebb; | ||
45 | unsigned long mask, value; | ||
46 | |||
47 | mask = value = 0; | ||
48 | |||
49 | if (event & ~EVENT_VALID_MASK) | ||
50 | return -1; | ||
51 | |||
52 | pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; | ||
53 | unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK; | ||
54 | cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK; | ||
55 | ebb = (event >> EVENT_EBB_SHIFT) & EVENT_EBB_MASK; | ||
56 | |||
57 | if (pmc) { | ||
58 | u64 base_event; | ||
59 | |||
60 | if (pmc > 6) | ||
61 | return -1; | ||
62 | |||
63 | /* Ignore Linux defined bits when checking event below */ | ||
64 | base_event = event & ~EVENT_LINUX_MASK; | ||
65 | |||
66 | if (pmc >= 5 && base_event != PM_RUN_INST_CMPL && | ||
67 | base_event != PM_RUN_CYC) | ||
68 | return -1; | ||
69 | |||
70 | mask |= CNST_PMC_MASK(pmc); | ||
71 | value |= CNST_PMC_VAL(pmc); | ||
72 | } | ||
73 | |||
74 | if (pmc <= 4) { | ||
75 | /* | ||
76 | * Add to number of counters in use. Note this includes events with | ||
77 | * a PMC of 0 - they still need a PMC, it's just assigned later. | ||
78 | * Don't count events on PMC 5 & 6, there is only one valid event | ||
79 | * on each of those counters, and they are handled above. | ||
80 | */ | ||
81 | mask |= CNST_NC_MASK; | ||
82 | value |= CNST_NC_VAL; | ||
83 | } | ||
84 | |||
85 | if (unit >= 6 && unit <= 9) { | ||
86 | /* | ||
87 | * L2/L3 events contain a cache selector field, which is | ||
88 | * supposed to be programmed into MMCRC. However MMCRC is only | ||
89 | * HV writable, and there is no API for guest kernels to modify | ||
90 | * it. The solution is for the hypervisor to initialise the | ||
91 | * field to zeroes, and for us to only ever allow events that | ||
92 | * have a cache selector of zero. The bank selector (bit 3) is | ||
93 | * irrelevant, as long as the rest of the value is 0. | ||
94 | */ | ||
95 | if (cache & 0x7) | ||
96 | return -1; | ||
97 | |||
98 | } else if (event & EVENT_IS_L1) { | ||
99 | mask |= CNST_L1_QUAL_MASK; | ||
100 | value |= CNST_L1_QUAL_VAL(cache); | ||
101 | } | ||
102 | |||
103 | if (event & EVENT_IS_MARKED) { | ||
104 | mask |= CNST_SAMPLE_MASK; | ||
105 | value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT); | ||
106 | } | ||
107 | |||
108 | /* | ||
109 | * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC, | ||
110 | * the threshold control bits are used for the match value. | ||
111 | */ | ||
112 | if (event_is_fab_match(event)) { | ||
113 | mask |= CNST_FAB_MATCH_MASK; | ||
114 | value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT); | ||
115 | } else { | ||
116 | /* | ||
117 | * Check the mantissa upper two bits are not zero, unless the | ||
118 | * exponent is also zero. See the THRESH_CMP_MANTISSA doc. | ||
119 | */ | ||
120 | unsigned int cmp, exp; | ||
121 | |||
122 | cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK; | ||
123 | exp = cmp >> 7; | ||
124 | |||
125 | if (exp && (cmp & 0x60) == 0) | ||
126 | return -1; | ||
127 | |||
128 | mask |= CNST_THRESH_MASK; | ||
129 | value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT); | ||
130 | } | ||
131 | |||
132 | if (!pmc && ebb) | ||
133 | /* EBB events must specify the PMC */ | ||
134 | return -1; | ||
135 | |||
136 | if (event & EVENT_WANTS_BHRB) { | ||
137 | if (!ebb) | ||
138 | /* Only EBB events can request BHRB */ | ||
139 | return -1; | ||
140 | |||
141 | mask |= CNST_IFM_MASK; | ||
142 | value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT); | ||
143 | } | ||
144 | |||
145 | /* | ||
146 | * All events must agree on EBB, either all request it or none. | ||
147 | * EBB events are pinned & exclusive, so this should never actually | ||
148 | * hit, but we leave it as a fallback in case. | ||
149 | */ | ||
150 | mask |= CNST_EBB_VAL(ebb); | ||
151 | value |= CNST_EBB_MASK; | ||
152 | |||
153 | *maskp = mask; | ||
154 | *valp = value; | ||
155 | |||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | static int power8_compute_mmcr(u64 event[], int n_ev, | ||
160 | unsigned int hwc[], unsigned long mmcr[], | ||
161 | struct perf_event *pevents[]) | ||
162 | { | ||
163 | unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val; | ||
164 | unsigned int pmc, pmc_inuse; | ||
165 | int i; | ||
166 | |||
167 | pmc_inuse = 0; | ||
168 | |||
169 | /* First pass to count resource use */ | ||
170 | for (i = 0; i < n_ev; ++i) { | ||
171 | pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; | ||
172 | if (pmc) | ||
173 | pmc_inuse |= 1 << pmc; | ||
174 | } | ||
175 | |||
176 | /* In continuous sampling mode, update SDAR on TLB miss */ | ||
177 | mmcra = MMCRA_SDAR_MODE_TLB; | ||
178 | mmcr1 = mmcr2 = 0; | ||
179 | |||
180 | /* Second pass: assign PMCs, set all MMCR1 fields */ | ||
181 | for (i = 0; i < n_ev; ++i) { | ||
182 | pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; | ||
183 | unit = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK; | ||
184 | combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK; | ||
185 | psel = event[i] & EVENT_PSEL_MASK; | ||
186 | |||
187 | if (!pmc) { | ||
188 | for (pmc = 1; pmc <= 4; ++pmc) { | ||
189 | if (!(pmc_inuse & (1 << pmc))) | ||
190 | break; | ||
191 | } | ||
192 | |||
193 | pmc_inuse |= 1 << pmc; | ||
194 | } | ||
195 | |||
196 | if (pmc <= 4) { | ||
197 | mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc); | ||
198 | mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc); | ||
199 | mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc); | ||
200 | } | ||
201 | |||
202 | if (event[i] & EVENT_IS_L1) { | ||
203 | cache = event[i] >> EVENT_CACHE_SEL_SHIFT; | ||
204 | mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT; | ||
205 | cache >>= 1; | ||
206 | mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT; | ||
207 | } | ||
208 | |||
209 | if (event[i] & EVENT_IS_MARKED) { | ||
210 | mmcra |= MMCRA_SAMPLE_ENABLE; | ||
211 | |||
212 | val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK; | ||
213 | if (val) { | ||
214 | mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT; | ||
215 | mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT; | ||
216 | } | ||
217 | } | ||
218 | |||
219 | /* | ||
220 | * PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC, | ||
221 | * the threshold bits are used for the match value. | ||
222 | */ | ||
223 | if (event_is_fab_match(event[i])) { | ||
224 | mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) & | ||
225 | EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT; | ||
226 | } else { | ||
227 | val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK; | ||
228 | mmcra |= val << MMCRA_THR_CTL_SHIFT; | ||
229 | val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK; | ||
230 | mmcra |= val << MMCRA_THR_SEL_SHIFT; | ||
231 | val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK; | ||
232 | mmcra |= val << MMCRA_THR_CMP_SHIFT; | ||
233 | } | ||
234 | |||
235 | if (event[i] & EVENT_WANTS_BHRB) { | ||
236 | val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK; | ||
237 | mmcra |= val << MMCRA_IFM_SHIFT; | ||
238 | } | ||
239 | |||
240 | if (pevents[i]->attr.exclude_user) | ||
241 | mmcr2 |= MMCR2_FCP(pmc); | ||
242 | |||
243 | if (pevents[i]->attr.exclude_hv) | ||
244 | mmcr2 |= MMCR2_FCH(pmc); | ||
245 | |||
246 | if (pevents[i]->attr.exclude_kernel) { | ||
247 | if (cpu_has_feature(CPU_FTR_HVMODE)) | ||
248 | mmcr2 |= MMCR2_FCH(pmc); | ||
249 | else | ||
250 | mmcr2 |= MMCR2_FCS(pmc); | ||
251 | } | ||
252 | |||
253 | hwc[i] = pmc - 1; | ||
254 | } | ||
255 | |||
256 | /* Return MMCRx values */ | ||
257 | mmcr[0] = 0; | ||
258 | |||
259 | /* pmc_inuse is 1-based */ | ||
260 | if (pmc_inuse & 2) | ||
261 | mmcr[0] = MMCR0_PMC1CE; | ||
262 | |||
263 | if (pmc_inuse & 0x7c) | ||
264 | mmcr[0] |= MMCR0_PMCjCE; | ||
265 | |||
266 | /* If we're not using PMC 5 or 6, freeze them */ | ||
267 | if (!(pmc_inuse & 0x60)) | ||
268 | mmcr[0] |= MMCR0_FC56; | ||
269 | |||
270 | mmcr[1] = mmcr1; | ||
271 | mmcr[2] = mmcra; | ||
272 | mmcr[3] = mmcr2; | ||
273 | |||
274 | return 0; | ||
275 | } | ||
276 | |||
277 | /* Table of alternatives, sorted by column 0 */ | 33 | /* Table of alternatives, sorted by column 0 */ |
278 | static const unsigned int event_alternatives[][MAX_ALT] = { | 34 | static const unsigned int event_alternatives[][MAX_ALT] = { |
279 | { PM_MRK_ST_CMPL, PM_MRK_ST_CMPL_ALT }, | 35 | { PM_MRK_ST_CMPL, PM_MRK_ST_CMPL_ALT }, |
@@ -354,12 +110,6 @@ static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[]) | |||
354 | return num_alt; | 110 | return num_alt; |
355 | } | 111 | } |
356 | 112 | ||
357 | static void power8_disable_pmc(unsigned int pmc, unsigned long mmcr[]) | ||
358 | { | ||
359 | if (pmc <= 3) | ||
360 | mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1)); | ||
361 | } | ||
362 | |||
363 | GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC); | 113 | GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC); |
364 | GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC); | 114 | GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC); |
365 | GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL); | 115 | GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL); |
@@ -632,12 +382,12 @@ static struct power_pmu power8_pmu = { | |||
632 | .max_alternatives = MAX_ALT + 1, | 382 | .max_alternatives = MAX_ALT + 1, |
633 | .add_fields = ISA207_ADD_FIELDS, | 383 | .add_fields = ISA207_ADD_FIELDS, |
634 | .test_adder = ISA207_TEST_ADDER, | 384 | .test_adder = ISA207_TEST_ADDER, |
635 | .compute_mmcr = power8_compute_mmcr, | 385 | .compute_mmcr = isa207_compute_mmcr, |
636 | .config_bhrb = power8_config_bhrb, | 386 | .config_bhrb = power8_config_bhrb, |
637 | .bhrb_filter_map = power8_bhrb_filter_map, | 387 | .bhrb_filter_map = power8_bhrb_filter_map, |
638 | .get_constraint = power8_get_constraint, | 388 | .get_constraint = isa207_get_constraint, |
639 | .get_alternatives = power8_get_alternatives, | 389 | .get_alternatives = power8_get_alternatives, |
640 | .disable_pmc = power8_disable_pmc, | 390 | .disable_pmc = isa207_disable_pmc, |
641 | .flags = PPMU_HAS_SIER | PPMU_ARCH_207S, | 391 | .flags = PPMU_HAS_SIER | PPMU_ARCH_207S, |
642 | .n_generic = ARRAY_SIZE(power8_generic_events), | 392 | .n_generic = ARRAY_SIZE(power8_generic_events), |
643 | .generic_events = power8_generic_events, | 393 | .generic_events = power8_generic_events, |