Diffstat (limited to 'arch/powerpc/perf/isa207-common.c')
-rw-r--r--	arch/powerpc/perf/isa207-common.c	58
1 file changed, 41 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 177de814286f..a6c24d866b2f 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -148,6 +148,14 @@ static bool is_thresh_cmp_valid(u64 event)
 	return true;
 }
 
+static unsigned int dc_ic_rld_quad_l1_sel(u64 event)
+{
+	unsigned int cache;
+
+	cache = (event >> EVENT_CACHE_SEL_SHIFT) & MMCR1_DC_IC_QUAL_MASK;
+	return cache;
+}
+
 static inline u64 isa207_find_source(u64 idx, u32 sub_idx)
 {
 	u64 ret = PERF_MEM_NA;
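The new dc_ic_rld_quad_l1_sel() helper centralises extraction of the cache/qualifier selector from the raw event code, replacing the open-coded shift-and-mask logic further down. Below is a minimal user-space sketch of the same extraction; the shift and mask values are restated here as assumptions for illustration, not copied from the kernel header.

#include <stdint.h>
#include <stdio.h>

#define EVENT_CACHE_SEL_SHIFT	20	/* assumed: offset of the cache field in the raw event */
#define MMCR1_DC_IC_QUAL_MASK	0x3	/* assumed: two-bit dc/ic qualifier */

/* Mirror of the helper: isolate the two selector bits from the event code. */
static unsigned int cache_sel(uint64_t event)
{
	return (event >> EVENT_CACHE_SEL_SHIFT) & MMCR1_DC_IC_QUAL_MASK;
}

int main(void)
{
	uint64_t raw = (0x2ull << 20) | 0xfe;	/* hypothetical event with selector 2 */

	printf("cache sel = %u\n", cache_sel(raw));
	return 0;
}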
@@ -226,8 +234,13 @@ void isa207_get_mem_weight(u64 *weight)
 	u64 mmcra = mfspr(SPRN_MMCRA);
 	u64 exp = MMCRA_THR_CTR_EXP(mmcra);
 	u64 mantissa = MMCRA_THR_CTR_MANT(mmcra);
+	u64 sier = mfspr(SPRN_SIER);
+	u64 val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
 
-	*weight = mantissa << (2 * exp);
+	if (val == 0 || val == 7)
+		*weight = 0;
+	else
+		*weight = mantissa << (2 * exp);
 }
 
 int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
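The threshold counter is stored as a mantissa/exponent pair and decoded as mantissa << (2 * exp), i.e. mantissa * 4^exp; the hunk above additionally reports a zero weight when the SIER sample type is 0 or 7. A standalone sketch of that decode follows; the field shifts and masks are assumed for illustration, not taken from the header.

#include <stdint.h>
#include <stdio.h>

#define THR_CTR_MANT(m)	(((m) >> 19) & 0x7f)	/* assumed mantissa field position */
#define THR_CTR_EXP(m)	(((m) >> 27) & 0x7)	/* assumed exponent field position */

/* Decode the weight; types 0 and 7 carry no usable sample, so report 0. */
static uint64_t decode_weight(uint64_t mmcra, unsigned int sier_type)
{
	if (sier_type == 0 || sier_type == 7)
		return 0;

	return THR_CTR_MANT(mmcra) << (2 * THR_CTR_EXP(mmcra));
}

int main(void)
{
	/* mantissa = 5, exp = 3  =>  weight = 5 * 4^3 = 320 */
	uint64_t mmcra = (5ull << 19) | (3ull << 27);

	printf("weight = %llu\n",
	       (unsigned long long)decode_weight(mmcra, 1));
	return 0;
}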
@@ -274,19 +287,27 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
 	}
 
 	if (unit >= 6 && unit <= 9) {
-		/*
-		 * L2/L3 events contain a cache selector field, which is
-		 * supposed to be programmed into MMCRC. However MMCRC is only
-		 * HV writable, and there is no API for guest kernels to modify
-		 * it. The solution is for the hypervisor to initialise the
-		 * field to zeroes, and for us to only ever allow events that
-		 * have a cache selector of zero. The bank selector (bit 3) is
-		 * irrelevant, as long as the rest of the value is 0.
-		 */
-		if (cache & 0x7)
+		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+			mask |= CNST_CACHE_GROUP_MASK;
+			value |= CNST_CACHE_GROUP_VAL(event & 0xff);
+
+			mask |= CNST_CACHE_PMC4_MASK;
+			if (pmc == 4)
+				value |= CNST_CACHE_PMC4_VAL;
+		} else if (cache & 0x7) {
+			/*
+			 * L2/L3 events contain a cache selector field, which is
+			 * supposed to be programmed into MMCRC. However MMCRC is only
+			 * HV writable, and there is no API for guest kernels to modify
+			 * it. The solution is for the hypervisor to initialise the
+			 * field to zeroes, and for us to only ever allow events that
+			 * have a cache selector of zero. The bank selector (bit 3) is
+			 * irrelevant, as long as the rest of the value is 0.
+			 */
 			return -1;
+		}
 
-	} else if (event & EVENT_IS_L1) {
+	} else if (cpu_has_feature(CPU_FTR_ARCH_300) || (event & EVENT_IS_L1)) {
 		mask |= CNST_L1_QUAL_MASK;
 		value |= CNST_L1_QUAL_VAL(cache);
 	}
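On ARCH_300 the low byte of an L2/L3 event now feeds the constraint mask/value pair, so all L2/L3 events scheduled together must agree on their cache-group selector. The sketch below illustrates how such mask/value pairs can be checked for compatibility: two events conflict when their values differ on bits where both masks are set. The real event scheduler accumulates constraints differently; this pairwise check and the field layout are for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cnst {
	uint64_t mask;
	uint64_t value;
};

/* Compatible iff the values agree wherever both masks overlap. */
static bool compatible(struct cnst a, struct cnst b)
{
	uint64_t overlap = a.mask & b.mask;

	return (a.value & overlap) == (b.value & overlap);
}

int main(void)
{
	/* pretend bits 0-7 encode the L2/L3 cache-group selector */
	struct cnst e1 = { .mask = 0xff, .value = 0x42 };
	struct cnst e2 = { .mask = 0xff, .value = 0x42 };
	struct cnst e3 = { .mask = 0xff, .value = 0x17 };

	printf("e1+e2: %s\n", compatible(e1, e2) ? "ok" : "conflict");
	printf("e1+e3: %s\n", compatible(e1, e3) ? "ok" : "conflict");
	return 0;
}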
@@ -389,11 +410,14 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
 		/* In continuous sampling mode, update SDAR on TLB miss */
 		mmcra_sdar_mode(event[i], &mmcra);
 
-		if (event[i] & EVENT_IS_L1) {
-			cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
-			mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
-			cache >>= 1;
-			mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
+		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+			cache = dc_ic_rld_quad_l1_sel(event[i]);
+			mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
+		} else {
+			if (event[i] & EVENT_IS_L1) {
+				cache = dc_ic_rld_quad_l1_sel(event[i]);
+				mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
+			}
 		}
 
 		if (is_event_marked(event[i])) {
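On ARCH_300 the two selector bits are programmed into a single combined MMCR1 field for every event, whereas older chips split them into separate IC and DC qualifier bits and only did so for L1 events. A sketch contrasting the two encodings; the bit positions are assumptions for illustration, not taken from the header.

#include <stdint.h>
#include <stdio.h>

#define IC_QUAL_SHIFT		46	/* assumed pre-P9 IC qualifier position */
#define DC_QUAL_SHIFT		47	/* assumed pre-P9 DC qualifier position */
#define DC_IC_QUAL_SHIFT	8	/* assumed P9 combined field position */

/* Pre-P9: selector bit 0 drives IC, bit 1 drives DC. */
static uint64_t mmcr1_old(unsigned int cache)
{
	uint64_t mmcr1 = 0;

	mmcr1 |= (uint64_t)(cache & 1) << IC_QUAL_SHIFT;
	mmcr1 |= (uint64_t)((cache >> 1) & 1) << DC_QUAL_SHIFT;
	return mmcr1;
}

/* P9: both selector bits land in one combined field. */
static uint64_t mmcr1_new(unsigned int cache)
{
	return (uint64_t)(cache & 0x3) << DC_IC_QUAL_SHIFT;
}

int main(void)
{
	unsigned int cache = 0x2;

	printf("old: %#llx new: %#llx\n",
	       (unsigned long long)mmcr1_old(cache),
	       (unsigned long long)mmcr1_new(cache));
	return 0;
}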