Diffstat (limited to 'arch/powerpc/perf/power8-pmu.c')
 -rw-r--r--  arch/powerpc/perf/power8-pmu.c | 78
 1 file changed, 61 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 96cee20dcd34..fe2763b6e039 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -10,6 +10,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt)     "power8-pmu: " fmt
+
 #include <linux/kernel.h>
 #include <linux/perf_event.h>
 #include <asm/firmware.h>
@@ -62,9 +64,11 @@
  *
  *        60        56        52        48        44        40        36        32
  * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * |                               [      thresh_cmp     ]   [  thresh_ctl   ]
- * |
- * *- EBB (Linux)                  thresh start/stop OR FAB match -*
+ * | |   [ ]                       [      thresh_cmp     ]   [  thresh_ctl   ]
+ * | |    |                                                                  |
+ * | |    *- IFM (Linux)           thresh start/stop OR FAB match -*
+ * | *- BHRB (Linux)
+ * *- EBB (Linux)
  *
  *        28        24        20        16        12         8         4         0
  * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
@@ -114,9 +118,18 @@
  * MMCRA[57:59] = sample[0:2]   (RAND_SAMP_ELIG)
  * MMCRA[61:62] = sample[3:4]   (RAND_SAMP_MODE)
  *
+ * if EBB and BHRB:
+ *      MMCRA[32:33] = IFM
+ *
  */
 
 #define EVENT_EBB_MASK          1ull
+#define EVENT_EBB_SHIFT         PERF_EVENT_CONFIG_EBB_SHIFT
+#define EVENT_BHRB_MASK         1ull
+#define EVENT_BHRB_SHIFT        62
+#define EVENT_WANTS_BHRB        (EVENT_BHRB_MASK << EVENT_BHRB_SHIFT)
+#define EVENT_IFM_MASK          3ull
+#define EVENT_IFM_SHIFT         60
 #define EVENT_THR_CMP_SHIFT     40      /* Threshold CMP value */
 #define EVENT_THR_CMP_MASK      0x3ff
 #define EVENT_THR_CTL_SHIFT     32      /* Threshold control value (start/stop) */
@@ -141,6 +154,12 @@
 #define EVENT_IS_MARKED         (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
 #define EVENT_PSEL_MASK         0xff    /* PMCxSEL value */
 
+/* Bits defined by Linux */
+#define EVENT_LINUX_MASK        \
+        ((EVENT_EBB_MASK  << EVENT_EBB_SHIFT)   |       \
+         (EVENT_BHRB_MASK << EVENT_BHRB_SHIFT)  |       \
+         (EVENT_IFM_MASK  << EVENT_IFM_SHIFT))
+
 #define EVENT_VALID_MASK        \
         ((EVENT_THRESH_MASK   << EVENT_THRESH_SHIFT)    |       \
          (EVENT_SAMPLE_MASK   << EVENT_SAMPLE_SHIFT)    |       \
@@ -149,7 +168,7 @@
          (EVENT_UNIT_MASK     << EVENT_UNIT_SHIFT)      |       \
          (EVENT_COMBINE_MASK  << EVENT_COMBINE_SHIFT)   |       \
          (EVENT_MARKED_MASK   << EVENT_MARKED_SHIFT)    |       \
-         (EVENT_EBB_MASK      << PERF_EVENT_CONFIG_EBB_SHIFT)   |       \
+         EVENT_LINUX_MASK                               |       \
          EVENT_PSEL_MASK)
 
 /* MMCRA IFM bits - POWER8 */
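
The new EVENT_LINUX_MASK gathers every software-defined bit in the raw event (EBB, the BHRB request and the IFM field) so the kernel can strip them and look only at the hardware event code. Below is a stand-alone sketch of that masking, assuming EVENT_EBB_SHIFT resolves to bit 63 (PERF_EVENT_CONFIG_EBB_SHIFT on powerpc); the raw value and its IFM setting are illustrative only and not taken from the patch.

#include <stdio.h>
#include <stdint.h>

/* Shifts mirrored from the patch; EVENT_EBB_SHIFT is
 * PERF_EVENT_CONFIG_EBB_SHIFT, i.e. bit 63 on powerpc. */
#define EVENT_EBB_SHIFT         63
#define EVENT_BHRB_SHIFT        62
#define EVENT_IFM_SHIFT         60
#define EVENT_LINUX_MASK        ((1ull << EVENT_EBB_SHIFT)  | \
                                 (1ull << EVENT_BHRB_SHIFT) | \
                                 (3ull << EVENT_IFM_SHIFT))

int main(void)
{
        /* Hypothetical raw config: hardware event 0x600f4 (one of the two
         * codes the PMC5/6 check in power8_get_constraint() accepts) plus
         * EBB, BHRB and an illustrative IFM value of 1 in the Linux bits. */
        uint64_t event = 0x600f4ull
                       | (1ull << EVENT_EBB_SHIFT)
                       | (1ull << EVENT_BHRB_SHIFT)
                       | (1ull << EVENT_IFM_SHIFT);

        /* Stripping EVENT_LINUX_MASK recovers the bare hardware code, which
         * is what power8_get_constraint() now compares as base_event. */
        printf("base event = 0x%llx\n",
               (unsigned long long)(event & ~EVENT_LINUX_MASK));

        return 0;
}

Run, this prints base event = 0x600f4, exactly the value the pmc >= 5 check below compares against once the Linux bits are ignored.
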
@@ -173,10 +192,11 @@
  *
  *        28        24        20        16        12         8         4         0
  * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- *                 |   [ ]    [  sample ]   [     ]   [6] [5]   [4] [3]   [2] [1]
- *         EBB -*  |    |
- *                 |    |                   Count of events for each PMC.
- *   L1 I/D qualifier -* |                  p1, p2, p3, p4, p5, p6.
+ *            [ ]  |    [ ]   [  sample ]   [     ]   [6] [5]   [4] [3]   [2] [1]
+ *             |   |     |                      |
+ *  BHRB IFM -*    |     |                      |     Count of events for each PMC.
+ *        EBB -*   |     |                            p1, p2, p3, p4, p5, p6.
+ *   L1 I/D qualifier -* |
  *              nc - number of counters -*
  *
  * The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints
@@ -195,6 +215,9 @@
 #define CNST_EBB_VAL(v)         (((v) & EVENT_EBB_MASK) << 24)
 #define CNST_EBB_MASK           CNST_EBB_VAL(EVENT_EBB_MASK)
 
+#define CNST_IFM_VAL(v)         (((v) & EVENT_IFM_MASK) << 25)
+#define CNST_IFM_MASK           CNST_IFM_VAL(EVENT_IFM_MASK)
+
 #define CNST_L1_QUAL_VAL(v)     (((v) & 3) << 22)
 #define CNST_L1_QUAL_MASK       CNST_L1_QUAL_VAL(3)
 
@@ -241,6 +264,7 @@
 #define MMCRA_THR_SEL_SHIFT     16
 #define MMCRA_THR_CMP_SHIFT     32
 #define MMCRA_SDAR_MODE_TLB     (1ull << 42)
+#define MMCRA_IFM_SHIFT         30
 
 
 static inline bool event_is_fab_match(u64 event)
@@ -265,20 +289,22 @@ static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long
         pmc   = (event >> EVENT_PMC_SHIFT)       & EVENT_PMC_MASK;
         unit  = (event >> EVENT_UNIT_SHIFT)      & EVENT_UNIT_MASK;
         cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
-        ebb   = (event >> PERF_EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK;
-
-        /* Clear the EBB bit in the event, so event checks work below */
-        event &= ~(EVENT_EBB_MASK << PERF_EVENT_CONFIG_EBB_SHIFT);
+        ebb   = (event >> EVENT_EBB_SHIFT)       & EVENT_EBB_MASK;
 
         if (pmc) {
+                u64 base_event;
+
                 if (pmc > 6)
                         return -1;
 
-                mask  |= CNST_PMC_MASK(pmc);
-                value |= CNST_PMC_VAL(pmc);
+                /* Ignore Linux defined bits when checking event below */
+                base_event = event & ~EVENT_LINUX_MASK;
 
-                if (pmc >= 5 && event != 0x500fa && event != 0x600f4)
+                if (pmc >= 5 && base_event != 0x500fa && base_event != 0x600f4)
                         return -1;
+
+                mask  |= CNST_PMC_MASK(pmc);
+                value |= CNST_PMC_VAL(pmc);
         }
 
         if (pmc <= 4) {
@@ -299,9 +325,10 @@ static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long
                  * HV writable, and there is no API for guest kernels to modify
                  * it. The solution is for the hypervisor to initialise the
                  * field to zeroes, and for us to only ever allow events that
-                 * have a cache selector of zero.
+                 * have a cache selector of zero. The bank selector (bit 3) is
+                 * irrelevant, as long as the rest of the value is 0.
                  */
-                if (cache)
+                if (cache & 0x7)
                         return -1;
 
         } else if (event & EVENT_IS_L1) {
@@ -342,6 +369,15 @@ static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long
                         /* EBB events must specify the PMC */
                         return -1;
 
+                if (event & EVENT_WANTS_BHRB) {
+                        if (!ebb)
+                                /* Only EBB events can request BHRB */
+                                return -1;
+
+                        mask  |= CNST_IFM_MASK;
+                        value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT);
+                }
+
                 /*
                  * All events must agree on EBB, either all request it or none.
                  * EBB events are pinned & exclusive, so this should never actually
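
Because CNST_IFM_MASK and CNST_IFM_VAL fold the requested BHRB filter into each event's constraint word, two EBB events that ask for different IFM filters can never be scheduled on the PMU at the same time. The following toy stand-alone illustration shows that agreement rule while ignoring every other constraint field; the compatible() helper is invented for the example and is not the scheduler's real algorithm.

#include <stdio.h>
#include <stdint.h>

/* Mirrored from the patch: IFM is two bits at 60 in the raw event and two
 * bits at 25 in the constraint word. */
#define EVENT_IFM_SHIFT         60
#define EVENT_IFM_MASK          3ull
#define CNST_IFM_VAL(v)         (((v) & EVENT_IFM_MASK) << 25)
#define CNST_IFM_MASK           CNST_IFM_VAL(EVENT_IFM_MASK)

/* Toy check: two events can coexist only if their constraint values agree
 * wherever their constraint masks overlap. */
static int compatible(uint64_t m1, uint64_t v1, uint64_t m2, uint64_t v2)
{
        return ((v1 ^ v2) & (m1 & m2)) == 0;
}

int main(void)
{
        uint64_t e1 = 1ull << EVENT_IFM_SHIFT;  /* hypothetical IFM filter 1 */
        uint64_t e2 = 2ull << EVENT_IFM_SHIFT;  /* hypothetical IFM filter 2 */

        uint64_t v1 = CNST_IFM_VAL(e1 >> EVENT_IFM_SHIFT);
        uint64_t v2 = CNST_IFM_VAL(e2 >> EVENT_IFM_SHIFT);

        /* Different filters clash (prints 0); identical ones do not (prints 1). */
        printf("different filters: %d\n",
               compatible(CNST_IFM_MASK, v1, CNST_IFM_MASK, v2));
        printf("same filter:       %d\n",
               compatible(CNST_IFM_MASK, v1, CNST_IFM_MASK, v1));

        return 0;
}
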
@@ -431,6 +467,11 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
                         mmcra |= val << MMCRA_THR_CMP_SHIFT;
                 }
 
+                if (event[i] & EVENT_WANTS_BHRB) {
+                        val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
+                        mmcra |= val << MMCRA_IFM_SHIFT;
+                }
+
                 hwc[i] = pmc - 1;
         }
 
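
The value of MMCRA_IFM_SHIFT lines up with the "MMCRA[32:33] = IFM" note added to the comment block earlier: SPR fields are documented in MSB-0 (IBM) bit numbering, so bit 33 of the 64-bit MMCRA is bit 63 - 33 = 30 in LSB-0 terms. A tiny stand-alone check of that conversion; the IBM_BIT() macro is defined here just for the example.

#include <assert.h>

/* MSB-0 (IBM) bit n of a 64-bit register is LSB-0 bit 63 - n, so the
 * two-bit IFM field at MMCRA[32:33] starts at LSB-0 bit 30. */
#define IBM_BIT(n)              (63 - (n))
#define MMCRA_IFM_SHIFT         30

int main(void)
{
        assert(IBM_BIT(33) == MMCRA_IFM_SHIFT);
        return 0;
}
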
@@ -774,6 +815,9 @@ static int __init init_power8_pmu(void)
         /* Tell userspace that EBB is supported */
         cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
 
+        if (cpu_has_feature(CPU_FTR_PMAO_BUG))
+                pr_info("PMAO restore workaround active.\n");
+
         return 0;
 }
 early_initcall(init_power8_pmu);
