author		Yan, Zheng <zheng.z.yan@intel.com>	2014-07-30 03:22:15 -0400
committer	Ingo Molnar <mingo@kernel.org>		2014-08-13 01:51:08 -0400
commit		c1e46580c3b7bf25053519cf39f01a2f9ea4d865
tree		24e61949788a43300e71df11389eada6d6dbb22d
parent		8268fdfc45b747bcb3351464efefbdf611aeea9b
perf/x86/uncore: Move NHM-EX/WSM-EX specific code to separate file
Signed-off-by: Yan, Zheng <zheng.z.yan@intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1406704935-27708-4-git-send-email-zheng.z.yan@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/kernel/cpu/Makefile				|    2
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_uncore.c		| 1038
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_uncore.h		|  185
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c	| 1221
4 files changed, 1227 insertions(+), 1219 deletions(-)
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 7dee8664573a..7e1fd4e08552 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -37,7 +37,7 @@ endif
 obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_p6.o perf_event_knc.o perf_event_p4.o
 obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
 obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_intel_uncore.o perf_event_intel_uncore_snb.o
-obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_intel_uncore_snbep.o
+obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_intel_uncore_snbep.o perf_event_intel_uncore_nhmex.o
 obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_intel_rapl.o
 endif
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index cf6966a37580..b1f84d9ccc48 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -28,15 +28,6 @@ ssize_t uncore_event_show(struct kobject *kobj,
 	return sprintf(buf, "%s", event->config);
 }
 
-#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
-				((1ULL << (n)) - 1)))
-
-DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
-DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
-DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
-DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
-DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
-
 struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
 {
 	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
@@ -158,1025 +149,6 @@ u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
 	return config;
 }
 
-/* Nehalem-EX uncore support */
-DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
-DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
-DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
-DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
-
-static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
-{
-	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
-}
-
-static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
-{
-	unsigned msr = uncore_msr_box_ctl(box);
-	u64 config;
-
-	if (msr) {
-		rdmsrl(msr, config);
-		config &= ~((1ULL << uncore_num_counters(box)) - 1);
-		/* WBox has a fixed counter */
-		if (uncore_msr_fixed_ctl(box))
-			config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
-		wrmsrl(msr, config);
-	}
-}
-
-static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
-{
-	unsigned msr = uncore_msr_box_ctl(box);
-	u64 config;
-
-	if (msr) {
-		rdmsrl(msr, config);
-		config |= (1ULL << uncore_num_counters(box)) - 1;
-		/* WBox has a fixed counter */
-		if (uncore_msr_fixed_ctl(box))
-			config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
-		wrmsrl(msr, config);
-	}
-}
-
-static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
-	wrmsrl(event->hw.config_base, 0);
-}
-
-static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-
-	if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
-		wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
-	else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
-		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
-	else
-		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
-}
-
-#define NHMEX_UNCORE_OPS_COMMON_INIT()				\
-	.init_box	= nhmex_uncore_msr_init_box,		\
-	.disable_box	= nhmex_uncore_msr_disable_box,		\
-	.enable_box	= nhmex_uncore_msr_enable_box,		\
-	.disable_event	= nhmex_uncore_msr_disable_event,	\
-	.read_counter	= uncore_msr_read_counter
-
-static struct intel_uncore_ops nhmex_uncore_ops = {
-	NHMEX_UNCORE_OPS_COMMON_INIT(),
-	.enable_event	= nhmex_uncore_msr_enable_event,
-};
-
-static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
-	&format_attr_event.attr,
-	&format_attr_edge.attr,
-	NULL,
-};
-
-static struct attribute_group nhmex_uncore_ubox_format_group = {
-	.name = "format",
-	.attrs = nhmex_uncore_ubox_formats_attr,
-};
-
-static struct intel_uncore_type nhmex_uncore_ubox = {
-	.name = "ubox",
-	.num_counters = 1,
-	.num_boxes = 1,
-	.perf_ctr_bits = 48,
-	.event_ctl = NHMEX_U_MSR_PMON_EV_SEL,
-	.perf_ctr = NHMEX_U_MSR_PMON_CTR,
-	.event_mask = NHMEX_U_PMON_RAW_EVENT_MASK,
-	.box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL,
-	.ops = &nhmex_uncore_ops,
-	.format_group = &nhmex_uncore_ubox_format_group
-};
-
-static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
-	&format_attr_event.attr,
-	&format_attr_umask.attr,
-	&format_attr_edge.attr,
-	&format_attr_inv.attr,
-	&format_attr_thresh8.attr,
-	NULL,
-};
-
-static struct attribute_group nhmex_uncore_cbox_format_group = {
-	.name = "format",
-	.attrs = nhmex_uncore_cbox_formats_attr,
-};
-
-/* msr offset for each instance of cbox */
-static unsigned nhmex_cbox_msr_offsets[] = {
-	0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
-};
-
-static struct intel_uncore_type nhmex_uncore_cbox = {
-	.name = "cbox",
-	.num_counters = 6,
-	.num_boxes = 10,
-	.perf_ctr_bits = 48,
-	.event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
-	.perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
-	.event_mask = NHMEX_PMON_RAW_EVENT_MASK,
-	.box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
-	.msr_offsets = nhmex_cbox_msr_offsets,
-	.pair_ctr_ctl = 1,
-	.ops = &nhmex_uncore_ops,
-	.format_group = &nhmex_uncore_cbox_format_group
-};
-
-static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
-	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
-	{ /* end: all zeroes */ },
-};
-
-static struct intel_uncore_type nhmex_uncore_wbox = {
-	.name = "wbox",
-	.num_counters = 4,
-	.num_boxes = 1,
-	.perf_ctr_bits = 48,
-	.event_ctl = NHMEX_W_MSR_PMON_CNT0,
-	.perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0,
-	.fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR,
-	.fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL,
-	.event_mask = NHMEX_PMON_RAW_EVENT_MASK,
-	.box_ctl = NHMEX_W_MSR_GLOBAL_CTL,
-	.pair_ctr_ctl = 1,
-	.event_descs = nhmex_uncore_wbox_events,
-	.ops = &nhmex_uncore_ops,
-	.format_group = &nhmex_uncore_cbox_format_group
-};
-
-static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
-	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
-	int ctr, ev_sel;
-
-	ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
-		NHMEX_B_PMON_CTR_SHIFT;
-	ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
-		  NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
-
-	/* events that do not use the match/mask registers */
-	if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
-	    (ctr == 2 && ev_sel != 0x4) || ctr == 3)
-		return 0;
-
-	if (box->pmu->pmu_idx == 0)
-		reg1->reg = NHMEX_B0_MSR_MATCH;
-	else
-		reg1->reg = NHMEX_B1_MSR_MATCH;
-	reg1->idx = 0;
-	reg1->config = event->attr.config1;
-	reg2->config = event->attr.config2;
-	return 0;
-}
-
-static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
-	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
-
-	if (reg1->idx != EXTRA_REG_NONE) {
-		wrmsrl(reg1->reg, reg1->config);
-		wrmsrl(reg1->reg + 1, reg2->config);
-	}
-	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
-		(hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
-}
-
-/*
- * The Bbox has 4 counters, but each counter monitors different events.
- * Use bits 6-7 in the event config to select counter.
- */
-static struct event_constraint nhmex_uncore_bbox_constraints[] = {
-	EVENT_CONSTRAINT(0 , 1, 0xc0),
-	EVENT_CONSTRAINT(0x40, 2, 0xc0),
-	EVENT_CONSTRAINT(0x80, 4, 0xc0),
-	EVENT_CONSTRAINT(0xc0, 8, 0xc0),
-	EVENT_CONSTRAINT_END,
-};
-
-static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
-	&format_attr_event5.attr,
-	&format_attr_counter.attr,
-	&format_attr_match.attr,
-	&format_attr_mask.attr,
-	NULL,
-};
-
-static struct attribute_group nhmex_uncore_bbox_format_group = {
-	.name = "format",
-	.attrs = nhmex_uncore_bbox_formats_attr,
-};
-
-static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
-	NHMEX_UNCORE_OPS_COMMON_INIT(),
-	.enable_event = nhmex_bbox_msr_enable_event,
-	.hw_config = nhmex_bbox_hw_config,
-	.get_constraint = uncore_get_constraint,
-	.put_constraint = uncore_put_constraint,
-};
-
-static struct intel_uncore_type nhmex_uncore_bbox = {
-	.name = "bbox",
-	.num_counters = 4,
-	.num_boxes = 2,
-	.perf_ctr_bits = 48,
-	.event_ctl = NHMEX_B0_MSR_PMON_CTL0,
-	.perf_ctr = NHMEX_B0_MSR_PMON_CTR0,
-	.event_mask = NHMEX_B_PMON_RAW_EVENT_MASK,
-	.box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
-	.msr_offset = NHMEX_B_MSR_OFFSET,
-	.pair_ctr_ctl = 1,
-	.num_shared_regs = 1,
-	.constraints = nhmex_uncore_bbox_constraints,
-	.ops = &nhmex_uncore_bbox_ops,
-	.format_group = &nhmex_uncore_bbox_format_group
-};
-
-static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
-	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
-
-	/* only TO_R_PROG_EV event uses the match/mask register */
-	if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
-	    NHMEX_S_EVENT_TO_R_PROG_EV)
-		return 0;
-
-	if (box->pmu->pmu_idx == 0)
-		reg1->reg = NHMEX_S0_MSR_MM_CFG;
-	else
-		reg1->reg = NHMEX_S1_MSR_MM_CFG;
-	reg1->idx = 0;
-	reg1->config = event->attr.config1;
-	reg2->config = event->attr.config2;
-	return 0;
-}
-
-static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
-	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
-
-	if (reg1->idx != EXTRA_REG_NONE) {
-		wrmsrl(reg1->reg, 0);
-		wrmsrl(reg1->reg + 1, reg1->config);
-		wrmsrl(reg1->reg + 2, reg2->config);
-		wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
-	}
-	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
-}
-
-static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
-	&format_attr_event.attr,
-	&format_attr_umask.attr,
-	&format_attr_edge.attr,
-	&format_attr_inv.attr,
-	&format_attr_thresh8.attr,
-	&format_attr_match.attr,
-	&format_attr_mask.attr,
-	NULL,
-};
-
-static struct attribute_group nhmex_uncore_sbox_format_group = {
-	.name = "format",
-	.attrs = nhmex_uncore_sbox_formats_attr,
-};
-
-static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
-	NHMEX_UNCORE_OPS_COMMON_INIT(),
-	.enable_event = nhmex_sbox_msr_enable_event,
-	.hw_config = nhmex_sbox_hw_config,
-	.get_constraint = uncore_get_constraint,
-	.put_constraint = uncore_put_constraint,
-};
-
-static struct intel_uncore_type nhmex_uncore_sbox = {
-	.name = "sbox",
-	.num_counters = 4,
-	.num_boxes = 2,
-	.perf_ctr_bits = 48,
-	.event_ctl = NHMEX_S0_MSR_PMON_CTL0,
-	.perf_ctr = NHMEX_S0_MSR_PMON_CTR0,
-	.event_mask = NHMEX_PMON_RAW_EVENT_MASK,
-	.box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
-	.msr_offset = NHMEX_S_MSR_OFFSET,
-	.pair_ctr_ctl = 1,
-	.num_shared_regs = 1,
-	.ops = &nhmex_uncore_sbox_ops,
-	.format_group = &nhmex_uncore_sbox_format_group
-};
-
-enum {
-	EXTRA_REG_NHMEX_M_FILTER,
-	EXTRA_REG_NHMEX_M_DSP,
-	EXTRA_REG_NHMEX_M_ISS,
-	EXTRA_REG_NHMEX_M_MAP,
-	EXTRA_REG_NHMEX_M_MSC_THR,
-	EXTRA_REG_NHMEX_M_PGT,
-	EXTRA_REG_NHMEX_M_PLD,
-	EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
-};
-
-static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
-	MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
-	MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
-	MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
-	MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
-	/* event 0xa uses two extra registers */
-	MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
-	MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
-	MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
-	/* events 0xd ~ 0x10 use the same extra register */
-	MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
-	MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
-	MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
-	MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
-	MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
-	MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
-	MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
-	MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
-	MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
-	EVENT_EXTRA_END
-};
-
-/* Nehalem-EX or Westmere-EX ? */
-static bool uncore_nhmex;
-
-static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
-{
-	struct intel_uncore_extra_reg *er;
-	unsigned long flags;
-	bool ret = false;
-	u64 mask;
-
-	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
-		er = &box->shared_regs[idx];
-		raw_spin_lock_irqsave(&er->lock, flags);
-		if (!atomic_read(&er->ref) || er->config == config) {
-			atomic_inc(&er->ref);
-			er->config = config;
-			ret = true;
-		}
-		raw_spin_unlock_irqrestore(&er->lock, flags);
-
-		return ret;
-	}
-	/*
-	 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
-	 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
-	 * fields which are shared.
-	 */
-	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
-	if (WARN_ON_ONCE(idx >= 4))
-		return false;
-
-	/* mask of the shared fields */
-	if (uncore_nhmex)
-		mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
-	else
-		mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
-	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
-
-	raw_spin_lock_irqsave(&er->lock, flags);
-	/* add mask of the non-shared field if it's in use */
-	if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
-		if (uncore_nhmex)
-			mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
-		else
-			mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
-	}
-
-	if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
-		atomic_add(1 << (idx * 8), &er->ref);
-		if (uncore_nhmex)
-			mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
-				NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
-		else
-			mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
-				WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
-		er->config &= ~mask;
-		er->config |= (config & mask);
-		ret = true;
-	}
-	raw_spin_unlock_irqrestore(&er->lock, flags);
-
-	return ret;
-}
-
-static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
-{
-	struct intel_uncore_extra_reg *er;
-
-	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
-		er = &box->shared_regs[idx];
-		atomic_dec(&er->ref);
-		return;
-	}
-
-	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
-	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
-	atomic_sub(1 << (idx * 8), &er->ref);
-}
-
-static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
-{
-	struct hw_perf_event *hwc = &event->hw;
-	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
-	u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
-	u64 config = reg1->config;
-
-	/* get the non-shared control bits and shift them */
-	idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
-	if (uncore_nhmex)
-		config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
-	else
-		config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
-	if (new_idx > orig_idx) {
-		idx = new_idx - orig_idx;
-		config <<= 3 * idx;
-	} else {
-		idx = orig_idx - new_idx;
-		config >>= 3 * idx;
-	}
-
-	/* add the shared control bits back */
-	if (uncore_nhmex)
-		config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
-	else
-		config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
-	config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
-	if (modify) {
-		/* adjust the main event selector */
-		if (new_idx > orig_idx)
-			hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
-		else
-			hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
-		reg1->config = config;
-		reg1->idx = ~0xff | new_idx;
-	}
-	return config;
-}
-
-static struct event_constraint *
-nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
-{
-	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
-	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
-	int i, idx[2], alloc = 0;
-	u64 config1 = reg1->config;
-
-	idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
-	idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
-again:
-	for (i = 0; i < 2; i++) {
-		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
-			idx[i] = 0xff;
-
-		if (idx[i] == 0xff)
-			continue;
-
-		if (!nhmex_mbox_get_shared_reg(box, idx[i],
-				__BITS_VALUE(config1, i, 32)))
-			goto fail;
-		alloc |= (0x1 << i);
-	}
-
-	/* for the match/mask registers */
-	if (reg2->idx != EXTRA_REG_NONE &&
-	    (uncore_box_is_fake(box) || !reg2->alloc) &&
-	    !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
-		goto fail;
-
-	/*
-	 * If it's a fake box -- as per validate_{group,event}() we
-	 * shouldn't touch event state and we can avoid doing so
-	 * since both will only call get_event_constraints() once
-	 * on each event, this avoids the need for reg->alloc.
-	 */
-	if (!uncore_box_is_fake(box)) {
-		if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
-			nhmex_mbox_alter_er(event, idx[0], true);
-		reg1->alloc |= alloc;
-		if (reg2->idx != EXTRA_REG_NONE)
-			reg2->alloc = 1;
-	}
-	return NULL;
-fail:
-	if (idx[0] != 0xff && !(alloc & 0x1) &&
-	    idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
-		/*
-		 * events 0xd ~ 0x10 are functional identical, but are
-		 * controlled by different fields in the ZDP_CTL_FVC
-		 * register. If we failed to take one field, try the
-		 * rest 3 choices.
-		 */
-		BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
-		idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
-		idx[0] = (idx[0] + 1) % 4;
-		idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
-		if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
-			config1 = nhmex_mbox_alter_er(event, idx[0], false);
-			goto again;
-		}
-	}
-
-	if (alloc & 0x1)
-		nhmex_mbox_put_shared_reg(box, idx[0]);
-	if (alloc & 0x2)
-		nhmex_mbox_put_shared_reg(box, idx[1]);
-	return &uncore_constraint_empty;
-}
-
-static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
-{
-	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
-	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
-
-	if (uncore_box_is_fake(box))
-		return;
-
-	if (reg1->alloc & 0x1)
-		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
-	if (reg1->alloc & 0x2)
-		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
-	reg1->alloc = 0;
-
-	if (reg2->alloc) {
-		nhmex_mbox_put_shared_reg(box, reg2->idx);
-		reg2->alloc = 0;
-	}
-}
-
-static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
-{
-	if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
-		return er->idx;
-	return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
-}
-
-static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
-{
-	struct intel_uncore_type *type = box->pmu->type;
-	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
-	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
-	struct extra_reg *er;
-	unsigned msr;
-	int reg_idx = 0;
-	/*
-	 * The mbox events may require 2 extra MSRs at the most. But only
-	 * the lower 32 bits in these MSRs are significant, so we can use
-	 * config1 to pass two MSRs' config.
-	 */
-	for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
-		if (er->event != (event->hw.config & er->config_mask))
-			continue;
-		if (event->attr.config1 & ~er->valid_mask)
-			return -EINVAL;
-
-		msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
-		if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
-			return -EINVAL;
-
-		/* always use the 32~63 bits to pass the PLD config */
-		if (er->idx == EXTRA_REG_NHMEX_M_PLD)
-			reg_idx = 1;
-		else if (WARN_ON_ONCE(reg_idx > 0))
-			return -EINVAL;
-
-		reg1->idx &= ~(0xff << (reg_idx * 8));
-		reg1->reg &= ~(0xffff << (reg_idx * 16));
-		reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
-		reg1->reg |= msr << (reg_idx * 16);
-		reg1->config = event->attr.config1;
-		reg_idx++;
-	}
-	/*
-	 * The mbox only provides ability to perform address matching
-	 * for the PLD events.
-	 */
-	if (reg_idx == 2) {
-		reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
-		if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
-			reg2->config = event->attr.config2;
-		else
-			reg2->config = ~0ULL;
-		if (box->pmu->pmu_idx == 0)
-			reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
-		else
-			reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
-	}
-	return 0;
-}
-
-static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
-{
-	struct intel_uncore_extra_reg *er;
-	unsigned long flags;
-	u64 config;
-
-	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
-		return box->shared_regs[idx].config;
-
-	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
-	raw_spin_lock_irqsave(&er->lock, flags);
-	config = er->config;
-	raw_spin_unlock_irqrestore(&er->lock, flags);
-	return config;
-}
-
-static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
-	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
-	int idx;
-
-	idx = __BITS_VALUE(reg1->idx, 0, 8);
-	if (idx != 0xff)
-		wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
-			nhmex_mbox_shared_reg_config(box, idx));
-	idx = __BITS_VALUE(reg1->idx, 1, 8);
-	if (idx != 0xff)
-		wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
-			nhmex_mbox_shared_reg_config(box, idx));
-
-	if (reg2->idx != EXTRA_REG_NONE) {
-		wrmsrl(reg2->reg, 0);
-		if (reg2->config != ~0ULL) {
-			wrmsrl(reg2->reg + 1,
-				reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
-			wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
-				(reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
-			wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
-		}
-	}
-
-	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
-}
-
-DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
-DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
-DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
-DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
-DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
-DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
-DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
-DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
-DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
-DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
-
-static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
-	&format_attr_count_mode.attr,
-	&format_attr_storage_mode.attr,
-	&format_attr_wrap_mode.attr,
-	&format_attr_flag_mode.attr,
-	&format_attr_inc_sel.attr,
-	&format_attr_set_flag_sel.attr,
-	&format_attr_filter_cfg_en.attr,
-	&format_attr_filter_match.attr,
-	&format_attr_filter_mask.attr,
-	&format_attr_dsp.attr,
-	&format_attr_thr.attr,
-	&format_attr_fvc.attr,
-	&format_attr_pgt.attr,
-	&format_attr_map.attr,
-	&format_attr_iss.attr,
-	&format_attr_pld.attr,
-	NULL,
-};
-
-static struct attribute_group nhmex_uncore_mbox_format_group = {
-	.name = "format",
-	.attrs = nhmex_uncore_mbox_formats_attr,
-};
-
-static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
-	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
-	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
-	{ /* end: all zeroes */ },
-};
-
-static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
-	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
-	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
-	{ /* end: all zeroes */ },
-};
-
-static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
-	NHMEX_UNCORE_OPS_COMMON_INIT(),
-	.enable_event = nhmex_mbox_msr_enable_event,
-	.hw_config = nhmex_mbox_hw_config,
-	.get_constraint = nhmex_mbox_get_constraint,
-	.put_constraint = nhmex_mbox_put_constraint,
-};
-
-static struct intel_uncore_type nhmex_uncore_mbox = {
-	.name = "mbox",
-	.num_counters = 6,
-	.num_boxes = 2,
-	.perf_ctr_bits = 48,
-	.event_ctl = NHMEX_M0_MSR_PMU_CTL0,
-	.perf_ctr = NHMEX_M0_MSR_PMU_CNT0,
-	.event_mask = NHMEX_M_PMON_RAW_EVENT_MASK,
-	.box_ctl = NHMEX_M0_MSR_GLOBAL_CTL,
-	.msr_offset = NHMEX_M_MSR_OFFSET,
-	.pair_ctr_ctl = 1,
-	.num_shared_regs = 8,
-	.event_descs = nhmex_uncore_mbox_events,
-	.ops = &nhmex_uncore_mbox_ops,
-	.format_group = &nhmex_uncore_mbox_format_group,
-};
-
-static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
-
-	/* adjust the main event selector and extra register index */
-	if (reg1->idx % 2) {
-		reg1->idx--;
-		hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
-	} else {
-		reg1->idx++;
-		hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
-	}
-
-	/* adjust extra register config */
-	switch (reg1->idx % 6) {
-	case 2:
-		/* shift the 8~15 bits to the 0~7 bits */
-		reg1->config >>= 8;
-		break;
-	case 3:
-		/* shift the 0~7 bits to the 8~15 bits */
-		reg1->config <<= 8;
-		break;
-	};
-}
-
-/*
- * Each rbox has 4 event set which monitor PQI port 0~3 or 4~7.
- * An event set consists of 6 events, the 3rd and 4th events in
- * an event set use the same extra register. So an event set uses
- * 5 extra registers.
- */
-static struct event_constraint *
-nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
-	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
-	struct intel_uncore_extra_reg *er;
-	unsigned long flags;
-	int idx, er_idx;
-	u64 config1;
-	bool ok = false;
-
-	if (!uncore_box_is_fake(box) && reg1->alloc)
-		return NULL;
-
-	idx = reg1->idx % 6;
-	config1 = reg1->config;
-again:
-	er_idx = idx;
-	/* the 3rd and 4th events use the same extra register */
-	if (er_idx > 2)
-		er_idx--;
-	er_idx += (reg1->idx / 6) * 5;
-
-	er = &box->shared_regs[er_idx];
-	raw_spin_lock_irqsave(&er->lock, flags);
-	if (idx < 2) {
-		if (!atomic_read(&er->ref) || er->config == reg1->config) {
-			atomic_inc(&er->ref);
-			er->config = reg1->config;
-			ok = true;
-		}
-	} else if (idx == 2 || idx == 3) {
-		/*
-		 * these two events use different fields in a extra register,
-		 * the 0~7 bits and the 8~15 bits respectively.
-		 */
-		u64 mask = 0xff << ((idx - 2) * 8);
-		if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
-		    !((er->config ^ config1) & mask)) {
-			atomic_add(1 << ((idx - 2) * 8), &er->ref);
-			er->config &= ~mask;
-			er->config |= config1 & mask;
-			ok = true;
-		}
-	} else {
-		if (!atomic_read(&er->ref) ||
-		    (er->config == (hwc->config >> 32) &&
-		     er->config1 == reg1->config &&
-		     er->config2 == reg2->config)) {
-			atomic_inc(&er->ref);
-			er->config = (hwc->config >> 32);
-			er->config1 = reg1->config;
-			er->config2 = reg2->config;
-			ok = true;
-		}
-	}
-	raw_spin_unlock_irqrestore(&er->lock, flags);
-
-	if (!ok) {
-		/*
-		 * The Rbox events are always in pairs. The paired
-		 * events are functional identical, but use different
-		 * extra registers. If we failed to take an extra
-		 * register, try the alternative.
-		 */
-		idx ^= 1;
-		if (idx != reg1->idx % 6) {
-			if (idx == 2)
-				config1 >>= 8;
-			else if (idx == 3)
-				config1 <<= 8;
-			goto again;
-		}
-	} else {
-		if (!uncore_box_is_fake(box)) {
-			if (idx != reg1->idx % 6)
-				nhmex_rbox_alter_er(box, event);
-			reg1->alloc = 1;
-		}
-		return NULL;
-	}
-	return &uncore_constraint_empty;
-}
-
-static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
-{
-	struct intel_uncore_extra_reg *er;
-	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
-	int idx, er_idx;
-
-	if (uncore_box_is_fake(box) || !reg1->alloc)
-		return;
-
-	idx = reg1->idx % 6;
-	er_idx = idx;
-	if (er_idx > 2)
-		er_idx--;
-	er_idx += (reg1->idx / 6) * 5;
-
-	er = &box->shared_regs[er_idx];
-	if (idx == 2 || idx == 3)
-		atomic_sub(1 << ((idx - 2) * 8), &er->ref);
-	else
-		atomic_dec(&er->ref);
-
-	reg1->alloc = 0;
-}
-
-static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
-	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
-	int idx;
-
-	idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
-		NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
-	if (idx >= 0x18)
-		return -EINVAL;
-
-	reg1->idx = idx;
-	reg1->config = event->attr.config1;
-
-	switch (idx % 6) {
-	case 4:
-	case 5:
-		hwc->config |= event->attr.config & (~0ULL << 32);
-		reg2->config = event->attr.config2;
-		break;
-	};
-	return 0;
-}
-
-static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
-	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
-	int idx, port;
-
-	idx = reg1->idx;
-	port = idx / 6 + box->pmu->pmu_idx * 4;
-
-	switch (idx % 6) {
-	case 0:
-		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
-		break;
-	case 1:
-		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
-		break;
-	case 2:
-	case 3:
-		wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
-			uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
-		break;
-	case 4:
-		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
-			hwc->config >> 32);
-		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
-		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
-		break;
-	case 5:
-		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
-			hwc->config >> 32);
-		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
-		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
-		break;
-	};
-
-	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
-		(hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
-}
-
-DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
-DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
-DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
-DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
-DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
-
-static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
-	&format_attr_event5.attr,
-	&format_attr_xbr_mm_cfg.attr,
-	&format_attr_xbr_match.attr,
-	&format_attr_xbr_mask.attr,
-	&format_attr_qlx_cfg.attr,
-	&format_attr_iperf_cfg.attr,
-	NULL,
-};
-
-static struct attribute_group nhmex_uncore_rbox_format_group = {
-	.name = "format",
-	.attrs = nhmex_uncore_rbox_formats_attr,
-};
-
-static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
-	INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
-	INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"),
-	INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
-	INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
-	INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
-	INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
-	{ /* end: all zeroes */ },
-};
-
-static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
-	NHMEX_UNCORE_OPS_COMMON_INIT(),
-	.enable_event = nhmex_rbox_msr_enable_event,
-	.hw_config = nhmex_rbox_hw_config,
-	.get_constraint = nhmex_rbox_get_constraint,
-	.put_constraint = nhmex_rbox_put_constraint,
-};
-
-static struct intel_uncore_type nhmex_uncore_rbox = {
-	.name = "rbox",
-	.num_counters = 8,
-	.num_boxes = 2,
-	.perf_ctr_bits = 48,
-	.event_ctl = NHMEX_R_MSR_PMON_CTL0,
-	.perf_ctr = NHMEX_R_MSR_PMON_CNT0,
-	.event_mask = NHMEX_R_PMON_RAW_EVENT_MASK,
-	.box_ctl = NHMEX_R_MSR_GLOBAL_CTL,
-	.msr_offset = NHMEX_R_MSR_OFFSET,
-	.pair_ctr_ctl = 1,
-	.num_shared_regs = 20,
-	.event_descs = nhmex_uncore_rbox_events,
-	.ops = &nhmex_uncore_rbox_ops,
-	.format_group = &nhmex_uncore_rbox_format_group
-};
-
-static struct intel_uncore_type *nhmex_msr_uncores[] = {
-	&nhmex_uncore_ubox,
-	&nhmex_uncore_cbox,
-	&nhmex_uncore_bbox,
-	&nhmex_uncore_sbox,
-	&nhmex_uncore_mbox,
-	&nhmex_uncore_rbox,
-	&nhmex_uncore_wbox,
-	NULL,
-};
-/* end of Nehalem-EX uncore support */
-
 static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
 {
 	struct hw_perf_event *hwc = &event->hw;
@@ -2195,9 +1167,8 @@ static void __init uncore_cpu_setup(void *dummy)
 
 static int __init uncore_cpu_init(void)
 {
-	int ret, max_cores;
+	int ret;
 
-	max_cores = boot_cpu_data.x86_max_cores;
 	switch (boot_cpu_data.x86_model) {
 	case 26: /* Nehalem */
 	case 30:
@@ -2213,13 +1184,8 @@ static int __init uncore_cpu_init(void)
 		snbep_uncore_cpu_init();
 		break;
 	case 46: /* Nehalem-EX */
-		uncore_nhmex = true;
 	case 47: /* Westmere-EX aka. Xeon E7 */
-		if (!uncore_nhmex)
-			nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
-		if (nhmex_uncore_cbox.num_boxes > max_cores)
-			nhmex_uncore_cbox.num_boxes = max_cores;
-		uncore_msr_uncores = nhmex_msr_uncores;
+		nhmex_uncore_cpu_init();
 		break;
 	case 62: /* IvyTown */
 		ivt_uncore_cpu_init();
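
The new file's nhmex_uncore_cpu_init() lies beyond the excerpt shown further down, but the lines deleted from uncore_cpu_init() here tell us what it must carry over. A sketch, reconstructed purely from the removed hunk above (not copied from the new file):

	/* Sketch only -- reconstructed from the deleted hunk, not quoted
	 * from perf_event_intel_uncore_nhmex.c: */
	void nhmex_uncore_cpu_init(void)
	{
		if (boot_cpu_data.x86_model == 46)	/* Nehalem-EX */
			uncore_nhmex = true;
		else					/* Westmere-EX */
			nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
		if (nhmex_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
			nhmex_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
		uncore_msr_uncores = nhmex_msr_uncores;
	}

The old case 46 deliberately fell through to case 47 after setting uncore_nhmex, so a model check inside a shared init function is the natural replacement for the fall-through once both cases collapse into one call.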
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 538be93b5f19..b91559936d49 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -24,188 +24,6 @@
 
 #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
 
-/* NHM-EX event control */
-#define NHMEX_PMON_CTL_EV_SEL_MASK	0x000000ff
-#define NHMEX_PMON_CTL_UMASK_MASK	0x0000ff00
-#define NHMEX_PMON_CTL_EN_BIT0		(1 << 0)
-#define NHMEX_PMON_CTL_EDGE_DET		(1 << 18)
-#define NHMEX_PMON_CTL_PMI_EN		(1 << 20)
-#define NHMEX_PMON_CTL_EN_BIT22		(1 << 22)
-#define NHMEX_PMON_CTL_INVERT		(1 << 23)
-#define NHMEX_PMON_CTL_TRESH_MASK	0xff000000
-#define NHMEX_PMON_RAW_EVENT_MASK	(NHMEX_PMON_CTL_EV_SEL_MASK | \
-					 NHMEX_PMON_CTL_UMASK_MASK | \
-					 NHMEX_PMON_CTL_EDGE_DET | \
-					 NHMEX_PMON_CTL_INVERT | \
-					 NHMEX_PMON_CTL_TRESH_MASK)
-
-/* NHM-EX Ubox */
-#define NHMEX_U_MSR_PMON_GLOBAL_CTL		0xc00
-#define NHMEX_U_MSR_PMON_CTR			0xc11
-#define NHMEX_U_MSR_PMON_EV_SEL			0xc10
-
-#define NHMEX_U_PMON_GLOBAL_EN			(1 << 0)
-#define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL	0x0000001e
-#define NHMEX_U_PMON_GLOBAL_EN_ALL		(1 << 28)
-#define NHMEX_U_PMON_GLOBAL_RST_ALL		(1 << 29)
-#define NHMEX_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
-
-#define NHMEX_U_PMON_RAW_EVENT_MASK		\
-		(NHMEX_PMON_CTL_EV_SEL_MASK |	\
-		 NHMEX_PMON_CTL_EDGE_DET)
-
-/* NHM-EX Cbox */
-#define NHMEX_C0_MSR_PMON_GLOBAL_CTL		0xd00
-#define NHMEX_C0_MSR_PMON_CTR0			0xd11
-#define NHMEX_C0_MSR_PMON_EV_SEL0		0xd10
-#define NHMEX_C_MSR_OFFSET			0x20
-
-/* NHM-EX Bbox */
-#define NHMEX_B0_MSR_PMON_GLOBAL_CTL		0xc20
-#define NHMEX_B0_MSR_PMON_CTR0			0xc31
-#define NHMEX_B0_MSR_PMON_CTL0			0xc30
-#define NHMEX_B_MSR_OFFSET			0x40
-#define NHMEX_B0_MSR_MATCH			0xe45
-#define NHMEX_B0_MSR_MASK			0xe46
-#define NHMEX_B1_MSR_MATCH			0xe4d
-#define NHMEX_B1_MSR_MASK			0xe4e
-
-#define NHMEX_B_PMON_CTL_EN			(1 << 0)
-#define NHMEX_B_PMON_CTL_EV_SEL_SHIFT		1
-#define NHMEX_B_PMON_CTL_EV_SEL_MASK		\
-		(0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT)
-#define NHMEX_B_PMON_CTR_SHIFT			6
-#define NHMEX_B_PMON_CTR_MASK			\
-		(0x3 << NHMEX_B_PMON_CTR_SHIFT)
-#define NHMEX_B_PMON_RAW_EVENT_MASK		\
-		(NHMEX_B_PMON_CTL_EV_SEL_MASK | \
-		 NHMEX_B_PMON_CTR_MASK)
-
-/* NHM-EX Sbox */
-#define NHMEX_S0_MSR_PMON_GLOBAL_CTL		0xc40
-#define NHMEX_S0_MSR_PMON_CTR0			0xc51
-#define NHMEX_S0_MSR_PMON_CTL0			0xc50
-#define NHMEX_S_MSR_OFFSET			0x80
-#define NHMEX_S0_MSR_MM_CFG			0xe48
-#define NHMEX_S0_MSR_MATCH			0xe49
-#define NHMEX_S0_MSR_MASK			0xe4a
-#define NHMEX_S1_MSR_MM_CFG			0xe58
-#define NHMEX_S1_MSR_MATCH			0xe59
-#define NHMEX_S1_MSR_MASK			0xe5a
-
-#define NHMEX_S_PMON_MM_CFG_EN			(0x1ULL << 63)
-#define NHMEX_S_EVENT_TO_R_PROG_EV		0
-
-/* NHM-EX Mbox */
-#define NHMEX_M0_MSR_GLOBAL_CTL			0xca0
-#define NHMEX_M0_MSR_PMU_DSP			0xca5
-#define NHMEX_M0_MSR_PMU_ISS			0xca6
-#define NHMEX_M0_MSR_PMU_MAP			0xca7
-#define NHMEX_M0_MSR_PMU_MSC_THR		0xca8
-#define NHMEX_M0_MSR_PMU_PGT			0xca9
-#define NHMEX_M0_MSR_PMU_PLD			0xcaa
-#define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC		0xcab
-#define NHMEX_M0_MSR_PMU_CTL0			0xcb0
-#define NHMEX_M0_MSR_PMU_CNT0			0xcb1
-#define NHMEX_M_MSR_OFFSET			0x40
-#define NHMEX_M0_MSR_PMU_MM_CFG			0xe54
-#define NHMEX_M1_MSR_PMU_MM_CFG			0xe5c
-
-#define NHMEX_M_PMON_MM_CFG_EN			(1ULL << 63)
-#define NHMEX_M_PMON_ADDR_MATCH_MASK		0x3ffffffffULL
-#define NHMEX_M_PMON_ADDR_MASK_MASK		0x7ffffffULL
-#define NHMEX_M_PMON_ADDR_MASK_SHIFT		34
-
-#define NHMEX_M_PMON_CTL_EN			(1 << 0)
-#define NHMEX_M_PMON_CTL_PMI_EN			(1 << 1)
-#define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT	2
-#define NHMEX_M_PMON_CTL_COUNT_MODE_MASK	\
-	(0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT)
-#define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT	4
-#define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK	\
-	(0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT)
-#define NHMEX_M_PMON_CTL_WRAP_MODE		(1 << 6)
-#define NHMEX_M_PMON_CTL_FLAG_MODE		(1 << 7)
-#define NHMEX_M_PMON_CTL_INC_SEL_SHIFT		9
-#define NHMEX_M_PMON_CTL_INC_SEL_MASK		\
-	(0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
-#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT	19
-#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK	\
-	(0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT)
-#define NHMEX_M_PMON_RAW_EVENT_MASK			\
-		(NHMEX_M_PMON_CTL_COUNT_MODE_MASK |	\
-		 NHMEX_M_PMON_CTL_STORAGE_MODE_MASK |	\
-		 NHMEX_M_PMON_CTL_WRAP_MODE |		\
-		 NHMEX_M_PMON_CTL_FLAG_MODE |		\
-		 NHMEX_M_PMON_CTL_INC_SEL_MASK |	\
-		 NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK)
-
-#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 11) - 1) | (1 << 23))
-#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7ULL << (11 + 3 * (n)))
-
-#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 12) - 1) | (1 << 24))
-#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7ULL << (12 + 3 * (n)))
-
-/*
- * use the 9~13 bits to select event If the 7th bit is not set,
- * otherwise use the 19~21 bits to select event.
- */
-#define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
-#define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \
-				NHMEX_M_PMON_CTL_FLAG_MODE)
-#define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \
-			   NHMEX_M_PMON_CTL_FLAG_MODE)
-#define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \
-				NHMEX_M_PMON_CTL_FLAG_MODE)
-#define MBOX_INC_SEL_EXTAR_REG(c, r) \
-		EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \
-				MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r)
-#define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \
-		EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \
-				MBOX_SET_FLAG_SEL_MASK, \
-				(u64)-1, NHMEX_M_##r)
-
-/* NHM-EX Rbox */
-#define NHMEX_R_MSR_GLOBAL_CTL			0xe00
-#define NHMEX_R_MSR_PMON_CTL0			0xe10
-#define NHMEX_R_MSR_PMON_CNT0			0xe11
-#define NHMEX_R_MSR_OFFSET			0x20
-
-#define NHMEX_R_MSR_PORTN_QLX_CFG(n)		\
-		((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4))
-#define NHMEX_R_MSR_PORTN_IPERF_CFG0(n)		(0xe04 + (n))
-#define NHMEX_R_MSR_PORTN_IPERF_CFG1(n)		(0xe24 + (n))
-#define NHMEX_R_MSR_PORTN_XBR_OFFSET(n)		\
-		(((n) < 4 ? 0 : 0x10) + (n) * 4)
-#define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n)	\
-		(0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
-#define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n)	\
-		(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1)
-#define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n)	\
-		(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2)
-#define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n)	\
-		(0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
-#define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n)	\
-		(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1)
-#define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n)	\
-		(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2)
-
-#define NHMEX_R_PMON_CTL_EN			(1 << 0)
-#define NHMEX_R_PMON_CTL_EV_SEL_SHIFT		1
-#define NHMEX_R_PMON_CTL_EV_SEL_MASK		\
-		(0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT)
-#define NHMEX_R_PMON_CTL_PMI_EN			(1 << 6)
-#define NHMEX_R_PMON_RAW_EVENT_MASK		NHMEX_R_PMON_CTL_EV_SEL_MASK
-
-/* NHM-EX Wbox */
-#define NHMEX_W_MSR_GLOBAL_CTL			0xc80
-#define NHMEX_W_MSR_PMON_CNT0			0xc90
-#define NHMEX_W_MSR_PMON_EVT_SEL0		0xc91
-#define NHMEX_W_MSR_PMON_FIXED_CTR		0x394
-#define NHMEX_W_MSR_PMON_FIXED_CTL		0x395
-
-#define NHMEX_W_PMON_GLOBAL_FIXED_EN		(1ULL << 31)
-
 struct intel_uncore_ops;
 struct intel_uncore_pmu;
 struct intel_uncore_box;
@@ -514,3 +332,6 @@ int snbep_uncore_pci_init(void);
 void snbep_uncore_cpu_init(void);
 int ivt_uncore_pci_init(void);
 void ivt_uncore_cpu_init(void);
+
+/* perf_event_intel_uncore_nhmex.c */
+void nhmex_uncore_cpu_init(void);
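
The NHMEX_*_RAW_EVENT_MASK values leaving this header define which attr.config bits the generic uncore code accepts for a raw event. As a worked illustration (the 0x2c event and 0x0f umask here are placeholder values, not taken from this commit), a well-formed NHM-EX control value is built only from fields covered by NHMEX_PMON_RAW_EVENT_MASK:

	/* Hypothetical encoding example; 0x2c/0x0f/8 are placeholders. */
	u64 ctl = 0;
	ctl |= 0x2cULL & NHMEX_PMON_CTL_EV_SEL_MASK;		/* event select, config:0-7 */
	ctl |= (0x0fULL << 8) & NHMEX_PMON_CTL_UMASK_MASK;	/* unit mask, config:8-15 */
	ctl |= NHMEX_PMON_CTL_EDGE_DET;				/* edge detect, config:18 */
	ctl |= (8ULL << 24) & NHMEX_PMON_CTL_TRESH_MASK;	/* threshold, config:24-31 */
	/* (ctl & ~NHMEX_PMON_RAW_EVENT_MASK) == 0 holds, so the config
	 * passes validation; the enable bit is OR'ed in later by the
	 * enable_event callbacks seen in the uncore.c hunk above. */

The bit positions line up with the sysfs format attributes ("config:0-7", "config:8-15", "config:18", "config:24-31") that the same commit moves out of uncore.c.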
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c
new file mode 100644
index 000000000000..93b11a8f110d
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c
@@ -0,0 +1,1221 @@
1 | /* Nehalem-EX/Westmere-EX uncore support */ | ||
2 | #include "perf_event_intel_uncore.h" | ||
3 | |||
4 | /* NHM-EX event control */ | ||
5 | #define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff | ||
6 | #define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00 | ||
7 | #define NHMEX_PMON_CTL_EN_BIT0 (1 << 0) | ||
8 | #define NHMEX_PMON_CTL_EDGE_DET (1 << 18) | ||
9 | #define NHMEX_PMON_CTL_PMI_EN (1 << 20) | ||
10 | #define NHMEX_PMON_CTL_EN_BIT22 (1 << 22) | ||
11 | #define NHMEX_PMON_CTL_INVERT (1 << 23) | ||
12 | #define NHMEX_PMON_CTL_TRESH_MASK 0xff000000 | ||
13 | #define NHMEX_PMON_RAW_EVENT_MASK (NHMEX_PMON_CTL_EV_SEL_MASK | \ | ||
14 | NHMEX_PMON_CTL_UMASK_MASK | \ | ||
15 | NHMEX_PMON_CTL_EDGE_DET | \ | ||
16 | NHMEX_PMON_CTL_INVERT | \ | ||
17 | NHMEX_PMON_CTL_TRESH_MASK) | ||
18 | |||
19 | /* NHM-EX Ubox */ | ||
20 | #define NHMEX_U_MSR_PMON_GLOBAL_CTL 0xc00 | ||
21 | #define NHMEX_U_MSR_PMON_CTR 0xc11 | ||
22 | #define NHMEX_U_MSR_PMON_EV_SEL 0xc10 | ||
23 | |||
24 | #define NHMEX_U_PMON_GLOBAL_EN (1 << 0) | ||
25 | #define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL 0x0000001e | ||
26 | #define NHMEX_U_PMON_GLOBAL_EN_ALL (1 << 28) | ||
27 | #define NHMEX_U_PMON_GLOBAL_RST_ALL (1 << 29) | ||
28 | #define NHMEX_U_PMON_GLOBAL_FRZ_ALL (1 << 31) | ||
29 | |||
30 | #define NHMEX_U_PMON_RAW_EVENT_MASK \ | ||
31 | (NHMEX_PMON_CTL_EV_SEL_MASK | \ | ||
32 | NHMEX_PMON_CTL_EDGE_DET) | ||
33 | |||
34 | /* NHM-EX Cbox */ | ||
35 | #define NHMEX_C0_MSR_PMON_GLOBAL_CTL 0xd00 | ||
36 | #define NHMEX_C0_MSR_PMON_CTR0 0xd11 | ||
37 | #define NHMEX_C0_MSR_PMON_EV_SEL0 0xd10 | ||
38 | #define NHMEX_C_MSR_OFFSET 0x20 | ||
39 | |||
40 | /* NHM-EX Bbox */ | ||
41 | #define NHMEX_B0_MSR_PMON_GLOBAL_CTL 0xc20 | ||
42 | #define NHMEX_B0_MSR_PMON_CTR0 0xc31 | ||
43 | #define NHMEX_B0_MSR_PMON_CTL0 0xc30 | ||
44 | #define NHMEX_B_MSR_OFFSET 0x40 | ||
45 | #define NHMEX_B0_MSR_MATCH 0xe45 | ||
46 | #define NHMEX_B0_MSR_MASK 0xe46 | ||
47 | #define NHMEX_B1_MSR_MATCH 0xe4d | ||
48 | #define NHMEX_B1_MSR_MASK 0xe4e | ||
49 | |||
50 | #define NHMEX_B_PMON_CTL_EN (1 << 0) | ||
51 | #define NHMEX_B_PMON_CTL_EV_SEL_SHIFT 1 | ||
52 | #define NHMEX_B_PMON_CTL_EV_SEL_MASK \ | ||
53 | (0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT) | ||
54 | #define NHMEX_B_PMON_CTR_SHIFT 6 | ||
55 | #define NHMEX_B_PMON_CTR_MASK \ | ||
56 | (0x3 << NHMEX_B_PMON_CTR_SHIFT) | ||
57 | #define NHMEX_B_PMON_RAW_EVENT_MASK \ | ||
58 | (NHMEX_B_PMON_CTL_EV_SEL_MASK | \ | ||
59 | NHMEX_B_PMON_CTR_MASK) | ||
60 | |||
61 | /* NHM-EX Sbox */ | ||
62 | #define NHMEX_S0_MSR_PMON_GLOBAL_CTL 0xc40 | ||
63 | #define NHMEX_S0_MSR_PMON_CTR0 0xc51 | ||
64 | #define NHMEX_S0_MSR_PMON_CTL0 0xc50 | ||
65 | #define NHMEX_S_MSR_OFFSET 0x80 | ||
66 | #define NHMEX_S0_MSR_MM_CFG 0xe48 | ||
67 | #define NHMEX_S0_MSR_MATCH 0xe49 | ||
68 | #define NHMEX_S0_MSR_MASK 0xe4a | ||
69 | #define NHMEX_S1_MSR_MM_CFG 0xe58 | ||
70 | #define NHMEX_S1_MSR_MATCH 0xe59 | ||
71 | #define NHMEX_S1_MSR_MASK 0xe5a | ||
72 | |||
73 | #define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63) | ||
74 | #define NHMEX_S_EVENT_TO_R_PROG_EV 0 | ||
75 | |||
76 | /* NHM-EX Mbox */ | ||
77 | #define NHMEX_M0_MSR_GLOBAL_CTL 0xca0 | ||
78 | #define NHMEX_M0_MSR_PMU_DSP 0xca5 | ||
79 | #define NHMEX_M0_MSR_PMU_ISS 0xca6 | ||
80 | #define NHMEX_M0_MSR_PMU_MAP 0xca7 | ||
81 | #define NHMEX_M0_MSR_PMU_MSC_THR 0xca8 | ||
82 | #define NHMEX_M0_MSR_PMU_PGT 0xca9 | ||
83 | #define NHMEX_M0_MSR_PMU_PLD 0xcaa | ||
84 | #define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC 0xcab | ||
85 | #define NHMEX_M0_MSR_PMU_CTL0 0xcb0 | ||
86 | #define NHMEX_M0_MSR_PMU_CNT0 0xcb1 | ||
87 | #define NHMEX_M_MSR_OFFSET 0x40 | ||
88 | #define NHMEX_M0_MSR_PMU_MM_CFG 0xe54 | ||
89 | #define NHMEX_M1_MSR_PMU_MM_CFG 0xe5c | ||
90 | |||
91 | #define NHMEX_M_PMON_MM_CFG_EN (1ULL << 63) | ||
92 | #define NHMEX_M_PMON_ADDR_MATCH_MASK 0x3ffffffffULL | ||
93 | #define NHMEX_M_PMON_ADDR_MASK_MASK 0x7ffffffULL | ||
94 | #define NHMEX_M_PMON_ADDR_MASK_SHIFT 34 | ||
95 | |||
96 | #define NHMEX_M_PMON_CTL_EN (1 << 0) | ||
97 | #define NHMEX_M_PMON_CTL_PMI_EN (1 << 1) | ||
98 | #define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT 2 | ||
99 | #define NHMEX_M_PMON_CTL_COUNT_MODE_MASK \ | ||
100 | (0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT) | ||
101 | #define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT 4 | ||
102 | #define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK \ | ||
103 | (0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT) | ||
104 | #define NHMEX_M_PMON_CTL_WRAP_MODE (1 << 6) | ||
105 | #define NHMEX_M_PMON_CTL_FLAG_MODE (1 << 7) | ||
106 | #define NHMEX_M_PMON_CTL_INC_SEL_SHIFT 9 | ||
107 | #define NHMEX_M_PMON_CTL_INC_SEL_MASK \ | ||
108 | (0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT) | ||
109 | #define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT 19 | ||
110 | #define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK \ | ||
111 | (0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | ||
112 | #define NHMEX_M_PMON_RAW_EVENT_MASK \ | ||
113 | (NHMEX_M_PMON_CTL_COUNT_MODE_MASK | \ | ||
114 | NHMEX_M_PMON_CTL_STORAGE_MODE_MASK | \ | ||
115 | NHMEX_M_PMON_CTL_WRAP_MODE | \ | ||
116 | NHMEX_M_PMON_CTL_FLAG_MODE | \ | ||
117 | NHMEX_M_PMON_CTL_INC_SEL_MASK | \ | ||
118 | NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK) | ||
119 | |||
120 | #define NHMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 11) - 1) | (1 << 23)) | ||
121 | #define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (11 + 3 * (n))) | ||
122 | |||
123 | #define WSMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 12) - 1) | (1 << 24)) | ||
124 | #define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (12 + 3 * (n))) | ||
125 | |||
126 | /* | ||
127 | * Use bits 9~13 to select the event if bit 7 is not set; | ||
128 | * otherwise use bits 19~21 to select the event. | ||
129 | */ | ||
130 | #define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT) | ||
131 | #define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \ | ||
132 | NHMEX_M_PMON_CTL_FLAG_MODE) | ||
133 | #define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \ | ||
134 | NHMEX_M_PMON_CTL_FLAG_MODE) | ||
135 | #define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \ | ||
136 | NHMEX_M_PMON_CTL_FLAG_MODE) | ||
137 | #define MBOX_INC_SEL_EXTRA_REG(c, r) \ | ||
138 | EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \ | ||
139 | MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r) | ||
140 | #define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \ | ||
141 | EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \ | ||
142 | MBOX_SET_FLAG_SEL_MASK, \ | ||
143 | (u64)-1, NHMEX_M_##r) | ||
144 | |||
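
A worked example of the encodings above (an editorial sketch, not part of the patch; the SKETCH_* macros merely duplicate the definitions so the asserts stand alone):

#define SKETCH_FLAG_MODE		(1 << 7)
#define SKETCH_INC_SEL_SHIFT		9
#define SKETCH_SET_FLAG_SEL_SHIFT	19
#define SKETCH_INC_SEL(x)	((x) << SKETCH_INC_SEL_SHIFT)
#define SKETCH_SET_FLAG_SEL(x)	(((x) << SKETCH_SET_FLAG_SEL_SHIFT) | \
				 SKETCH_FLAG_MODE)

/* inc_sel = 0xd lands in bits 9~13: 0xd << 9 == 0x1a00 */
_Static_assert(SKETCH_INC_SEL(0xd) == 0x1a00, "inc_sel encoding");
/* set_flag_sel = 0x1 lands in bits 19~21, with flag-mode bit 7 set */
_Static_assert(SKETCH_SET_FLAG_SEL(0x1) == 0x80080, "set_flag_sel encoding");
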
145 | /* NHM-EX Rbox */ | ||
146 | #define NHMEX_R_MSR_GLOBAL_CTL 0xe00 | ||
147 | #define NHMEX_R_MSR_PMON_CTL0 0xe10 | ||
148 | #define NHMEX_R_MSR_PMON_CNT0 0xe11 | ||
149 | #define NHMEX_R_MSR_OFFSET 0x20 | ||
150 | |||
151 | #define NHMEX_R_MSR_PORTN_QLX_CFG(n) \ | ||
152 | ((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4)) | ||
153 | #define NHMEX_R_MSR_PORTN_IPERF_CFG0(n) (0xe04 + (n)) | ||
154 | #define NHMEX_R_MSR_PORTN_IPERF_CFG1(n) (0xe24 + (n)) | ||
155 | #define NHMEX_R_MSR_PORTN_XBR_OFFSET(n) \ | ||
156 | (((n) < 4 ? 0 : 0x10) + (n) * 4) | ||
157 | #define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) \ | ||
158 | (0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) | ||
159 | #define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n) \ | ||
160 | (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1) | ||
161 | #define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n) \ | ||
162 | (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2) | ||
163 | #define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) \ | ||
164 | (0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) | ||
165 | #define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n) \ | ||
166 | (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1) | ||
167 | #define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n) \ | ||
168 | (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2) | ||
169 | |||
170 | #define NHMEX_R_PMON_CTL_EN (1 << 0) | ||
171 | #define NHMEX_R_PMON_CTL_EV_SEL_SHIFT 1 | ||
172 | #define NHMEX_R_PMON_CTL_EV_SEL_MASK \ | ||
173 | (0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT) | ||
174 | #define NHMEX_R_PMON_CTL_PMI_EN (1 << 6) | ||
175 | #define NHMEX_R_PMON_RAW_EVENT_MASK NHMEX_R_PMON_CTL_EV_SEL_MASK | ||
176 | |||
177 | /* NHM-EX Wbox */ | ||
178 | #define NHMEX_W_MSR_GLOBAL_CTL 0xc80 | ||
179 | #define NHMEX_W_MSR_PMON_CNT0 0xc90 | ||
180 | #define NHMEX_W_MSR_PMON_EVT_SEL0 0xc91 | ||
181 | #define NHMEX_W_MSR_PMON_FIXED_CTR 0x394 | ||
182 | #define NHMEX_W_MSR_PMON_FIXED_CTL 0x395 | ||
183 | |||
184 | #define NHMEX_W_PMON_GLOBAL_FIXED_EN (1ULL << 31) | ||
185 | |||
186 | #define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \ | ||
187 | ((1ULL << (n)) - 1))) | ||
188 | |||
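
A minimal user-space sketch of how __BITS_VALUE() gets used throughout this file, e.g. to unpack the two 8-bit extra-register indices packed into reg1->idx (the packed value below is illustrative only):

#include <stdio.h>
#include <stdint.h>

#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
		((1ULL << (n)) - 1)))

int main(void)
{
	uint64_t idx = 0x12ULL | (0x34ULL << 8);	/* two packed 8-bit fields */

	printf("%#x\n", (unsigned)__BITS_VALUE(idx, 0, 8));	/* prints 0x12 */
	printf("%#x\n", (unsigned)__BITS_VALUE(idx, 1, 8));	/* prints 0x34 */
	return 0;
}
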
189 | DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); | ||
190 | DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5"); | ||
191 | DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); | ||
192 | DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); | ||
193 | DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); | ||
194 | DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31"); | ||
195 | DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7"); | ||
196 | DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63"); | ||
197 | DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63"); | ||
198 | |||
199 | static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box) | ||
200 | { | ||
201 | wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL); | ||
202 | } | ||
203 | |||
204 | static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box) | ||
205 | { | ||
206 | unsigned msr = uncore_msr_box_ctl(box); | ||
207 | u64 config; | ||
208 | |||
209 | if (msr) { | ||
210 | rdmsrl(msr, config); | ||
211 | config &= ~((1ULL << uncore_num_counters(box)) - 1); | ||
212 | /* WBox has a fixed counter */ | ||
213 | if (uncore_msr_fixed_ctl(box)) | ||
214 | config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN; | ||
215 | wrmsrl(msr, config); | ||
216 | } | ||
217 | } | ||
218 | |||
219 | static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box) | ||
220 | { | ||
221 | unsigned msr = uncore_msr_box_ctl(box); | ||
222 | u64 config; | ||
223 | |||
224 | if (msr) { | ||
225 | rdmsrl(msr, config); | ||
226 | config |= (1ULL << uncore_num_counters(box)) - 1; | ||
227 | /* WBox has a fixed counter */ | ||
228 | if (uncore_msr_fixed_ctl(box)) | ||
229 | config |= NHMEX_W_PMON_GLOBAL_FIXED_EN; | ||
230 | wrmsrl(msr, config); | ||
231 | } | ||
232 | } | ||
233 | |||
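
The two functions above simply set or clear one enable bit per counter in the box control MSR; a quick check of the mask arithmetic (editorial sketch, using the 6-counter Cbox defined below as the example):

/* (1ULL << uncore_num_counters(box)) - 1 with 6 counters */
_Static_assert(((1ULL << 6) - 1) == 0x3f, "one enable bit per counter");
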
234 | static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
235 | { | ||
236 | wrmsrl(event->hw.config_base, 0); | ||
237 | } | ||
238 | |||
239 | static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
240 | { | ||
241 | struct hw_perf_event *hwc = &event->hw; | ||
242 | |||
243 | if (hwc->idx >= UNCORE_PMC_IDX_FIXED) | ||
244 | wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0); | ||
245 | else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0) | ||
246 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); | ||
247 | else | ||
248 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); | ||
249 | } | ||
250 | |||
251 | #define NHMEX_UNCORE_OPS_COMMON_INIT() \ | ||
252 | .init_box = nhmex_uncore_msr_init_box, \ | ||
253 | .disable_box = nhmex_uncore_msr_disable_box, \ | ||
254 | .enable_box = nhmex_uncore_msr_enable_box, \ | ||
255 | .disable_event = nhmex_uncore_msr_disable_event, \ | ||
256 | .read_counter = uncore_msr_read_counter | ||
257 | |||
258 | static struct intel_uncore_ops nhmex_uncore_ops = { | ||
259 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
260 | .enable_event = nhmex_uncore_msr_enable_event, | ||
261 | }; | ||
262 | |||
263 | static struct attribute *nhmex_uncore_ubox_formats_attr[] = { | ||
264 | &format_attr_event.attr, | ||
265 | &format_attr_edge.attr, | ||
266 | NULL, | ||
267 | }; | ||
268 | |||
269 | static struct attribute_group nhmex_uncore_ubox_format_group = { | ||
270 | .name = "format", | ||
271 | .attrs = nhmex_uncore_ubox_formats_attr, | ||
272 | }; | ||
273 | |||
274 | static struct intel_uncore_type nhmex_uncore_ubox = { | ||
275 | .name = "ubox", | ||
276 | .num_counters = 1, | ||
277 | .num_boxes = 1, | ||
278 | .perf_ctr_bits = 48, | ||
279 | .event_ctl = NHMEX_U_MSR_PMON_EV_SEL, | ||
280 | .perf_ctr = NHMEX_U_MSR_PMON_CTR, | ||
281 | .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK, | ||
282 | .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL, | ||
283 | .ops = &nhmex_uncore_ops, | ||
284 | .format_group = &nhmex_uncore_ubox_format_group | ||
285 | }; | ||
286 | |||
287 | static struct attribute *nhmex_uncore_cbox_formats_attr[] = { | ||
288 | &format_attr_event.attr, | ||
289 | &format_attr_umask.attr, | ||
290 | &format_attr_edge.attr, | ||
291 | &format_attr_inv.attr, | ||
292 | &format_attr_thresh8.attr, | ||
293 | NULL, | ||
294 | }; | ||
295 | |||
296 | static struct attribute_group nhmex_uncore_cbox_format_group = { | ||
297 | .name = "format", | ||
298 | .attrs = nhmex_uncore_cbox_formats_attr, | ||
299 | }; | ||
300 | |||
301 | /* msr offset for each instance of cbox */ | ||
302 | static unsigned nhmex_cbox_msr_offsets[] = { | ||
303 | 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0, | ||
304 | }; | ||
305 | |||
306 | static struct intel_uncore_type nhmex_uncore_cbox = { | ||
307 | .name = "cbox", | ||
308 | .num_counters = 6, | ||
309 | .num_boxes = 10, | ||
310 | .perf_ctr_bits = 48, | ||
311 | .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0, | ||
312 | .perf_ctr = NHMEX_C0_MSR_PMON_CTR0, | ||
313 | .event_mask = NHMEX_PMON_RAW_EVENT_MASK, | ||
314 | .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL, | ||
315 | .msr_offsets = nhmex_cbox_msr_offsets, | ||
316 | .pair_ctr_ctl = 1, | ||
317 | .ops = &nhmex_uncore_ops, | ||
318 | .format_group = &nhmex_uncore_cbox_format_group | ||
319 | }; | ||
320 | |||
321 | static struct uncore_event_desc nhmex_uncore_wbox_events[] = { | ||
322 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"), | ||
323 | { /* end: all zeroes */ }, | ||
324 | }; | ||
325 | |||
326 | static struct intel_uncore_type nhmex_uncore_wbox = { | ||
327 | .name = "wbox", | ||
328 | .num_counters = 4, | ||
329 | .num_boxes = 1, | ||
330 | .perf_ctr_bits = 48, | ||
331 | .event_ctl = NHMEX_W_MSR_PMON_CNT0, | ||
332 | .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0, | ||
333 | .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR, | ||
334 | .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL, | ||
335 | .event_mask = NHMEX_PMON_RAW_EVENT_MASK, | ||
336 | .box_ctl = NHMEX_W_MSR_GLOBAL_CTL, | ||
337 | .pair_ctr_ctl = 1, | ||
338 | .event_descs = nhmex_uncore_wbox_events, | ||
339 | .ops = &nhmex_uncore_ops, | ||
340 | .format_group = &nhmex_uncore_cbox_format_group | ||
341 | }; | ||
342 | |||
343 | static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
344 | { | ||
345 | struct hw_perf_event *hwc = &event->hw; | ||
346 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
347 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
348 | int ctr, ev_sel; | ||
349 | |||
350 | ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >> | ||
351 | NHMEX_B_PMON_CTR_SHIFT; | ||
352 | ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >> | ||
353 | NHMEX_B_PMON_CTL_EV_SEL_SHIFT; | ||
354 | |||
355 | /* events that do not use the match/mask registers */ | ||
356 | if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) || | ||
357 | (ctr == 2 && ev_sel != 0x4) || ctr == 3) | ||
358 | return 0; | ||
359 | |||
360 | if (box->pmu->pmu_idx == 0) | ||
361 | reg1->reg = NHMEX_B0_MSR_MATCH; | ||
362 | else | ||
363 | reg1->reg = NHMEX_B1_MSR_MATCH; | ||
364 | reg1->idx = 0; | ||
365 | reg1->config = event->attr.config1; | ||
366 | reg2->config = event->attr.config2; | ||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
371 | { | ||
372 | struct hw_perf_event *hwc = &event->hw; | ||
373 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
374 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
375 | |||
376 | if (reg1->idx != EXTRA_REG_NONE) { | ||
377 | wrmsrl(reg1->reg, reg1->config); | ||
378 | wrmsrl(reg1->reg + 1, reg2->config); | ||
379 | } | ||
380 | wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | | ||
381 | (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK)); | ||
382 | } | ||
383 | |||
384 | /* | ||
385 | * The Bbox has 4 counters, but each counter monitors different events. | ||
386 | * Use bits 6-7 in the event config to select counter. | ||
387 | */ | ||
388 | static struct event_constraint nhmex_uncore_bbox_constraints[] = { | ||
389 | EVENT_CONSTRAINT(0 , 1, 0xc0), | ||
390 | EVENT_CONSTRAINT(0x40, 2, 0xc0), | ||
391 | EVENT_CONSTRAINT(0x80, 4, 0xc0), | ||
392 | EVENT_CONSTRAINT(0xc0, 8, 0xc0), | ||
393 | EVENT_CONSTRAINT_END, | ||
394 | }; | ||
395 | |||
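
An editorial sketch of how the counter-select field pairs with the constraint table above: bits 6-7 of the config name the counter, and the second EVENT_CONSTRAINT() argument is the matching counter bitmask (the SKETCH_* copies keep it self-contained):

#define SKETCH_B_CTR_SHIFT	6
#define SKETCH_B_CTR_MASK	(0x3 << SKETCH_B_CTR_SHIFT)

static inline unsigned bbox_counter_mask(unsigned long long config)
{
	unsigned ctr = (config & SKETCH_B_CTR_MASK) >> SKETCH_B_CTR_SHIFT;

	return 1U << ctr;	/* 0x00->0x1, 0x40->0x2, 0x80->0x4, 0xc0->0x8 */
}
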
396 | static struct attribute *nhmex_uncore_bbox_formats_attr[] = { | ||
397 | &format_attr_event5.attr, | ||
398 | &format_attr_counter.attr, | ||
399 | &format_attr_match.attr, | ||
400 | &format_attr_mask.attr, | ||
401 | NULL, | ||
402 | }; | ||
403 | |||
404 | static struct attribute_group nhmex_uncore_bbox_format_group = { | ||
405 | .name = "format", | ||
406 | .attrs = nhmex_uncore_bbox_formats_attr, | ||
407 | }; | ||
408 | |||
409 | static struct intel_uncore_ops nhmex_uncore_bbox_ops = { | ||
410 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
411 | .enable_event = nhmex_bbox_msr_enable_event, | ||
412 | .hw_config = nhmex_bbox_hw_config, | ||
413 | .get_constraint = uncore_get_constraint, | ||
414 | .put_constraint = uncore_put_constraint, | ||
415 | }; | ||
416 | |||
417 | static struct intel_uncore_type nhmex_uncore_bbox = { | ||
418 | .name = "bbox", | ||
419 | .num_counters = 4, | ||
420 | .num_boxes = 2, | ||
421 | .perf_ctr_bits = 48, | ||
422 | .event_ctl = NHMEX_B0_MSR_PMON_CTL0, | ||
423 | .perf_ctr = NHMEX_B0_MSR_PMON_CTR0, | ||
424 | .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK, | ||
425 | .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL, | ||
426 | .msr_offset = NHMEX_B_MSR_OFFSET, | ||
427 | .pair_ctr_ctl = 1, | ||
428 | .num_shared_regs = 1, | ||
429 | .constraints = nhmex_uncore_bbox_constraints, | ||
430 | .ops = &nhmex_uncore_bbox_ops, | ||
431 | .format_group = &nhmex_uncore_bbox_format_group | ||
432 | }; | ||
433 | |||
434 | static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
435 | { | ||
436 | struct hw_perf_event *hwc = &event->hw; | ||
437 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
438 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
439 | |||
440 | /* only TO_R_PROG_EV event uses the match/mask register */ | ||
441 | if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) != | ||
442 | NHMEX_S_EVENT_TO_R_PROG_EV) | ||
443 | return 0; | ||
444 | |||
445 | if (box->pmu->pmu_idx == 0) | ||
446 | reg1->reg = NHMEX_S0_MSR_MM_CFG; | ||
447 | else | ||
448 | reg1->reg = NHMEX_S1_MSR_MM_CFG; | ||
449 | reg1->idx = 0; | ||
450 | reg1->config = event->attr.config1; | ||
451 | reg2->config = event->attr.config2; | ||
452 | return 0; | ||
453 | } | ||
454 | |||
455 | static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
456 | { | ||
457 | struct hw_perf_event *hwc = &event->hw; | ||
458 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
459 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
460 | |||
461 | if (reg1->idx != EXTRA_REG_NONE) { | ||
462 | wrmsrl(reg1->reg, 0); | ||
463 | wrmsrl(reg1->reg + 1, reg1->config); | ||
464 | wrmsrl(reg1->reg + 2, reg2->config); | ||
465 | wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN); | ||
466 | } | ||
467 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); | ||
468 | } | ||
469 | |||
470 | static struct attribute *nhmex_uncore_sbox_formats_attr[] = { | ||
471 | &format_attr_event.attr, | ||
472 | &format_attr_umask.attr, | ||
473 | &format_attr_edge.attr, | ||
474 | &format_attr_inv.attr, | ||
475 | &format_attr_thresh8.attr, | ||
476 | &format_attr_match.attr, | ||
477 | &format_attr_mask.attr, | ||
478 | NULL, | ||
479 | }; | ||
480 | |||
481 | static struct attribute_group nhmex_uncore_sbox_format_group = { | ||
482 | .name = "format", | ||
483 | .attrs = nhmex_uncore_sbox_formats_attr, | ||
484 | }; | ||
485 | |||
486 | static struct intel_uncore_ops nhmex_uncore_sbox_ops = { | ||
487 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
488 | .enable_event = nhmex_sbox_msr_enable_event, | ||
489 | .hw_config = nhmex_sbox_hw_config, | ||
490 | .get_constraint = uncore_get_constraint, | ||
491 | .put_constraint = uncore_put_constraint, | ||
492 | }; | ||
493 | |||
494 | static struct intel_uncore_type nhmex_uncore_sbox = { | ||
495 | .name = "sbox", | ||
496 | .num_counters = 4, | ||
497 | .num_boxes = 2, | ||
498 | .perf_ctr_bits = 48, | ||
499 | .event_ctl = NHMEX_S0_MSR_PMON_CTL0, | ||
500 | .perf_ctr = NHMEX_S0_MSR_PMON_CTR0, | ||
501 | .event_mask = NHMEX_PMON_RAW_EVENT_MASK, | ||
502 | .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL, | ||
503 | .msr_offset = NHMEX_S_MSR_OFFSET, | ||
504 | .pair_ctr_ctl = 1, | ||
505 | .num_shared_regs = 1, | ||
506 | .ops = &nhmex_uncore_sbox_ops, | ||
507 | .format_group = &nhmex_uncore_sbox_format_group | ||
508 | }; | ||
509 | |||
510 | enum { | ||
511 | EXTRA_REG_NHMEX_M_FILTER, | ||
512 | EXTRA_REG_NHMEX_M_DSP, | ||
513 | EXTRA_REG_NHMEX_M_ISS, | ||
514 | EXTRA_REG_NHMEX_M_MAP, | ||
515 | EXTRA_REG_NHMEX_M_MSC_THR, | ||
516 | EXTRA_REG_NHMEX_M_PGT, | ||
517 | EXTRA_REG_NHMEX_M_PLD, | ||
518 | EXTRA_REG_NHMEX_M_ZDP_CTL_FVC, | ||
519 | }; | ||
520 | |||
521 | static struct extra_reg nhmex_uncore_mbox_extra_regs[] = { | ||
522 | MBOX_INC_SEL_EXTRA_REG(0x0, DSP), | ||
523 | MBOX_INC_SEL_EXTRA_REG(0x4, MSC_THR), | ||
524 | MBOX_INC_SEL_EXTRA_REG(0x5, MSC_THR), | ||
525 | MBOX_INC_SEL_EXTRA_REG(0x9, ISS), | ||
526 | /* event 0xa uses two extra registers */ | ||
527 | MBOX_INC_SEL_EXTRA_REG(0xa, ISS), | ||
528 | MBOX_INC_SEL_EXTRA_REG(0xa, PLD), | ||
529 | MBOX_INC_SEL_EXTRA_REG(0xb, PLD), | ||
530 | /* events 0xd ~ 0x10 use the same extra register */ | ||
531 | MBOX_INC_SEL_EXTRA_REG(0xd, ZDP_CTL_FVC), | ||
532 | MBOX_INC_SEL_EXTRA_REG(0xe, ZDP_CTL_FVC), | ||
533 | MBOX_INC_SEL_EXTRA_REG(0xf, ZDP_CTL_FVC), | ||
534 | MBOX_INC_SEL_EXTRA_REG(0x10, ZDP_CTL_FVC), | ||
535 | MBOX_INC_SEL_EXTRA_REG(0x16, PGT), | ||
536 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP), | ||
537 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS), | ||
538 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT), | ||
539 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP), | ||
540 | EVENT_EXTRA_END | ||
541 | }; | ||
542 | |||
543 | /* Nehalem-EX or Westmere-EX? */ | ||
544 | static bool uncore_nhmex; | ||
545 | |||
546 | static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config) | ||
547 | { | ||
548 | struct intel_uncore_extra_reg *er; | ||
549 | unsigned long flags; | ||
550 | bool ret = false; | ||
551 | u64 mask; | ||
552 | |||
553 | if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { | ||
554 | er = &box->shared_regs[idx]; | ||
555 | raw_spin_lock_irqsave(&er->lock, flags); | ||
556 | if (!atomic_read(&er->ref) || er->config == config) { | ||
557 | atomic_inc(&er->ref); | ||
558 | er->config = config; | ||
559 | ret = true; | ||
560 | } | ||
561 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
562 | |||
563 | return ret; | ||
564 | } | ||
565 | /* | ||
566 | * The ZDP_CTL_FVC MSR has 4 fields which are used to control | ||
567 | * events 0xd ~ 0x10. Besides these 4 fields, there are additional | ||
568 | * fields which are shared. | ||
569 | */ | ||
570 | idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
571 | if (WARN_ON_ONCE(idx >= 4)) | ||
572 | return false; | ||
573 | |||
574 | /* mask of the shared fields */ | ||
575 | if (uncore_nhmex) | ||
576 | mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK; | ||
577 | else | ||
578 | mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK; | ||
579 | er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; | ||
580 | |||
581 | raw_spin_lock_irqsave(&er->lock, flags); | ||
582 | /* add mask of the non-shared field if it's in use */ | ||
583 | if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) { | ||
584 | if (uncore_nhmex) | ||
585 | mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
586 | else | ||
587 | mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
588 | } | ||
589 | |||
590 | if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) { | ||
591 | atomic_add(1 << (idx * 8), &er->ref); | ||
592 | if (uncore_nhmex) | ||
593 | mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK | | ||
594 | NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
595 | else | ||
596 | mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK | | ||
597 | WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
598 | er->config &= ~mask; | ||
599 | er->config |= (config & mask); | ||
600 | ret = true; | ||
601 | } | ||
602 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
603 | |||
604 | return ret; | ||
605 | } | ||
606 | |||
607 | static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx) | ||
608 | { | ||
609 | struct intel_uncore_extra_reg *er; | ||
610 | |||
611 | if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { | ||
612 | er = &box->shared_regs[idx]; | ||
613 | atomic_dec(&er->ref); | ||
614 | return; | ||
615 | } | ||
616 | |||
617 | idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
618 | er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; | ||
619 | atomic_sub(1 << (idx * 8), &er->ref); | ||
620 | } | ||
621 | |||
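
nhmex_mbox_get_shared_reg()/nhmex_mbox_put_shared_reg() keep four 8-bit reference counts, one per ZDP_CTL_FVC event field, packed into a single atomic_t. The same pattern as a stand-alone C11 sketch (hypothetical names, not part of the patch):

#include <stdatomic.h>

static atomic_int zdp_ref;	/* four packed 8-bit refcounts */

static void get_fvc_field(int idx)
{
	atomic_fetch_add(&zdp_ref, 1 << (idx * 8));
}

static void put_fvc_field(int idx)
{
	atomic_fetch_sub(&zdp_ref, 1 << (idx * 8));
}

/* field idx is busy iff its byte is non-zero, cf. __BITS_VALUE(ref, idx, 8) */
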
622 | static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) | ||
623 | { | ||
624 | struct hw_perf_event *hwc = &event->hw; | ||
625 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
626 | u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8); | ||
627 | u64 config = reg1->config; | ||
628 | |||
629 | /* get the non-shared control bits and shift them */ | ||
630 | idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
631 | if (uncore_nhmex) | ||
632 | config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
633 | else | ||
634 | config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
635 | if (new_idx > orig_idx) { | ||
636 | idx = new_idx - orig_idx; | ||
637 | config <<= 3 * idx; | ||
638 | } else { | ||
639 | idx = orig_idx - new_idx; | ||
640 | config >>= 3 * idx; | ||
641 | } | ||
642 | |||
643 | /* add the shared control bits back */ | ||
644 | if (uncore_nhmex) | ||
645 | config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; | ||
646 | else | ||
647 | config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; | ||
649 | if (modify) { | ||
650 | /* adjust the main event selector */ | ||
651 | if (new_idx > orig_idx) | ||
652 | hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT; | ||
653 | else | ||
654 | hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT; | ||
655 | reg1->config = config; | ||
656 | reg1->idx = ~0xff | new_idx; | ||
657 | } | ||
658 | return config; | ||
659 | } | ||
660 | |||
661 | static struct event_constraint * | ||
662 | nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
663 | { | ||
664 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
665 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
666 | int i, idx[2], alloc = 0; | ||
667 | u64 config1 = reg1->config; | ||
668 | |||
669 | idx[0] = __BITS_VALUE(reg1->idx, 0, 8); | ||
670 | idx[1] = __BITS_VALUE(reg1->idx, 1, 8); | ||
671 | again: | ||
672 | for (i = 0; i < 2; i++) { | ||
673 | if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i))) | ||
674 | idx[i] = 0xff; | ||
675 | |||
676 | if (idx[i] == 0xff) | ||
677 | continue; | ||
678 | |||
679 | if (!nhmex_mbox_get_shared_reg(box, idx[i], | ||
680 | __BITS_VALUE(config1, i, 32))) | ||
681 | goto fail; | ||
682 | alloc |= (0x1 << i); | ||
683 | } | ||
684 | |||
685 | /* for the match/mask registers */ | ||
686 | if (reg2->idx != EXTRA_REG_NONE && | ||
687 | (uncore_box_is_fake(box) || !reg2->alloc) && | ||
688 | !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config)) | ||
689 | goto fail; | ||
690 | |||
691 | /* | ||
692 | * If it's a fake box -- as per validate_{group,event}() we | ||
693 | * shouldn't touch event state and we can avoid doing so | ||
694 | * since both will only call get_event_constraints() once | ||
695 | * on each event; this avoids the need for reg->alloc. | ||
696 | */ | ||
697 | if (!uncore_box_is_fake(box)) { | ||
698 | if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) | ||
699 | nhmex_mbox_alter_er(event, idx[0], true); | ||
700 | reg1->alloc |= alloc; | ||
701 | if (reg2->idx != EXTRA_REG_NONE) | ||
702 | reg2->alloc = 1; | ||
703 | } | ||
704 | return NULL; | ||
705 | fail: | ||
706 | if (idx[0] != 0xff && !(alloc & 0x1) && | ||
707 | idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { | ||
708 | /* | ||
709 | * events 0xd ~ 0x10 are functionally identical, but are | ||
710 | * controlled by different fields in the ZDP_CTL_FVC | ||
711 | * register. If we fail to take one field, try the | ||
712 | * other 3 choices. | ||
713 | */ | ||
714 | BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff); | ||
715 | idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
716 | idx[0] = (idx[0] + 1) % 4; | ||
717 | idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
718 | if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) { | ||
719 | config1 = nhmex_mbox_alter_er(event, idx[0], false); | ||
720 | goto again; | ||
721 | } | ||
722 | } | ||
723 | |||
724 | if (alloc & 0x1) | ||
725 | nhmex_mbox_put_shared_reg(box, idx[0]); | ||
726 | if (alloc & 0x2) | ||
727 | nhmex_mbox_put_shared_reg(box, idx[1]); | ||
728 | return &uncore_constraint_empty; | ||
729 | } | ||
730 | |||
731 | static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
732 | { | ||
733 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
734 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
735 | |||
736 | if (uncore_box_is_fake(box)) | ||
737 | return; | ||
738 | |||
739 | if (reg1->alloc & 0x1) | ||
740 | nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8)); | ||
741 | if (reg1->alloc & 0x2) | ||
742 | nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8)); | ||
743 | reg1->alloc = 0; | ||
744 | |||
745 | if (reg2->alloc) { | ||
746 | nhmex_mbox_put_shared_reg(box, reg2->idx); | ||
747 | reg2->alloc = 0; | ||
748 | } | ||
749 | } | ||
750 | |||
751 | static int nhmex_mbox_extra_reg_idx(struct extra_reg *er) | ||
752 | { | ||
753 | if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) | ||
754 | return er->idx; | ||
755 | return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd; | ||
756 | } | ||
757 | |||
758 | static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
759 | { | ||
760 | struct intel_uncore_type *type = box->pmu->type; | ||
761 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
762 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
763 | struct extra_reg *er; | ||
764 | unsigned msr; | ||
765 | int reg_idx = 0; | ||
766 | /* | ||
767 | * The mbox events may require at most 2 extra MSRs. But only | ||
768 | * the lower 32 bits in these MSRs are significant, so we can use | ||
769 | * config1 to pass two MSRs' config. | ||
770 | */ | ||
771 | for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) { | ||
772 | if (er->event != (event->hw.config & er->config_mask)) | ||
773 | continue; | ||
774 | if (event->attr.config1 & ~er->valid_mask) | ||
775 | return -EINVAL; | ||
776 | |||
777 | msr = er->msr + type->msr_offset * box->pmu->pmu_idx; | ||
778 | if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff)) | ||
779 | return -EINVAL; | ||
780 | |||
781 | /* always use bits 32~63 to pass the PLD config */ | ||
782 | if (er->idx == EXTRA_REG_NHMEX_M_PLD) | ||
783 | reg_idx = 1; | ||
784 | else if (WARN_ON_ONCE(reg_idx > 0)) | ||
785 | return -EINVAL; | ||
786 | |||
787 | reg1->idx &= ~(0xff << (reg_idx * 8)); | ||
788 | reg1->reg &= ~(0xffff << (reg_idx * 16)); | ||
789 | reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8); | ||
790 | reg1->reg |= msr << (reg_idx * 16); | ||
791 | reg1->config = event->attr.config1; | ||
792 | reg_idx++; | ||
793 | } | ||
794 | /* | ||
795 | * The mbox only provides the ability to perform address matching | ||
796 | * for the PLD events. | ||
797 | */ | ||
798 | if (reg_idx == 2) { | ||
799 | reg2->idx = EXTRA_REG_NHMEX_M_FILTER; | ||
800 | if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) | ||
801 | reg2->config = event->attr.config2; | ||
802 | else | ||
803 | reg2->config = ~0ULL; | ||
804 | if (box->pmu->pmu_idx == 0) | ||
805 | reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG; | ||
806 | else | ||
807 | reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG; | ||
808 | } | ||
809 | return 0; | ||
810 | } | ||
811 | |||
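
Per the comment in nhmex_mbox_hw_config(), user space passes the contents of up to two extra MSRs in a single attr.config1 word, with the PLD value always in the upper half. A hypothetical helper (not part of the patch) showing that packing:

static inline unsigned long long mbox_pack_config1(unsigned int lo_cfg,
						   unsigned int pld_cfg)
{
	/* matches the "config1:0-31" and pld "config1:32-63" format
	 * attributes defined below */
	return ((unsigned long long)pld_cfg << 32) | lo_cfg;
}
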
812 | static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx) | ||
813 | { | ||
814 | struct intel_uncore_extra_reg *er; | ||
815 | unsigned long flags; | ||
816 | u64 config; | ||
817 | |||
818 | if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) | ||
819 | return box->shared_regs[idx].config; | ||
820 | |||
821 | er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; | ||
822 | raw_spin_lock_irqsave(&er->lock, flags); | ||
823 | config = er->config; | ||
824 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
825 | return config; | ||
826 | } | ||
827 | |||
828 | static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
829 | { | ||
830 | struct hw_perf_event *hwc = &event->hw; | ||
831 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
832 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
833 | int idx; | ||
834 | |||
835 | idx = __BITS_VALUE(reg1->idx, 0, 8); | ||
836 | if (idx != 0xff) | ||
837 | wrmsrl(__BITS_VALUE(reg1->reg, 0, 16), | ||
838 | nhmex_mbox_shared_reg_config(box, idx)); | ||
839 | idx = __BITS_VALUE(reg1->idx, 1, 8); | ||
840 | if (idx != 0xff) | ||
841 | wrmsrl(__BITS_VALUE(reg1->reg, 1, 16), | ||
842 | nhmex_mbox_shared_reg_config(box, idx)); | ||
843 | |||
844 | if (reg2->idx != EXTRA_REG_NONE) { | ||
845 | wrmsrl(reg2->reg, 0); | ||
846 | if (reg2->config != ~0ULL) { | ||
847 | wrmsrl(reg2->reg + 1, | ||
848 | reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); | ||
849 | wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & | ||
850 | (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); | ||
851 | wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); | ||
852 | } | ||
853 | } | ||
854 | |||
855 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); | ||
856 | } | ||
857 | |||
858 | DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3"); | ||
859 | DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5"); | ||
860 | DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6"); | ||
861 | DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7"); | ||
862 | DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13"); | ||
863 | DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21"); | ||
864 | DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63"); | ||
865 | DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33"); | ||
866 | DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61"); | ||
867 | DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31"); | ||
868 | DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31"); | ||
869 | DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31"); | ||
870 | DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31"); | ||
871 | DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31"); | ||
872 | DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31"); | ||
873 | DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63"); | ||
874 | |||
875 | static struct attribute *nhmex_uncore_mbox_formats_attr[] = { | ||
876 | &format_attr_count_mode.attr, | ||
877 | &format_attr_storage_mode.attr, | ||
878 | &format_attr_wrap_mode.attr, | ||
879 | &format_attr_flag_mode.attr, | ||
880 | &format_attr_inc_sel.attr, | ||
881 | &format_attr_set_flag_sel.attr, | ||
882 | &format_attr_filter_cfg_en.attr, | ||
883 | &format_attr_filter_match.attr, | ||
884 | &format_attr_filter_mask.attr, | ||
885 | &format_attr_dsp.attr, | ||
886 | &format_attr_thr.attr, | ||
887 | &format_attr_fvc.attr, | ||
888 | &format_attr_pgt.attr, | ||
889 | &format_attr_map.attr, | ||
890 | &format_attr_iss.attr, | ||
891 | &format_attr_pld.attr, | ||
892 | NULL, | ||
893 | }; | ||
894 | |||
895 | static struct attribute_group nhmex_uncore_mbox_format_group = { | ||
896 | .name = "format", | ||
897 | .attrs = nhmex_uncore_mbox_formats_attr, | ||
898 | }; | ||
899 | |||
900 | static struct uncore_event_desc nhmex_uncore_mbox_events[] = { | ||
901 | INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"), | ||
902 | INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"), | ||
903 | { /* end: all zeroes */ }, | ||
904 | }; | ||
905 | |||
906 | static struct uncore_event_desc wsmex_uncore_mbox_events[] = { | ||
907 | INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"), | ||
908 | INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"), | ||
909 | { /* end: all zeroes */ }, | ||
910 | }; | ||
911 | |||
912 | static struct intel_uncore_ops nhmex_uncore_mbox_ops = { | ||
913 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
914 | .enable_event = nhmex_mbox_msr_enable_event, | ||
915 | .hw_config = nhmex_mbox_hw_config, | ||
916 | .get_constraint = nhmex_mbox_get_constraint, | ||
917 | .put_constraint = nhmex_mbox_put_constraint, | ||
918 | }; | ||
919 | |||
920 | static struct intel_uncore_type nhmex_uncore_mbox = { | ||
921 | .name = "mbox", | ||
922 | .num_counters = 6, | ||
923 | .num_boxes = 2, | ||
924 | .perf_ctr_bits = 48, | ||
925 | .event_ctl = NHMEX_M0_MSR_PMU_CTL0, | ||
926 | .perf_ctr = NHMEX_M0_MSR_PMU_CNT0, | ||
927 | .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK, | ||
928 | .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL, | ||
929 | .msr_offset = NHMEX_M_MSR_OFFSET, | ||
930 | .pair_ctr_ctl = 1, | ||
931 | .num_shared_regs = 8, | ||
932 | .event_descs = nhmex_uncore_mbox_events, | ||
933 | .ops = &nhmex_uncore_mbox_ops, | ||
934 | .format_group = &nhmex_uncore_mbox_format_group, | ||
935 | }; | ||
936 | |||
937 | static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) | ||
938 | { | ||
939 | struct hw_perf_event *hwc = &event->hw; | ||
940 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
941 | |||
942 | /* adjust the main event selector and extra register index */ | ||
943 | if (reg1->idx % 2) { | ||
944 | reg1->idx--; | ||
945 | hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; | ||
946 | } else { | ||
947 | reg1->idx++; | ||
948 | hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; | ||
949 | } | ||
950 | |||
951 | /* adjust extra register config */ | ||
952 | switch (reg1->idx % 6) { | ||
953 | case 2: | ||
954 | /* shift bits 8~15 down to bits 0~7 */ | ||
955 | reg1->config >>= 8; | ||
956 | break; | ||
957 | case 3: | ||
958 | /* shift bits 0~7 up to bits 8~15 */ | ||
959 | reg1->config <<= 8; | ||
960 | break; | ||
961 | } | ||
962 | } | ||
963 | |||
964 | /* | ||
965 | * Each rbox has 4 event sets which monitor PQI ports 0~3 or 4~7. | ||
966 | * An event set consists of 6 events; the 3rd and 4th events in | ||
967 | * an event set use the same extra register, so an event set uses | ||
968 | * 5 extra registers. | ||
969 | */ | ||
970 | static struct event_constraint * | ||
971 | nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
972 | { | ||
973 | struct hw_perf_event *hwc = &event->hw; | ||
974 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
975 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
976 | struct intel_uncore_extra_reg *er; | ||
977 | unsigned long flags; | ||
978 | int idx, er_idx; | ||
979 | u64 config1; | ||
980 | bool ok = false; | ||
981 | |||
982 | if (!uncore_box_is_fake(box) && reg1->alloc) | ||
983 | return NULL; | ||
984 | |||
985 | idx = reg1->idx % 6; | ||
986 | config1 = reg1->config; | ||
987 | again: | ||
988 | er_idx = idx; | ||
989 | /* the 3rd and 4th events use the same extra register */ | ||
990 | if (er_idx > 2) | ||
991 | er_idx--; | ||
992 | er_idx += (reg1->idx / 6) * 5; | ||
993 | |||
994 | er = &box->shared_regs[er_idx]; | ||
995 | raw_spin_lock_irqsave(&er->lock, flags); | ||
996 | if (idx < 2) { | ||
997 | if (!atomic_read(&er->ref) || er->config == reg1->config) { | ||
998 | atomic_inc(&er->ref); | ||
999 | er->config = reg1->config; | ||
1000 | ok = true; | ||
1001 | } | ||
1002 | } else if (idx == 2 || idx == 3) { | ||
1003 | /* | ||
1004 | * these two events use different fields in an extra register: | ||
1005 | * bits 0~7 and bits 8~15 respectively. | ||
1006 | */ | ||
1007 | u64 mask = 0xff << ((idx - 2) * 8); | ||
1008 | if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) || | ||
1009 | !((er->config ^ config1) & mask)) { | ||
1010 | atomic_add(1 << ((idx - 2) * 8), &er->ref); | ||
1011 | er->config &= ~mask; | ||
1012 | er->config |= config1 & mask; | ||
1013 | ok = true; | ||
1014 | } | ||
1015 | } else { | ||
1016 | if (!atomic_read(&er->ref) || | ||
1017 | (er->config == (hwc->config >> 32) && | ||
1018 | er->config1 == reg1->config && | ||
1019 | er->config2 == reg2->config)) { | ||
1020 | atomic_inc(&er->ref); | ||
1021 | er->config = (hwc->config >> 32); | ||
1022 | er->config1 = reg1->config; | ||
1023 | er->config2 = reg2->config; | ||
1024 | ok = true; | ||
1025 | } | ||
1026 | } | ||
1027 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
1028 | |||
1029 | if (!ok) { | ||
1030 | /* | ||
1031 | * The Rbox events are always in pairs. The paired | ||
1032 | * events are functionally identical, but use different | ||
1033 | * extra registers. If we fail to take an extra | ||
1034 | * register, try the alternative. | ||
1035 | */ | ||
1036 | idx ^= 1; | ||
1037 | if (idx != reg1->idx % 6) { | ||
1038 | if (idx == 2) | ||
1039 | config1 >>= 8; | ||
1040 | else if (idx == 3) | ||
1041 | config1 <<= 8; | ||
1042 | goto again; | ||
1043 | } | ||
1044 | } else { | ||
1045 | if (!uncore_box_is_fake(box)) { | ||
1046 | if (idx != reg1->idx % 6) | ||
1047 | nhmex_rbox_alter_er(box, event); | ||
1048 | reg1->alloc = 1; | ||
1049 | } | ||
1050 | return NULL; | ||
1051 | } | ||
1052 | return &uncore_constraint_empty; | ||
1053 | } | ||
1054 | |||
1055 | static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
1056 | { | ||
1057 | struct intel_uncore_extra_reg *er; | ||
1058 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1059 | int idx, er_idx; | ||
1060 | |||
1061 | if (uncore_box_is_fake(box) || !reg1->alloc) | ||
1062 | return; | ||
1063 | |||
1064 | idx = reg1->idx % 6; | ||
1065 | er_idx = idx; | ||
1066 | if (er_idx > 2) | ||
1067 | er_idx--; | ||
1068 | er_idx += (reg1->idx / 6) * 5; | ||
1069 | |||
1070 | er = &box->shared_regs[er_idx]; | ||
1071 | if (idx == 2 || idx == 3) | ||
1072 | atomic_sub(1 << ((idx - 2) * 8), &er->ref); | ||
1073 | else | ||
1074 | atomic_dec(&er->ref); | ||
1075 | |||
1076 | reg1->alloc = 0; | ||
1077 | } | ||
1078 | |||
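
Both constraint routines above derive the shared-register slot from reg1->idx with the same arithmetic; as a stand-alone sketch (hypothetical helper, not part of the patch) with one worked value:

static inline int rbox_er_idx(int reg_idx)
{
	int idx = reg_idx % 6;		/* position within the event set */
	int er_idx = idx;

	if (er_idx > 2)			/* 3rd and 4th events share a register */
		er_idx--;
	return er_idx + (reg_idx / 6) * 5;	/* 5 extra regs per event set */
}

/* e.g. reg1->idx = 9 (2nd set, 4th event): idx = 3 -> er_idx = 2 + 5 = 7 */
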
1079 | static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
1080 | { | ||
1081 | struct hw_perf_event *hwc = &event->hw; | ||
1082 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1083 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
1084 | int idx; | ||
1085 | |||
1086 | idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >> | ||
1087 | NHMEX_R_PMON_CTL_EV_SEL_SHIFT; | ||
1088 | if (idx >= 0x18) | ||
1089 | return -EINVAL; | ||
1090 | |||
1091 | reg1->idx = idx; | ||
1092 | reg1->config = event->attr.config1; | ||
1093 | |||
1094 | switch (idx % 6) { | ||
1095 | case 4: | ||
1096 | case 5: | ||
1097 | hwc->config |= event->attr.config & (~0ULL << 32); | ||
1098 | reg2->config = event->attr.config2; | ||
1099 | break; | ||
1100 | } | ||
1101 | return 0; | ||
1102 | } | ||
1103 | |||
1104 | static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1105 | { | ||
1106 | struct hw_perf_event *hwc = &event->hw; | ||
1107 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1108 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
1109 | int idx, port; | ||
1110 | |||
1111 | idx = reg1->idx; | ||
1112 | port = idx / 6 + box->pmu->pmu_idx * 4; | ||
1113 | |||
1114 | switch (idx % 6) { | ||
1115 | case 0: | ||
1116 | wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config); | ||
1117 | break; | ||
1118 | case 1: | ||
1119 | wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config); | ||
1120 | break; | ||
1121 | case 2: | ||
1122 | case 3: | ||
1123 | wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port), | ||
1124 | uncore_shared_reg_config(box, 2 + (idx / 6) * 5)); | ||
1125 | break; | ||
1126 | case 4: | ||
1127 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port), | ||
1128 | hwc->config >> 32); | ||
1129 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config); | ||
1130 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config); | ||
1131 | break; | ||
1132 | case 5: | ||
1133 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port), | ||
1134 | hwc->config >> 32); | ||
1135 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config); | ||
1136 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config); | ||
1137 | break; | ||
1138 | } | ||
1139 | |||
1140 | wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | | ||
1141 | (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK)); | ||
1142 | } | ||
1143 | |||
1144 | DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63"); | ||
1145 | DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63"); | ||
1146 | DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63"); | ||
1147 | DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15"); | ||
1148 | DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31"); | ||
1149 | |||
1150 | static struct attribute *nhmex_uncore_rbox_formats_attr[] = { | ||
1151 | &format_attr_event5.attr, | ||
1152 | &format_attr_xbr_mm_cfg.attr, | ||
1153 | &format_attr_xbr_match.attr, | ||
1154 | &format_attr_xbr_mask.attr, | ||
1155 | &format_attr_qlx_cfg.attr, | ||
1156 | &format_attr_iperf_cfg.attr, | ||
1157 | NULL, | ||
1158 | }; | ||
1159 | |||
1160 | static struct attribute_group nhmex_uncore_rbox_format_group = { | ||
1161 | .name = "format", | ||
1162 | .attrs = nhmex_uncore_rbox_formats_attr, | ||
1163 | }; | ||
1164 | |||
1165 | static struct uncore_event_desc nhmex_uncore_rbox_events[] = { | ||
1166 | INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"), | ||
1167 | INTEL_UNCORE_EVENT_DESC(qpi1_flit_send, "event=0x6,iperf_cfg=0x80000000"), | ||
1168 | INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"), | ||
1169 | INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"), | ||
1170 | INTEL_UNCORE_EVENT_DESC(qpi0_data_response, "event=0x0,iperf_cfg=0xc4"), | ||
1171 | INTEL_UNCORE_EVENT_DESC(qpi1_data_response, "event=0x6,iperf_cfg=0xc4"), | ||
1172 | { /* end: all zeroes */ }, | ||
1173 | }; | ||
1174 | |||
1175 | static struct intel_uncore_ops nhmex_uncore_rbox_ops = { | ||
1176 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
1177 | .enable_event = nhmex_rbox_msr_enable_event, | ||
1178 | .hw_config = nhmex_rbox_hw_config, | ||
1179 | .get_constraint = nhmex_rbox_get_constraint, | ||
1180 | .put_constraint = nhmex_rbox_put_constraint, | ||
1181 | }; | ||
1182 | |||
1183 | static struct intel_uncore_type nhmex_uncore_rbox = { | ||
1184 | .name = "rbox", | ||
1185 | .num_counters = 8, | ||
1186 | .num_boxes = 2, | ||
1187 | .perf_ctr_bits = 48, | ||
1188 | .event_ctl = NHMEX_R_MSR_PMON_CTL0, | ||
1189 | .perf_ctr = NHMEX_R_MSR_PMON_CNT0, | ||
1190 | .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK, | ||
1191 | .box_ctl = NHMEX_R_MSR_GLOBAL_CTL, | ||
1192 | .msr_offset = NHMEX_R_MSR_OFFSET, | ||
1193 | .pair_ctr_ctl = 1, | ||
1194 | .num_shared_regs = 20, | ||
1195 | .event_descs = nhmex_uncore_rbox_events, | ||
1196 | .ops = &nhmex_uncore_rbox_ops, | ||
1197 | .format_group = &nhmex_uncore_rbox_format_group | ||
1198 | }; | ||
1199 | |||
1200 | static struct intel_uncore_type *nhmex_msr_uncores[] = { | ||
1201 | &nhmex_uncore_ubox, | ||
1202 | &nhmex_uncore_cbox, | ||
1203 | &nhmex_uncore_bbox, | ||
1204 | &nhmex_uncore_sbox, | ||
1205 | &nhmex_uncore_mbox, | ||
1206 | &nhmex_uncore_rbox, | ||
1207 | &nhmex_uncore_wbox, | ||
1208 | NULL, | ||
1209 | }; | ||
1210 | |||
1211 | void nhmex_uncore_cpu_init(void) | ||
1212 | { | ||
1213 | if (boot_cpu_data.x86_model == 46) | ||
1214 | uncore_nhmex = true; | ||
1215 | else | ||
1216 | nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events; | ||
1217 | if (nhmex_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) | ||
1218 | nhmex_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; | ||
1219 | uncore_msr_uncores = nhmex_msr_uncores; | ||
1220 | } | ||
1221 | /* end of Nehalem-EX uncore support */ | ||