author		Kan Liang <kan.liang@intel.com>	2016-08-16 16:09:50 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-09-10 05:18:52 -0400
commit		cd34cd97b7b4336aa2c623c37daffab264c7c6ce (patch)
tree		15ae623fa3d2a89b98bdea61485ff6d9402380fb
parent		2668c6195685f4b6f281767d10b4f4f2e32c2305 (diff)
perf/x86/intel/uncore: Add Skylake server uncore support
This patch implements the uncore monitoring driver for Skylake server. The uncore subsystem in Skylake server is similar to that of previous servers, with some differences in the config register encoding and the PCI device IDs. In addition, Skylake introduces many new boxes to reflect the MESH architecture changes.

The control registers for IIO and UPI have been extended to 64 bits, so this patch also introduces event_mask_ext to handle the upper 32 bits of the event mask.

The number of CHA boxes can vary between machines, so this patch determines it at runtime during initialization by counting the CHA devices.

Signed-off-by: Kan Liang <kan.liang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Link: http://lkml.kernel.org/r/1471378190-17276-3-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
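For context, here is a minimal user-space sketch (not part of this patch, and not kernel code) of the mask combination that uncore_pmu_event_init() performs after this change: the existing 32-bit event_mask supplies the low bits of the raw-config filter and the new event_mask_ext supplies the high 32 bits. The struct name and mask values below are illustrative only.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative stand-in for the relevant intel_uncore_type fields. */
	struct uncore_type_sketch {
		uint32_t event_mask;	 /* valid low 32 config bits */
		uint32_t event_mask_ext; /* new: valid high 32 config bits */
	};

	static uint64_t filter_config(const struct uncore_type_sketch *t, uint64_t raw)
	{
		/* Same expression as the uncore_pmu_event_init() hunk below. */
		return raw & (t->event_mask | ((uint64_t)t->event_mask_ext << 32));
	}

	int main(void)
	{
		struct uncore_type_sketch iio = {
			.event_mask	= 0xff00ffff,	/* illustrative only */
			.event_mask_ext	= 0x00007fff,	/* illustrative only */
		};
		uint64_t raw = 0xdeadbeefcafef00dULL;

		printf("filtered config: %#llx\n",
		       (unsigned long long)filter_config(&iio, raw));
		return 0;
	}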
-rw-r--r--	arch/x86/events/intel/uncore.c		9
-rw-r--r--	arch/x86/events/intel/uncore.h		3
-rw-r--r--	arch/x86/events/intel/uncore_snbep.c	589
3 files changed, 600 insertions(+), 1 deletion(-)
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 7b3cc8be3282..d9844cc74486 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -685,7 +685,8 @@ static int uncore_pmu_event_init(struct perf_event *event)
 		/* fixed counters have event field hardcoded to zero */
 		hwc->config = 0ULL;
 	} else {
-		hwc->config = event->attr.config & pmu->type->event_mask;
+		hwc->config = event->attr.config &
+			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
 		if (pmu->type->ops->hw_config) {
 			ret = pmu->type->ops->hw_config(box, event);
 			if (ret)
@@ -1323,6 +1324,11 @@ static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
 	.pci_init = skl_uncore_pci_init,
 };
 
+static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
+	.cpu_init = skx_uncore_cpu_init,
+	.pci_init = skx_uncore_pci_init,
+};
+
 static const struct x86_cpu_id intel_uncore_match[] __initconst = {
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP,	  nhm_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM,	  nhm_uncore_init),
@@ -1345,6 +1351,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,	  knl_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE,  skl_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,	  skx_uncore_init),
 	{},
 };
 
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index a43175fe4871..ad986c1e29bc 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -44,6 +44,7 @@ struct intel_uncore_type {
 	unsigned perf_ctr;
 	unsigned event_ctl;
 	unsigned event_mask;
+	unsigned event_mask_ext;
 	unsigned fixed_ctr;
 	unsigned fixed_ctl;
 	unsigned box_ctl;
@@ -381,6 +382,8 @@ int bdx_uncore_pci_init(void);
 void bdx_uncore_cpu_init(void);
 int knl_uncore_pci_init(void);
 void knl_uncore_cpu_init(void);
+int skx_uncore_pci_init(void);
+void skx_uncore_cpu_init(void);
 
 /* perf_event_intel_uncore_nhmex.c */
 void nhmex_uncore_cpu_init(void);
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 3719af52843c..272427700d48 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -268,15 +268,72 @@
 						SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
 						SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
 
+/* SKX pci bus to socket mapping */
+#define SKX_CPUNODEID			0xc0
+#define SKX_GIDNIDMAP			0xd4
+
+/* SKX CHA */
+#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)
+
+/* SKX IIO */
+#define SKX_IIO0_MSR_PMON_CTL0		0xa48
+#define SKX_IIO0_MSR_PMON_CTR0		0xa41
+#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
+#define SKX_IIO_MSR_OFFSET		0x20
+
+#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
+#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
+#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
+#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
+#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
+					 SNBEP_PMON_CTL_UMASK_MASK | \
+					 SNBEP_PMON_CTL_EDGE_DET | \
+					 SNBEP_PMON_CTL_INVERT | \
+					 SKX_PMON_CTL_TRESH_MASK)
+#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
+					 SKX_PMON_CTL_CH_MASK | \
+					 SKX_PMON_CTL_FC_MASK)
+
+/* SKX IRP */
+#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
+#define SKX_IRP0_MSR_PMON_CTR0		0xa59
+#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
+#define SKX_IRP_MSR_OFFSET		0x20
+
+/* SKX UPI */
+#define SKX_UPI_PCI_PMON_CTL0		0x350
+#define SKX_UPI_PCI_PMON_CTR0		0x318
+#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
+#define SKX_PMON_CTL_UMASK_EXT		0xff
+
+/* SKX M2M */
+#define SKX_M2M_PCI_PMON_CTL0		0x228
+#define SKX_M2M_PCI_PMON_CTR0		0x200
+#define SKX_M2M_PCI_PMON_BOX_CTL	0x258
+
 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-39");
 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
+DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
 DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
@@ -284,6 +341,8 @@ DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
+DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
+DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
@@ -292,18 +351,26 @@ DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
 DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
+DEFINE_UNCORE_FORMAT_ATTR(filter_link4, filter_link, "config1:9-12");
 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
 DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
 DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
+DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
+DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
+DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
+DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
+DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
 DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
 DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
 DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
 DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
+DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
+DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
 DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
 DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
 DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
@@ -3209,3 +3276,525 @@ int bdx_uncore_pci_init(void)
 }
 
 /* end of BDX uncore support */
+
+/* SKX uncore support */
+
+static struct intel_uncore_type skx_uncore_ubox = {
+	.name = "ubox",
+	.num_counters = 2,
+	.num_boxes = 1,
+	.perf_ctr_bits = 48,
+	.fixed_ctr_bits = 48,
+	.perf_ctr = HSWEP_U_MSR_PMON_CTR0,
+	.event_ctl = HSWEP_U_MSR_PMON_CTL0,
+	.event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
+	.fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
+	.fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
+	.ops = &ivbep_uncore_msr_ops,
+	.format_group = &ivbep_uncore_ubox_format_group,
+};
+
+static struct attribute *skx_uncore_cha_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_tid_en.attr,
+	&format_attr_inv.attr,
+	&format_attr_thresh8.attr,
+	&format_attr_filter_tid4.attr,
+	&format_attr_filter_link4.attr,
+	&format_attr_filter_state5.attr,
+	&format_attr_filter_rem.attr,
+	&format_attr_filter_loc.attr,
+	&format_attr_filter_nm.attr,
+	&format_attr_filter_all_op.attr,
+	&format_attr_filter_not_nm.attr,
+	&format_attr_filter_opc_0.attr,
+	&format_attr_filter_opc_1.attr,
+	&format_attr_filter_nc.attr,
+	&format_attr_filter_c6.attr,
+	&format_attr_filter_isoc.attr,
+	NULL,
+};
+
+static struct attribute_group skx_uncore_chabox_format_group = {
+	.name = "format",
+	.attrs = skx_uncore_cha_formats_attr,
+};
+
+static struct event_constraint skx_uncore_chabox_constraints[] = {
+	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
+	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
+	EVENT_CONSTRAINT_END
+};
+
+static struct extra_reg skx_uncore_cha_extra_regs[] = {
+	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x8134, 0xffff, 0x4),
+};
+
+static u64 skx_cha_filter_mask(int fields)
+{
+	u64 mask = 0;
+
+	if (fields & 0x1)
+		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
+	if (fields & 0x2)
+		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
+	if (fields & 0x4)
+		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
+	return mask;
+}
+
+static struct event_constraint *
+skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
+}
+
+static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+	struct extra_reg *er;
+	int idx = 0;
+
+	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
+		if (er->event != (event->hw.config & er->config_mask))
+			continue;
+		idx |= er->idx;
+	}
+
+	if (idx) {
+		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
+			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
+		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
+		reg1->idx = idx;
+	}
+	return 0;
+}
+
+static struct intel_uncore_ops skx_uncore_chabox_ops = {
+	/* There is no frz_en for chabox ctl */
+	.init_box = ivbep_uncore_msr_init_box,
+	.disable_box = snbep_uncore_msr_disable_box,
+	.enable_box = snbep_uncore_msr_enable_box,
+	.disable_event = snbep_uncore_msr_disable_event,
+	.enable_event = hswep_cbox_enable_event,
+	.read_counter = uncore_msr_read_counter,
+	.hw_config = skx_cha_hw_config,
+	.get_constraint = skx_cha_get_constraint,
+	.put_constraint = snbep_cbox_put_constraint,
+};
+
+static struct intel_uncore_type skx_uncore_chabox = {
+	.name = "cha",
+	.num_counters = 4,
+	.perf_ctr_bits = 48,
+	.event_ctl = HSWEP_C0_MSR_PMON_CTL0,
+	.perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
+	.event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
+	.box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
+	.msr_offset = HSWEP_CBO_MSR_OFFSET,
+	.num_shared_regs = 1,
+	.constraints = skx_uncore_chabox_constraints,
+	.ops = &skx_uncore_chabox_ops,
+	.format_group = &skx_uncore_chabox_format_group,
+};
+
+static struct attribute *skx_uncore_iio_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_inv.attr,
+	&format_attr_thresh9.attr,
+	&format_attr_ch_mask.attr,
+	&format_attr_fc_mask.attr,
+	NULL,
+};
+
+static struct attribute_group skx_uncore_iio_format_group = {
+	.name = "format",
+	.attrs = skx_uncore_iio_formats_attr,
+};
+
+static struct event_constraint skx_uncore_iio_constraints[] = {
+	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
+	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
+	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
+	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
+	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
+	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
+	EVENT_CONSTRAINT_END
+};
+
+static void skx_iio_enable_event(struct intel_uncore_box *box,
+				 struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+}
+
+static struct intel_uncore_ops skx_uncore_iio_ops = {
+	.init_box = ivbep_uncore_msr_init_box,
+	.disable_box = snbep_uncore_msr_disable_box,
+	.enable_box = snbep_uncore_msr_enable_box,
+	.disable_event = snbep_uncore_msr_disable_event,
+	.enable_event = skx_iio_enable_event,
+	.read_counter = uncore_msr_read_counter,
+};
+
+static struct intel_uncore_type skx_uncore_iio = {
+	.name = "iio",
+	.num_counters = 4,
+	.num_boxes = 5,
+	.perf_ctr_bits = 48,
+	.event_ctl = SKX_IIO0_MSR_PMON_CTL0,
+	.perf_ctr = SKX_IIO0_MSR_PMON_CTR0,
+	.event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
+	.event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
+	.box_ctl = SKX_IIO0_MSR_PMON_BOX_CTL,
+	.msr_offset = SKX_IIO_MSR_OFFSET,
+	.constraints = skx_uncore_iio_constraints,
+	.ops = &skx_uncore_iio_ops,
+	.format_group = &skx_uncore_iio_format_group,
+};
+
+static struct attribute *skx_uncore_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_inv.attr,
+	&format_attr_thresh8.attr,
+	NULL,
+};
+
+static struct attribute_group skx_uncore_format_group = {
+	.name = "format",
+	.attrs = skx_uncore_formats_attr,
+};
+
+static struct intel_uncore_type skx_uncore_irp = {
+	.name = "irp",
+	.num_counters = 2,
+	.num_boxes = 5,
+	.perf_ctr_bits = 48,
+	.event_ctl = SKX_IRP0_MSR_PMON_CTL0,
+	.perf_ctr = SKX_IRP0_MSR_PMON_CTR0,
+	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+	.box_ctl = SKX_IRP0_MSR_PMON_BOX_CTL,
+	.msr_offset = SKX_IRP_MSR_OFFSET,
+	.ops = &skx_uncore_iio_ops,
+	.format_group = &skx_uncore_format_group,
+};
+
+static struct intel_uncore_ops skx_uncore_pcu_ops = {
+	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
+	.hw_config = hswep_pcu_hw_config,
+	.get_constraint = snbep_pcu_get_constraint,
+	.put_constraint = snbep_pcu_put_constraint,
+};
+
+static struct intel_uncore_type skx_uncore_pcu = {
+	.name = "pcu",
+	.num_counters = 4,
+	.num_boxes = 1,
+	.perf_ctr_bits = 48,
+	.perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
+	.event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
+	.event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
+	.box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
+	.num_shared_regs = 1,
+	.ops = &skx_uncore_pcu_ops,
+	.format_group = &snbep_uncore_pcu_format_group,
+};
+
+static struct intel_uncore_type *skx_msr_uncores[] = {
+	&skx_uncore_ubox,
+	&skx_uncore_chabox,
+	&skx_uncore_iio,
+	&skx_uncore_irp,
+	&skx_uncore_pcu,
+	NULL,
+};
+
+static int skx_count_chabox(void)
+{
+	struct pci_dev *chabox_dev = NULL;
+	int bus, count = 0;
+
+	while (1) {
+		chabox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x208d, chabox_dev);
+		if (!chabox_dev)
+			break;
+		if (count == 0)
+			bus = chabox_dev->bus->number;
+		if (bus != chabox_dev->bus->number)
+			break;
+		count++;
+	}
+
+	pci_dev_put(chabox_dev);
+	return count;
+}
+
+void skx_uncore_cpu_init(void)
+{
+	skx_uncore_chabox.num_boxes = skx_count_chabox();
+	uncore_msr_uncores = skx_msr_uncores;
+}
+
+static struct intel_uncore_type skx_uncore_imc = {
+	.name = "imc",
+	.num_counters = 4,
+	.num_boxes = 6,
+	.perf_ctr_bits = 48,
+	.fixed_ctr_bits = 48,
+	.fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
+	.fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
+	.event_descs = hswep_uncore_imc_events,
+	.perf_ctr = SNBEP_PCI_PMON_CTR0,
+	.event_ctl = SNBEP_PCI_PMON_CTL0,
+	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
+	.ops = &ivbep_uncore_pci_ops,
+	.format_group = &skx_uncore_format_group,
+};
+
+static struct attribute *skx_upi_uncore_formats_attr[] = {
+	&format_attr_event_ext.attr,
+	&format_attr_umask_ext.attr,
+	&format_attr_edge.attr,
+	&format_attr_inv.attr,
+	&format_attr_thresh8.attr,
+	NULL,
+};
+
+static struct attribute_group skx_upi_uncore_format_group = {
+	.name = "format",
+	.attrs = skx_upi_uncore_formats_attr,
+};
+
+static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
+{
+	struct pci_dev *pdev = box->pci_dev;
+
+	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
+	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
+}
+
+static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
+	.init_box = skx_upi_uncore_pci_init_box,
+	.disable_box = snbep_uncore_pci_disable_box,
+	.enable_box = snbep_uncore_pci_enable_box,
+	.disable_event = snbep_uncore_pci_disable_event,
+	.enable_event = snbep_uncore_pci_enable_event,
+	.read_counter = snbep_uncore_pci_read_counter,
+};
+
+static struct intel_uncore_type skx_uncore_upi = {
+	.name = "upi",
+	.num_counters = 4,
+	.num_boxes = 3,
+	.perf_ctr_bits = 48,
+	.perf_ctr = SKX_UPI_PCI_PMON_CTR0,
+	.event_ctl = SKX_UPI_PCI_PMON_CTL0,
+	.event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
+	.event_mask_ext = SKX_PMON_CTL_UMASK_EXT,
+	.box_ctl = SKX_UPI_PCI_PMON_BOX_CTL,
+	.ops = &skx_upi_uncore_pci_ops,
+	.format_group = &skx_upi_uncore_format_group,
+};
+
+static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
+{
+	struct pci_dev *pdev = box->pci_dev;
+
+	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
+	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
+}
+
+static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
+	.init_box = skx_m2m_uncore_pci_init_box,
+	.disable_box = snbep_uncore_pci_disable_box,
+	.enable_box = snbep_uncore_pci_enable_box,
+	.disable_event = snbep_uncore_pci_disable_event,
+	.enable_event = snbep_uncore_pci_enable_event,
+	.read_counter = snbep_uncore_pci_read_counter,
+};
+
+static struct intel_uncore_type skx_uncore_m2m = {
+	.name = "m2m",
+	.num_counters = 4,
+	.num_boxes = 2,
+	.perf_ctr_bits = 48,
+	.perf_ctr = SKX_M2M_PCI_PMON_CTR0,
+	.event_ctl = SKX_M2M_PCI_PMON_CTL0,
+	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+	.box_ctl = SKX_M2M_PCI_PMON_BOX_CTL,
+	.ops = &skx_m2m_uncore_pci_ops,
+	.format_group = &skx_uncore_format_group,
+};
+
+static struct event_constraint skx_uncore_m2pcie_constraints[] = {
+	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+	EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type skx_uncore_m2pcie = {
+	.name = "m2pcie",
+	.num_counters = 4,
+	.num_boxes = 4,
+	.perf_ctr_bits = 48,
+	.constraints = skx_uncore_m2pcie_constraints,
+	.perf_ctr = SNBEP_PCI_PMON_CTR0,
+	.event_ctl = SNBEP_PCI_PMON_CTL0,
+	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
+	.ops = &ivbep_uncore_pci_ops,
+	.format_group = &skx_uncore_format_group,
+};
+
+static struct event_constraint skx_uncore_m3upi_constraints[] = {
+	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
+	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
+	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
+	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
+	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
+	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
+	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
+	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
+	EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type skx_uncore_m3upi = {
+	.name = "m3upi",
+	.num_counters = 3,
+	.num_boxes = 3,
+	.perf_ctr_bits = 48,
+	.constraints = skx_uncore_m3upi_constraints,
+	.perf_ctr = SNBEP_PCI_PMON_CTR0,
+	.event_ctl = SNBEP_PCI_PMON_CTL0,
+	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
+	.ops = &ivbep_uncore_pci_ops,
+	.format_group = &skx_uncore_format_group,
+};
+
+enum {
+	SKX_PCI_UNCORE_IMC,
+	SKX_PCI_UNCORE_M2M,
+	SKX_PCI_UNCORE_UPI,
+	SKX_PCI_UNCORE_M2PCIE,
+	SKX_PCI_UNCORE_M3UPI,
+};
+
+static struct intel_uncore_type *skx_pci_uncores[] = {
+	[SKX_PCI_UNCORE_IMC] = &skx_uncore_imc,
+	[SKX_PCI_UNCORE_M2M] = &skx_uncore_m2m,
+	[SKX_PCI_UNCORE_UPI] = &skx_uncore_upi,
+	[SKX_PCI_UNCORE_M2PCIE] = &skx_uncore_m2pcie,
+	[SKX_PCI_UNCORE_M3UPI] = &skx_uncore_m3upi,
+	NULL,
+};
+
+static const struct pci_device_id skx_uncore_pci_ids[] = {
+	{ /* MC0 Channel 0 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
+	},
+	{ /* MC0 Channel 1 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
+	},
+	{ /* MC0 Channel 2 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
+	},
+	{ /* MC1 Channel 0 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
+	},
+	{ /* MC1 Channel 1 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
+	},
+	{ /* MC1 Channel 2 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
+	},
+	{ /* M2M0 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
+	},
+	{ /* M2M1 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
+	},
+	{ /* UPI0 Link 0 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
+	},
+	{ /* UPI0 Link 1 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
+	},
+	{ /* UPI1 Link 2 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
+	},
+	{ /* M2PCIe 0 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
+	},
+	{ /* M2PCIe 1 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
+	},
+	{ /* M2PCIe 2 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
+	},
+	{ /* M2PCIe 3 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
+	},
+	{ /* M3UPI0 Link 0 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
+	},
+	{ /* M3UPI0 Link 1 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
+	},
+	{ /* M3UPI1 Link 2 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
+	},
+	{ /* end: all zeroes */ }
+};
+
+
+static struct pci_driver skx_uncore_pci_driver = {
+	.name = "skx_uncore",
+	.id_table = skx_uncore_pci_ids,
+};
+
+int skx_uncore_pci_init(void)
+{
+	/* need to double check pci address */
+	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
+
+	if (ret)
+		return ret;
+
+	uncore_pci_uncores = skx_pci_uncores;
+	uncore_pci_driver = &skx_uncore_pci_driver;
+	return 0;
+}
+
+/* end of SKX uncore support */