aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorYan, Zheng <zheng.z.yan@intel.com>2014-07-30 03:22:14 -0400
committerIngo Molnar <mingo@kernel.org>2014-08-13 01:51:07 -0400
commit8268fdfc45b747bcb3351464efefbdf611aeea9b (patch)
treeb39d4603d727c8bf726423abc0bec6f433957105
parent92807ffdf32c380a09cfa396c853e97303826103 (diff)
perf/x86/uncore: Move SNB/IVB-EP specific code to seperate file
Signed-off-by: Yan, Zheng <zheng.z.yan@intel.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Cc: Andi Kleen <ak@linux.intel.com> Cc: Arnaldo Carvalho de Melo <acme@kernel.org> Cc: Borislav Petkov <bp@suse.de> Cc: Paul Mackerras <paulus@samba.org> Cc: Stephane Eranian <eranian@google.com> Link: http://lkml.kernel.org/r/1406704935-27708-3-git-send-email-zheng.z.yan@intel.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/kernel/cpu/Makefile1
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c1465
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.h164
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c1644
4 files changed, 1655 insertions, 1619 deletions
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 5ca8f4a39106..7dee8664573a 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -37,6 +37,7 @@ endif
37obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o 37obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
38obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o 38obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
39obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_uncore_snb.o 39obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_uncore_snb.o
40obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore_snbep.o
40obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o 41obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o
41endif 42endif
42 43
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index feb085c8ca4e..cf6966a37580 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -32,46 +32,10 @@ ssize_t uncore_event_show(struct kobject *kobj,
32 ((1ULL << (n)) - 1))) 32 ((1ULL << (n)) - 1)))
33 33
34DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); 34DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
35DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
36DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); 35DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
37DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); 36DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
38DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
39DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); 37DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
40DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31"); 38DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
41DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
42DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
43DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
44DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
45DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
46DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
47DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
48DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
49DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
50DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
51DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
52DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
53DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
54DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
55DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
56DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
57DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
58DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
59DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
60DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
61DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
62DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
63DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
64DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
65DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
66DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
67DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
68DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
69DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
70DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
71DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
72DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
73DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
74DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
75 39
76struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event) 40struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
77{ 41{
@@ -194,1415 +158,6 @@ u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
194 return config; 158 return config;
195} 159}
196 160
197/* Sandy Bridge-EP uncore support */
198static struct intel_uncore_type snbep_uncore_cbox;
199static struct intel_uncore_type snbep_uncore_pcu;
200
201static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
202{
203 struct pci_dev *pdev = box->pci_dev;
204 int box_ctl = uncore_pci_box_ctl(box);
205 u32 config = 0;
206
207 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
208 config |= SNBEP_PMON_BOX_CTL_FRZ;
209 pci_write_config_dword(pdev, box_ctl, config);
210 }
211}
212
213static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
214{
215 struct pci_dev *pdev = box->pci_dev;
216 int box_ctl = uncore_pci_box_ctl(box);
217 u32 config = 0;
218
219 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
220 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
221 pci_write_config_dword(pdev, box_ctl, config);
222 }
223}
224
225static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
226{
227 struct pci_dev *pdev = box->pci_dev;
228 struct hw_perf_event *hwc = &event->hw;
229
230 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
231}
232
233static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
234{
235 struct pci_dev *pdev = box->pci_dev;
236 struct hw_perf_event *hwc = &event->hw;
237
238 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
239}
240
241static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
242{
243 struct pci_dev *pdev = box->pci_dev;
244 struct hw_perf_event *hwc = &event->hw;
245 u64 count = 0;
246
247 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
248 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
249
250 return count;
251}
252
253static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
254{
255 struct pci_dev *pdev = box->pci_dev;
256
257 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
258}
259
260static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
261{
262 u64 config;
263 unsigned msr;
264
265 msr = uncore_msr_box_ctl(box);
266 if (msr) {
267 rdmsrl(msr, config);
268 config |= SNBEP_PMON_BOX_CTL_FRZ;
269 wrmsrl(msr, config);
270 }
271}
272
273static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
274{
275 u64 config;
276 unsigned msr;
277
278 msr = uncore_msr_box_ctl(box);
279 if (msr) {
280 rdmsrl(msr, config);
281 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
282 wrmsrl(msr, config);
283 }
284}
285
286static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
287{
288 struct hw_perf_event *hwc = &event->hw;
289 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
290
291 if (reg1->idx != EXTRA_REG_NONE)
292 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
293
294 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
295}
296
297static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
298 struct perf_event *event)
299{
300 struct hw_perf_event *hwc = &event->hw;
301
302 wrmsrl(hwc->config_base, hwc->config);
303}
304
305static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
306{
307 unsigned msr = uncore_msr_box_ctl(box);
308
309 if (msr)
310 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
311}
312
313static struct attribute *snbep_uncore_formats_attr[] = {
314 &format_attr_event.attr,
315 &format_attr_umask.attr,
316 &format_attr_edge.attr,
317 &format_attr_inv.attr,
318 &format_attr_thresh8.attr,
319 NULL,
320};
321
322static struct attribute *snbep_uncore_ubox_formats_attr[] = {
323 &format_attr_event.attr,
324 &format_attr_umask.attr,
325 &format_attr_edge.attr,
326 &format_attr_inv.attr,
327 &format_attr_thresh5.attr,
328 NULL,
329};
330
331static struct attribute *snbep_uncore_cbox_formats_attr[] = {
332 &format_attr_event.attr,
333 &format_attr_umask.attr,
334 &format_attr_edge.attr,
335 &format_attr_tid_en.attr,
336 &format_attr_inv.attr,
337 &format_attr_thresh8.attr,
338 &format_attr_filter_tid.attr,
339 &format_attr_filter_nid.attr,
340 &format_attr_filter_state.attr,
341 &format_attr_filter_opc.attr,
342 NULL,
343};
344
345static struct attribute *snbep_uncore_pcu_formats_attr[] = {
346 &format_attr_event_ext.attr,
347 &format_attr_occ_sel.attr,
348 &format_attr_edge.attr,
349 &format_attr_inv.attr,
350 &format_attr_thresh5.attr,
351 &format_attr_occ_invert.attr,
352 &format_attr_occ_edge.attr,
353 &format_attr_filter_band0.attr,
354 &format_attr_filter_band1.attr,
355 &format_attr_filter_band2.attr,
356 &format_attr_filter_band3.attr,
357 NULL,
358};
359
360static struct attribute *snbep_uncore_qpi_formats_attr[] = {
361 &format_attr_event_ext.attr,
362 &format_attr_umask.attr,
363 &format_attr_edge.attr,
364 &format_attr_inv.attr,
365 &format_attr_thresh8.attr,
366 &format_attr_match_rds.attr,
367 &format_attr_match_rnid30.attr,
368 &format_attr_match_rnid4.attr,
369 &format_attr_match_dnid.attr,
370 &format_attr_match_mc.attr,
371 &format_attr_match_opc.attr,
372 &format_attr_match_vnw.attr,
373 &format_attr_match0.attr,
374 &format_attr_match1.attr,
375 &format_attr_mask_rds.attr,
376 &format_attr_mask_rnid30.attr,
377 &format_attr_mask_rnid4.attr,
378 &format_attr_mask_dnid.attr,
379 &format_attr_mask_mc.attr,
380 &format_attr_mask_opc.attr,
381 &format_attr_mask_vnw.attr,
382 &format_attr_mask0.attr,
383 &format_attr_mask1.attr,
384 NULL,
385};
386
387static struct uncore_event_desc snbep_uncore_imc_events[] = {
388 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
389 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
390 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
391 { /* end: all zeroes */ },
392};
393
394static struct uncore_event_desc snbep_uncore_qpi_events[] = {
395 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
396 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
397 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
398 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
399 { /* end: all zeroes */ },
400};
401
402static struct attribute_group snbep_uncore_format_group = {
403 .name = "format",
404 .attrs = snbep_uncore_formats_attr,
405};
406
407static struct attribute_group snbep_uncore_ubox_format_group = {
408 .name = "format",
409 .attrs = snbep_uncore_ubox_formats_attr,
410};
411
412static struct attribute_group snbep_uncore_cbox_format_group = {
413 .name = "format",
414 .attrs = snbep_uncore_cbox_formats_attr,
415};
416
417static struct attribute_group snbep_uncore_pcu_format_group = {
418 .name = "format",
419 .attrs = snbep_uncore_pcu_formats_attr,
420};
421
422static struct attribute_group snbep_uncore_qpi_format_group = {
423 .name = "format",
424 .attrs = snbep_uncore_qpi_formats_attr,
425};
426
427#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
428 .init_box = snbep_uncore_msr_init_box, \
429 .disable_box = snbep_uncore_msr_disable_box, \
430 .enable_box = snbep_uncore_msr_enable_box, \
431 .disable_event = snbep_uncore_msr_disable_event, \
432 .enable_event = snbep_uncore_msr_enable_event, \
433 .read_counter = uncore_msr_read_counter
434
435static struct intel_uncore_ops snbep_uncore_msr_ops = {
436 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
437};
438
439#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
440 .init_box = snbep_uncore_pci_init_box, \
441 .disable_box = snbep_uncore_pci_disable_box, \
442 .enable_box = snbep_uncore_pci_enable_box, \
443 .disable_event = snbep_uncore_pci_disable_event, \
444 .read_counter = snbep_uncore_pci_read_counter
445
446static struct intel_uncore_ops snbep_uncore_pci_ops = {
447 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
448 .enable_event = snbep_uncore_pci_enable_event, \
449};
450
451static struct event_constraint snbep_uncore_cbox_constraints[] = {
452 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
453 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
454 UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
455 UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
456 UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
457 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
458 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
459 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
460 UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
461 UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
462 UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
463 UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
464 UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
465 EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
466 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
467 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
468 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
469 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
470 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
471 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
472 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
473 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
474 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
475 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
476 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
477 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
478 EVENT_CONSTRAINT_END
479};
480
481static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
482 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
483 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
484 UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
485 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
486 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
487 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
488 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
489 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
490 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
491 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
492 EVENT_CONSTRAINT_END
493};
494
495static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
496 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
497 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
498 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
499 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
500 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
501 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
502 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
503 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
504 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
505 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
506 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
507 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
508 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
509 UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
510 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
511 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
512 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
513 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
514 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
515 UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
516 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
517 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
518 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
519 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
520 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
521 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
522 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
523 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
524 EVENT_CONSTRAINT_END
525};
526
527static struct intel_uncore_type snbep_uncore_ubox = {
528 .name = "ubox",
529 .num_counters = 2,
530 .num_boxes = 1,
531 .perf_ctr_bits = 44,
532 .fixed_ctr_bits = 48,
533 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
534 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
535 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
536 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
537 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
538 .ops = &snbep_uncore_msr_ops,
539 .format_group = &snbep_uncore_ubox_format_group,
540};
541
542static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
543 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
544 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
545 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
546 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
547 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
548 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
549 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
550 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
551 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
552 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
553 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
554 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
555 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
556 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
557 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
558 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
559 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
560 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
561 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
562 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
563 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
564 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
565 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
566 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
567 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
568 EVENT_EXTRA_END
569};
570
571static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
572{
573 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
574 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
575 int i;
576
577 if (uncore_box_is_fake(box))
578 return;
579
580 for (i = 0; i < 5; i++) {
581 if (reg1->alloc & (0x1 << i))
582 atomic_sub(1 << (i * 6), &er->ref);
583 }
584 reg1->alloc = 0;
585}
586
587static struct event_constraint *
588__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
589 u64 (*cbox_filter_mask)(int fields))
590{
591 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
592 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
593 int i, alloc = 0;
594 unsigned long flags;
595 u64 mask;
596
597 if (reg1->idx == EXTRA_REG_NONE)
598 return NULL;
599
600 raw_spin_lock_irqsave(&er->lock, flags);
601 for (i = 0; i < 5; i++) {
602 if (!(reg1->idx & (0x1 << i)))
603 continue;
604 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
605 continue;
606
607 mask = cbox_filter_mask(0x1 << i);
608 if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
609 !((reg1->config ^ er->config) & mask)) {
610 atomic_add(1 << (i * 6), &er->ref);
611 er->config &= ~mask;
612 er->config |= reg1->config & mask;
613 alloc |= (0x1 << i);
614 } else {
615 break;
616 }
617 }
618 raw_spin_unlock_irqrestore(&er->lock, flags);
619 if (i < 5)
620 goto fail;
621
622 if (!uncore_box_is_fake(box))
623 reg1->alloc |= alloc;
624
625 return NULL;
626fail:
627 for (; i >= 0; i--) {
628 if (alloc & (0x1 << i))
629 atomic_sub(1 << (i * 6), &er->ref);
630 }
631 return &uncore_constraint_empty;
632}
633
634static u64 snbep_cbox_filter_mask(int fields)
635{
636 u64 mask = 0;
637
638 if (fields & 0x1)
639 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
640 if (fields & 0x2)
641 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
642 if (fields & 0x4)
643 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
644 if (fields & 0x8)
645 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
646
647 return mask;
648}
649
650static struct event_constraint *
651snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
652{
653 return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
654}
655
656static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
657{
658 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
659 struct extra_reg *er;
660 int idx = 0;
661
662 for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
663 if (er->event != (event->hw.config & er->config_mask))
664 continue;
665 idx |= er->idx;
666 }
667
668 if (idx) {
669 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
670 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
671 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
672 reg1->idx = idx;
673 }
674 return 0;
675}
676
677static struct intel_uncore_ops snbep_uncore_cbox_ops = {
678 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
679 .hw_config = snbep_cbox_hw_config,
680 .get_constraint = snbep_cbox_get_constraint,
681 .put_constraint = snbep_cbox_put_constraint,
682};
683
684static struct intel_uncore_type snbep_uncore_cbox = {
685 .name = "cbox",
686 .num_counters = 4,
687 .num_boxes = 8,
688 .perf_ctr_bits = 44,
689 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
690 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
691 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
692 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
693 .msr_offset = SNBEP_CBO_MSR_OFFSET,
694 .num_shared_regs = 1,
695 .constraints = snbep_uncore_cbox_constraints,
696 .ops = &snbep_uncore_cbox_ops,
697 .format_group = &snbep_uncore_cbox_format_group,
698};
699
700static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
701{
702 struct hw_perf_event *hwc = &event->hw;
703 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
704 u64 config = reg1->config;
705
706 if (new_idx > reg1->idx)
707 config <<= 8 * (new_idx - reg1->idx);
708 else
709 config >>= 8 * (reg1->idx - new_idx);
710
711 if (modify) {
712 hwc->config += new_idx - reg1->idx;
713 reg1->config = config;
714 reg1->idx = new_idx;
715 }
716 return config;
717}
718
719static struct event_constraint *
720snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
721{
722 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
723 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
724 unsigned long flags;
725 int idx = reg1->idx;
726 u64 mask, config1 = reg1->config;
727 bool ok = false;
728
729 if (reg1->idx == EXTRA_REG_NONE ||
730 (!uncore_box_is_fake(box) && reg1->alloc))
731 return NULL;
732again:
733 mask = 0xffULL << (idx * 8);
734 raw_spin_lock_irqsave(&er->lock, flags);
735 if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
736 !((config1 ^ er->config) & mask)) {
737 atomic_add(1 << (idx * 8), &er->ref);
738 er->config &= ~mask;
739 er->config |= config1 & mask;
740 ok = true;
741 }
742 raw_spin_unlock_irqrestore(&er->lock, flags);
743
744 if (!ok) {
745 idx = (idx + 1) % 4;
746 if (idx != reg1->idx) {
747 config1 = snbep_pcu_alter_er(event, idx, false);
748 goto again;
749 }
750 return &uncore_constraint_empty;
751 }
752
753 if (!uncore_box_is_fake(box)) {
754 if (idx != reg1->idx)
755 snbep_pcu_alter_er(event, idx, true);
756 reg1->alloc = 1;
757 }
758 return NULL;
759}
760
761static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
762{
763 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
764 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
765
766 if (uncore_box_is_fake(box) || !reg1->alloc)
767 return;
768
769 atomic_sub(1 << (reg1->idx * 8), &er->ref);
770 reg1->alloc = 0;
771}
772
773static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
774{
775 struct hw_perf_event *hwc = &event->hw;
776 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
777 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
778
779 if (ev_sel >= 0xb && ev_sel <= 0xe) {
780 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
781 reg1->idx = ev_sel - 0xb;
782 reg1->config = event->attr.config1 & (0xff << reg1->idx);
783 }
784 return 0;
785}
786
787static struct intel_uncore_ops snbep_uncore_pcu_ops = {
788 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
789 .hw_config = snbep_pcu_hw_config,
790 .get_constraint = snbep_pcu_get_constraint,
791 .put_constraint = snbep_pcu_put_constraint,
792};
793
794static struct intel_uncore_type snbep_uncore_pcu = {
795 .name = "pcu",
796 .num_counters = 4,
797 .num_boxes = 1,
798 .perf_ctr_bits = 48,
799 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
800 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
801 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
802 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
803 .num_shared_regs = 1,
804 .ops = &snbep_uncore_pcu_ops,
805 .format_group = &snbep_uncore_pcu_format_group,
806};
807
808static struct intel_uncore_type *snbep_msr_uncores[] = {
809 &snbep_uncore_ubox,
810 &snbep_uncore_cbox,
811 &snbep_uncore_pcu,
812 NULL,
813};
814
815enum {
816 SNBEP_PCI_QPI_PORT0_FILTER,
817 SNBEP_PCI_QPI_PORT1_FILTER,
818};
819
820static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
821{
822 struct hw_perf_event *hwc = &event->hw;
823 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
824 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
825
826 if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
827 reg1->idx = 0;
828 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
829 reg1->config = event->attr.config1;
830 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
831 reg2->config = event->attr.config2;
832 }
833 return 0;
834}
835
836static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
837{
838 struct pci_dev *pdev = box->pci_dev;
839 struct hw_perf_event *hwc = &event->hw;
840 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
841 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
842
843 if (reg1->idx != EXTRA_REG_NONE) {
844 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
845 struct pci_dev *filter_pdev = uncore_extra_pci_dev[box->phys_id][idx];
846 WARN_ON_ONCE(!filter_pdev);
847 if (filter_pdev) {
848 pci_write_config_dword(filter_pdev, reg1->reg,
849 (u32)reg1->config);
850 pci_write_config_dword(filter_pdev, reg1->reg + 4,
851 (u32)(reg1->config >> 32));
852 pci_write_config_dword(filter_pdev, reg2->reg,
853 (u32)reg2->config);
854 pci_write_config_dword(filter_pdev, reg2->reg + 4,
855 (u32)(reg2->config >> 32));
856 }
857 }
858
859 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
860}
861
862static struct intel_uncore_ops snbep_uncore_qpi_ops = {
863 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
864 .enable_event = snbep_qpi_enable_event,
865 .hw_config = snbep_qpi_hw_config,
866 .get_constraint = uncore_get_constraint,
867 .put_constraint = uncore_put_constraint,
868};
869
870#define SNBEP_UNCORE_PCI_COMMON_INIT() \
871 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
872 .event_ctl = SNBEP_PCI_PMON_CTL0, \
873 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
874 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
875 .ops = &snbep_uncore_pci_ops, \
876 .format_group = &snbep_uncore_format_group
877
878static struct intel_uncore_type snbep_uncore_ha = {
879 .name = "ha",
880 .num_counters = 4,
881 .num_boxes = 1,
882 .perf_ctr_bits = 48,
883 SNBEP_UNCORE_PCI_COMMON_INIT(),
884};
885
886static struct intel_uncore_type snbep_uncore_imc = {
887 .name = "imc",
888 .num_counters = 4,
889 .num_boxes = 4,
890 .perf_ctr_bits = 48,
891 .fixed_ctr_bits = 48,
892 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
893 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
894 .event_descs = snbep_uncore_imc_events,
895 SNBEP_UNCORE_PCI_COMMON_INIT(),
896};
897
898static struct intel_uncore_type snbep_uncore_qpi = {
899 .name = "qpi",
900 .num_counters = 4,
901 .num_boxes = 2,
902 .perf_ctr_bits = 48,
903 .perf_ctr = SNBEP_PCI_PMON_CTR0,
904 .event_ctl = SNBEP_PCI_PMON_CTL0,
905 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
906 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
907 .num_shared_regs = 1,
908 .ops = &snbep_uncore_qpi_ops,
909 .event_descs = snbep_uncore_qpi_events,
910 .format_group = &snbep_uncore_qpi_format_group,
911};
912
913
914static struct intel_uncore_type snbep_uncore_r2pcie = {
915 .name = "r2pcie",
916 .num_counters = 4,
917 .num_boxes = 1,
918 .perf_ctr_bits = 44,
919 .constraints = snbep_uncore_r2pcie_constraints,
920 SNBEP_UNCORE_PCI_COMMON_INIT(),
921};
922
923static struct intel_uncore_type snbep_uncore_r3qpi = {
924 .name = "r3qpi",
925 .num_counters = 3,
926 .num_boxes = 2,
927 .perf_ctr_bits = 44,
928 .constraints = snbep_uncore_r3qpi_constraints,
929 SNBEP_UNCORE_PCI_COMMON_INIT(),
930};
931
932enum {
933 SNBEP_PCI_UNCORE_HA,
934 SNBEP_PCI_UNCORE_IMC,
935 SNBEP_PCI_UNCORE_QPI,
936 SNBEP_PCI_UNCORE_R2PCIE,
937 SNBEP_PCI_UNCORE_R3QPI,
938};
939
940static struct intel_uncore_type *snbep_pci_uncores[] = {
941 [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha,
942 [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc,
943 [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi,
944 [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie,
945 [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi,
946 NULL,
947};
948
949static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
950 { /* Home Agent */
951 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
952 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
953 },
954 { /* MC Channel 0 */
955 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
956 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
957 },
958 { /* MC Channel 1 */
959 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
960 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
961 },
962 { /* MC Channel 2 */
963 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
964 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
965 },
966 { /* MC Channel 3 */
967 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
968 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
969 },
970 { /* QPI Port 0 */
971 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
972 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
973 },
974 { /* QPI Port 1 */
975 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
976 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
977 },
978 { /* R2PCIe */
979 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
980 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
981 },
982 { /* R3QPI Link 0 */
983 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
984 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
985 },
986 { /* R3QPI Link 1 */
987 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
988 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
989 },
990 { /* QPI Port 0 filter */
991 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
992 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
993 SNBEP_PCI_QPI_PORT0_FILTER),
994 },
995 { /* QPI Port 0 filter */
996 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
997 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
998 SNBEP_PCI_QPI_PORT1_FILTER),
999 },
1000 { /* end: all zeroes */ }
1001};
1002
1003static struct pci_driver snbep_uncore_pci_driver = {
1004 .name = "snbep_uncore",
1005 .id_table = snbep_uncore_pci_ids,
1006};
1007
1008/*
1009 * build pci bus to socket mapping
1010 */
1011static int snbep_pci2phy_map_init(int devid)
1012{
1013 struct pci_dev *ubox_dev = NULL;
1014 int i, bus, nodeid;
1015 int err = 0;
1016 u32 config = 0;
1017
1018 while (1) {
1019 /* find the UBOX device */
1020 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1021 if (!ubox_dev)
1022 break;
1023 bus = ubox_dev->bus->number;
1024 /* get the Node ID of the local register */
1025 err = pci_read_config_dword(ubox_dev, 0x40, &config);
1026 if (err)
1027 break;
1028 nodeid = config;
1029 /* get the Node ID mapping */
1030 err = pci_read_config_dword(ubox_dev, 0x54, &config);
1031 if (err)
1032 break;
1033 /*
1034 * every three bits in the Node ID mapping register maps
1035 * to a particular node.
1036 */
1037 for (i = 0; i < 8; i++) {
1038 if (nodeid == ((config >> (3 * i)) & 0x7)) {
1039 uncore_pcibus_to_physid[bus] = i;
1040 break;
1041 }
1042 }
1043 }
1044
1045 if (!err) {
1046 /*
1047 * For PCI bus with no UBOX device, find the next bus
1048 * that has UBOX device and use its mapping.
1049 */
1050 i = -1;
1051 for (bus = 255; bus >= 0; bus--) {
1052 if (uncore_pcibus_to_physid[bus] >= 0)
1053 i = uncore_pcibus_to_physid[bus];
1054 else
1055 uncore_pcibus_to_physid[bus] = i;
1056 }
1057 }
1058
1059 if (ubox_dev)
1060 pci_dev_put(ubox_dev);
1061
1062 return err ? pcibios_err_to_errno(err) : 0;
1063}
1064/* end of Sandy Bridge-EP uncore support */
1065
1066/* IvyTown uncore support */
1067static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
1068{
1069 unsigned msr = uncore_msr_box_ctl(box);
1070 if (msr)
1071 wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
1072}
1073
1074static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
1075{
1076 struct pci_dev *pdev = box->pci_dev;
1077
1078 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
1079}
1080
1081#define IVT_UNCORE_MSR_OPS_COMMON_INIT() \
1082 .init_box = ivt_uncore_msr_init_box, \
1083 .disable_box = snbep_uncore_msr_disable_box, \
1084 .enable_box = snbep_uncore_msr_enable_box, \
1085 .disable_event = snbep_uncore_msr_disable_event, \
1086 .enable_event = snbep_uncore_msr_enable_event, \
1087 .read_counter = uncore_msr_read_counter
1088
1089static struct intel_uncore_ops ivt_uncore_msr_ops = {
1090 IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1091};
1092
1093static struct intel_uncore_ops ivt_uncore_pci_ops = {
1094 .init_box = ivt_uncore_pci_init_box,
1095 .disable_box = snbep_uncore_pci_disable_box,
1096 .enable_box = snbep_uncore_pci_enable_box,
1097 .disable_event = snbep_uncore_pci_disable_event,
1098 .enable_event = snbep_uncore_pci_enable_event,
1099 .read_counter = snbep_uncore_pci_read_counter,
1100};
1101
1102#define IVT_UNCORE_PCI_COMMON_INIT() \
1103 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
1104 .event_ctl = SNBEP_PCI_PMON_CTL0, \
1105 .event_mask = IVT_PMON_RAW_EVENT_MASK, \
1106 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
1107 .ops = &ivt_uncore_pci_ops, \
1108 .format_group = &ivt_uncore_format_group
1109
1110static struct attribute *ivt_uncore_formats_attr[] = {
1111 &format_attr_event.attr,
1112 &format_attr_umask.attr,
1113 &format_attr_edge.attr,
1114 &format_attr_inv.attr,
1115 &format_attr_thresh8.attr,
1116 NULL,
1117};
1118
1119static struct attribute *ivt_uncore_ubox_formats_attr[] = {
1120 &format_attr_event.attr,
1121 &format_attr_umask.attr,
1122 &format_attr_edge.attr,
1123 &format_attr_inv.attr,
1124 &format_attr_thresh5.attr,
1125 NULL,
1126};
1127
1128static struct attribute *ivt_uncore_cbox_formats_attr[] = {
1129 &format_attr_event.attr,
1130 &format_attr_umask.attr,
1131 &format_attr_edge.attr,
1132 &format_attr_tid_en.attr,
1133 &format_attr_thresh8.attr,
1134 &format_attr_filter_tid.attr,
1135 &format_attr_filter_link.attr,
1136 &format_attr_filter_state2.attr,
1137 &format_attr_filter_nid2.attr,
1138 &format_attr_filter_opc2.attr,
1139 NULL,
1140};
1141
1142static struct attribute *ivt_uncore_pcu_formats_attr[] = {
1143 &format_attr_event_ext.attr,
1144 &format_attr_occ_sel.attr,
1145 &format_attr_edge.attr,
1146 &format_attr_thresh5.attr,
1147 &format_attr_occ_invert.attr,
1148 &format_attr_occ_edge.attr,
1149 &format_attr_filter_band0.attr,
1150 &format_attr_filter_band1.attr,
1151 &format_attr_filter_band2.attr,
1152 &format_attr_filter_band3.attr,
1153 NULL,
1154};
1155
1156static struct attribute *ivt_uncore_qpi_formats_attr[] = {
1157 &format_attr_event_ext.attr,
1158 &format_attr_umask.attr,
1159 &format_attr_edge.attr,
1160 &format_attr_thresh8.attr,
1161 &format_attr_match_rds.attr,
1162 &format_attr_match_rnid30.attr,
1163 &format_attr_match_rnid4.attr,
1164 &format_attr_match_dnid.attr,
1165 &format_attr_match_mc.attr,
1166 &format_attr_match_opc.attr,
1167 &format_attr_match_vnw.attr,
1168 &format_attr_match0.attr,
1169 &format_attr_match1.attr,
1170 &format_attr_mask_rds.attr,
1171 &format_attr_mask_rnid30.attr,
1172 &format_attr_mask_rnid4.attr,
1173 &format_attr_mask_dnid.attr,
1174 &format_attr_mask_mc.attr,
1175 &format_attr_mask_opc.attr,
1176 &format_attr_mask_vnw.attr,
1177 &format_attr_mask0.attr,
1178 &format_attr_mask1.attr,
1179 NULL,
1180};
1181
1182static struct attribute_group ivt_uncore_format_group = {
1183 .name = "format",
1184 .attrs = ivt_uncore_formats_attr,
1185};
1186
1187static struct attribute_group ivt_uncore_ubox_format_group = {
1188 .name = "format",
1189 .attrs = ivt_uncore_ubox_formats_attr,
1190};
1191
1192static struct attribute_group ivt_uncore_cbox_format_group = {
1193 .name = "format",
1194 .attrs = ivt_uncore_cbox_formats_attr,
1195};
1196
1197static struct attribute_group ivt_uncore_pcu_format_group = {
1198 .name = "format",
1199 .attrs = ivt_uncore_pcu_formats_attr,
1200};
1201
1202static struct attribute_group ivt_uncore_qpi_format_group = {
1203 .name = "format",
1204 .attrs = ivt_uncore_qpi_formats_attr,
1205};
1206
1207static struct intel_uncore_type ivt_uncore_ubox = {
1208 .name = "ubox",
1209 .num_counters = 2,
1210 .num_boxes = 1,
1211 .perf_ctr_bits = 44,
1212 .fixed_ctr_bits = 48,
1213 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
1214 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
1215 .event_mask = IVT_U_MSR_PMON_RAW_EVENT_MASK,
1216 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1217 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1218 .ops = &ivt_uncore_msr_ops,
1219 .format_group = &ivt_uncore_ubox_format_group,
1220};
1221
1222static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1223 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1224 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1225 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1226
1227 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1228 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1229 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1230 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1231 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1232 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1233 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1234 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1235 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1236 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1237 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1238 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1239 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1240 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1241 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1242 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1243 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1244 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1245 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1246 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1247 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1248 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1249 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1250 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1251 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1252 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1253 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1254 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1255 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1256 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1257 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1258 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1259 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1260 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1261 EVENT_EXTRA_END
1262};
1263
1264static u64 ivt_cbox_filter_mask(int fields)
1265{
1266 u64 mask = 0;
1267
1268 if (fields & 0x1)
1269 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
1270 if (fields & 0x2)
1271 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
1272 if (fields & 0x4)
1273 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
1274 if (fields & 0x8)
1275 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
1276 if (fields & 0x10)
1277 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;
1278
1279 return mask;
1280}
1281
1282static struct event_constraint *
1283ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1284{
1285 return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
1286}
1287
1288static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1289{
1290 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1291 struct extra_reg *er;
1292 int idx = 0;
1293
1294 for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
1295 if (er->event != (event->hw.config & er->config_mask))
1296 continue;
1297 idx |= er->idx;
1298 }
1299
1300 if (idx) {
1301 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1302 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1303 reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
1304 reg1->idx = idx;
1305 }
1306 return 0;
1307}
1308
1309static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1310{
1311 struct hw_perf_event *hwc = &event->hw;
1312 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1313
1314 if (reg1->idx != EXTRA_REG_NONE) {
1315 u64 filter = uncore_shared_reg_config(box, 0);
1316 wrmsrl(reg1->reg, filter & 0xffffffff);
1317 wrmsrl(reg1->reg + 6, filter >> 32);
1318 }
1319
1320 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1321}
1322
1323static struct intel_uncore_ops ivt_uncore_cbox_ops = {
1324 .init_box = ivt_uncore_msr_init_box,
1325 .disable_box = snbep_uncore_msr_disable_box,
1326 .enable_box = snbep_uncore_msr_enable_box,
1327 .disable_event = snbep_uncore_msr_disable_event,
1328 .enable_event = ivt_cbox_enable_event,
1329 .read_counter = uncore_msr_read_counter,
1330 .hw_config = ivt_cbox_hw_config,
1331 .get_constraint = ivt_cbox_get_constraint,
1332 .put_constraint = snbep_cbox_put_constraint,
1333};
1334
1335static struct intel_uncore_type ivt_uncore_cbox = {
1336 .name = "cbox",
1337 .num_counters = 4,
1338 .num_boxes = 15,
1339 .perf_ctr_bits = 44,
1340 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
1341 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
1342 .event_mask = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
1343 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
1344 .msr_offset = SNBEP_CBO_MSR_OFFSET,
1345 .num_shared_regs = 1,
1346 .constraints = snbep_uncore_cbox_constraints,
1347 .ops = &ivt_uncore_cbox_ops,
1348 .format_group = &ivt_uncore_cbox_format_group,
1349};
1350
1351static struct intel_uncore_ops ivt_uncore_pcu_ops = {
1352 IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1353 .hw_config = snbep_pcu_hw_config,
1354 .get_constraint = snbep_pcu_get_constraint,
1355 .put_constraint = snbep_pcu_put_constraint,
1356};
1357
1358static struct intel_uncore_type ivt_uncore_pcu = {
1359 .name = "pcu",
1360 .num_counters = 4,
1361 .num_boxes = 1,
1362 .perf_ctr_bits = 48,
1363 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1364 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
1365 .event_mask = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
1366 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1367 .num_shared_regs = 1,
1368 .ops = &ivt_uncore_pcu_ops,
1369 .format_group = &ivt_uncore_pcu_format_group,
1370};
1371
1372static struct intel_uncore_type *ivt_msr_uncores[] = {
1373 &ivt_uncore_ubox,
1374 &ivt_uncore_cbox,
1375 &ivt_uncore_pcu,
1376 NULL,
1377};
1378
1379static struct intel_uncore_type ivt_uncore_ha = {
1380 .name = "ha",
1381 .num_counters = 4,
1382 .num_boxes = 2,
1383 .perf_ctr_bits = 48,
1384 IVT_UNCORE_PCI_COMMON_INIT(),
1385};
1386
1387static struct intel_uncore_type ivt_uncore_imc = {
1388 .name = "imc",
1389 .num_counters = 4,
1390 .num_boxes = 8,
1391 .perf_ctr_bits = 48,
1392 .fixed_ctr_bits = 48,
1393 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1394 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1395 IVT_UNCORE_PCI_COMMON_INIT(),
1396};
1397
1398/* registers in IRP boxes are not properly aligned */
1399static unsigned ivt_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1400static unsigned ivt_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1401
1402static void ivt_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1403{
1404 struct pci_dev *pdev = box->pci_dev;
1405 struct hw_perf_event *hwc = &event->hw;
1406
1407 pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx],
1408 hwc->config | SNBEP_PMON_CTL_EN);
1409}
1410
1411static void ivt_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1412{
1413 struct pci_dev *pdev = box->pci_dev;
1414 struct hw_perf_event *hwc = &event->hw;
1415
1416 pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], hwc->config);
1417}
1418
1419static u64 ivt_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1420{
1421 struct pci_dev *pdev = box->pci_dev;
1422 struct hw_perf_event *hwc = &event->hw;
1423 u64 count = 0;
1424
1425 pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1426 pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1427
1428 return count;
1429}
1430
1431static struct intel_uncore_ops ivt_uncore_irp_ops = {
1432 .init_box = ivt_uncore_pci_init_box,
1433 .disable_box = snbep_uncore_pci_disable_box,
1434 .enable_box = snbep_uncore_pci_enable_box,
1435 .disable_event = ivt_uncore_irp_disable_event,
1436 .enable_event = ivt_uncore_irp_enable_event,
1437 .read_counter = ivt_uncore_irp_read_counter,
1438};
1439
1440static struct intel_uncore_type ivt_uncore_irp = {
1441 .name = "irp",
1442 .num_counters = 4,
1443 .num_boxes = 1,
1444 .perf_ctr_bits = 48,
1445 .event_mask = IVT_PMON_RAW_EVENT_MASK,
1446 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1447 .ops = &ivt_uncore_irp_ops,
1448 .format_group = &ivt_uncore_format_group,
1449};
1450
1451static struct intel_uncore_ops ivt_uncore_qpi_ops = {
1452 .init_box = ivt_uncore_pci_init_box,
1453 .disable_box = snbep_uncore_pci_disable_box,
1454 .enable_box = snbep_uncore_pci_enable_box,
1455 .disable_event = snbep_uncore_pci_disable_event,
1456 .enable_event = snbep_qpi_enable_event,
1457 .read_counter = snbep_uncore_pci_read_counter,
1458 .hw_config = snbep_qpi_hw_config,
1459 .get_constraint = uncore_get_constraint,
1460 .put_constraint = uncore_put_constraint,
1461};
1462
1463static struct intel_uncore_type ivt_uncore_qpi = {
1464 .name = "qpi",
1465 .num_counters = 4,
1466 .num_boxes = 3,
1467 .perf_ctr_bits = 48,
1468 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1469 .event_ctl = SNBEP_PCI_PMON_CTL0,
1470 .event_mask = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
1471 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1472 .num_shared_regs = 1,
1473 .ops = &ivt_uncore_qpi_ops,
1474 .format_group = &ivt_uncore_qpi_format_group,
1475};
1476
1477static struct intel_uncore_type ivt_uncore_r2pcie = {
1478 .name = "r2pcie",
1479 .num_counters = 4,
1480 .num_boxes = 1,
1481 .perf_ctr_bits = 44,
1482 .constraints = snbep_uncore_r2pcie_constraints,
1483 IVT_UNCORE_PCI_COMMON_INIT(),
1484};
1485
1486static struct intel_uncore_type ivt_uncore_r3qpi = {
1487 .name = "r3qpi",
1488 .num_counters = 3,
1489 .num_boxes = 2,
1490 .perf_ctr_bits = 44,
1491 .constraints = snbep_uncore_r3qpi_constraints,
1492 IVT_UNCORE_PCI_COMMON_INIT(),
1493};
1494
1495enum {
1496 IVT_PCI_UNCORE_HA,
1497 IVT_PCI_UNCORE_IMC,
1498 IVT_PCI_UNCORE_IRP,
1499 IVT_PCI_UNCORE_QPI,
1500 IVT_PCI_UNCORE_R2PCIE,
1501 IVT_PCI_UNCORE_R3QPI,
1502};
1503
1504static struct intel_uncore_type *ivt_pci_uncores[] = {
1505 [IVT_PCI_UNCORE_HA] = &ivt_uncore_ha,
1506 [IVT_PCI_UNCORE_IMC] = &ivt_uncore_imc,
1507 [IVT_PCI_UNCORE_IRP] = &ivt_uncore_irp,
1508 [IVT_PCI_UNCORE_QPI] = &ivt_uncore_qpi,
1509 [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
1510 [IVT_PCI_UNCORE_R3QPI] = &ivt_uncore_r3qpi,
1511 NULL,
1512};
1513
1514static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
1515 { /* Home Agent 0 */
1516 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1517 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
1518 },
1519 { /* Home Agent 1 */
1520 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1521 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
1522 },
1523 { /* MC0 Channel 0 */
1524 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1525 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
1526 },
1527 { /* MC0 Channel 1 */
1528 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1529 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
1530 },
1531 { /* MC0 Channel 3 */
1532 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1533 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
1534 },
1535 { /* MC0 Channel 4 */
1536 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1537 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
1538 },
1539 { /* MC1 Channel 0 */
1540 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1541 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
1542 },
1543 { /* MC1 Channel 1 */
1544 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1545 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
1546 },
1547 { /* MC1 Channel 3 */
1548 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1549 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
1550 },
1551 { /* MC1 Channel 4 */
1552 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1553 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
1554 },
1555 { /* IRP */
1556 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1557 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IRP, 0),
1558 },
1559 { /* QPI0 Port 0 */
1560 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1561 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
1562 },
1563 { /* QPI0 Port 1 */
1564 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1565 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
1566 },
1567 { /* QPI1 Port 2 */
1568 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1569 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
1570 },
1571 { /* R2PCIe */
1572 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1573 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
1574 },
1575 { /* R3QPI0 Link 0 */
1576 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1577 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
1578 },
1579 { /* R3QPI0 Link 1 */
1580 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1581 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
1582 },
1583 { /* R3QPI1 Link 2 */
1584 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1585 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
1586 },
1587 { /* QPI Port 0 filter */
1588 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
1589 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1590 SNBEP_PCI_QPI_PORT0_FILTER),
1591 },
1592 { /* QPI Port 1 filter */
1593 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
1594 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1595 SNBEP_PCI_QPI_PORT1_FILTER),
1596 },
1597 { /* end: all zeroes */ }
1598};
1599
1600static struct pci_driver ivt_uncore_pci_driver = {
1601 .name = "ivt_uncore",
1602 .id_table = ivt_uncore_pci_ids,
1603};
1604/* end of IvyTown uncore support */
1605
1606/* Nehalem-EX uncore support */ 161/* Nehalem-EX uncore support */
1607DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5"); 162DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
1608DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7"); 163DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
@@ -3351,18 +1906,10 @@ static int __init uncore_pci_init(void)
3351 1906
3352 switch (boot_cpu_data.x86_model) { 1907 switch (boot_cpu_data.x86_model) {
3353 case 45: /* Sandy Bridge-EP */ 1908 case 45: /* Sandy Bridge-EP */
3354 ret = snbep_pci2phy_map_init(0x3ce0); 1909 ret = snbep_uncore_pci_init();
3355 if (ret)
3356 return ret;
3357 uncore_pci_uncores = snbep_pci_uncores;
3358 uncore_pci_driver = &snbep_uncore_pci_driver;
3359 break; 1910 break;
3360 case 62: /* IvyTown */ 1911 case 62: /* IvyTown */
3361 ret = snbep_pci2phy_map_init(0x0e1e); 1912 ret = ivt_uncore_pci_init();
3362 if (ret)
3363 return ret;
3364 uncore_pci_uncores = ivt_pci_uncores;
3365 uncore_pci_driver = &ivt_uncore_pci_driver;
3366 break; 1913 break;
3367 case 42: /* Sandy Bridge */ 1914 case 42: /* Sandy Bridge */
3368 ret = snb_uncore_pci_init(); 1915 ret = snb_uncore_pci_init();
@@ -3663,9 +2210,7 @@ static int __init uncore_cpu_init(void)
3663 snb_uncore_cpu_init(); 2210 snb_uncore_cpu_init();
3664 break; 2211 break;
3665 case 45: /* Sandy Bridge-EP */ 2212 case 45: /* Sandy Bridge-EP */
3666 if (snbep_uncore_cbox.num_boxes > max_cores) 2213 snbep_uncore_cpu_init();
3667 snbep_uncore_cbox.num_boxes = max_cores;
3668 uncore_msr_uncores = snbep_msr_uncores;
3669 break; 2214 break;
3670 case 46: /* Nehalem-EX */ 2215 case 46: /* Nehalem-EX */
3671 uncore_nhmex = true; 2216 uncore_nhmex = true;
@@ -3677,9 +2222,7 @@ static int __init uncore_cpu_init(void)
3677 uncore_msr_uncores = nhmex_msr_uncores; 2222 uncore_msr_uncores = nhmex_msr_uncores;
3678 break; 2223 break;
3679 case 62: /* IvyTown */ 2224 case 62: /* IvyTown */
3680 if (ivt_uncore_cbox.num_boxes > max_cores) 2225 ivt_uncore_cpu_init();
3681 ivt_uncore_cbox.num_boxes = max_cores;
3682 uncore_msr_uncores = ivt_msr_uncores;
3683 break; 2226 break;
3684 2227
3685 default: 2228 default:
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 86699caaacd8..538be93b5f19 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -24,164 +24,6 @@
24 24
25#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff) 25#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
26 26
27/* SNB-EP Box level control */
28#define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
29#define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1)
30#define SNBEP_PMON_BOX_CTL_FRZ (1 << 8)
31#define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16)
32#define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
33 SNBEP_PMON_BOX_CTL_RST_CTRS | \
34 SNBEP_PMON_BOX_CTL_FRZ_EN)
35/* SNB-EP event control */
36#define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff
37#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
38#define SNBEP_PMON_CTL_RST (1 << 17)
39#define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
40#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21)
41#define SNBEP_PMON_CTL_EN (1 << 22)
42#define SNBEP_PMON_CTL_INVERT (1 << 23)
43#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
44#define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
45 SNBEP_PMON_CTL_UMASK_MASK | \
46 SNBEP_PMON_CTL_EDGE_DET | \
47 SNBEP_PMON_CTL_INVERT | \
48 SNBEP_PMON_CTL_TRESH_MASK)
49
50/* SNB-EP Ubox event control */
51#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000
52#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \
53 (SNBEP_PMON_CTL_EV_SEL_MASK | \
54 SNBEP_PMON_CTL_UMASK_MASK | \
55 SNBEP_PMON_CTL_EDGE_DET | \
56 SNBEP_PMON_CTL_INVERT | \
57 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
58
59#define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19)
60#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
61 SNBEP_CBO_PMON_CTL_TID_EN)
62
63/* SNB-EP PCU event control */
64#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000
65#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000
66#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30)
67#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31)
68#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
69 (SNBEP_PMON_CTL_EV_SEL_MASK | \
70 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
71 SNBEP_PMON_CTL_EDGE_DET | \
72 SNBEP_PMON_CTL_EV_SEL_EXT | \
73 SNBEP_PMON_CTL_INVERT | \
74 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
75 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
76 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
77
78#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
79 (SNBEP_PMON_RAW_EVENT_MASK | \
80 SNBEP_PMON_CTL_EV_SEL_EXT)
81
82/* SNB-EP pci control register */
83#define SNBEP_PCI_PMON_BOX_CTL 0xf4
84#define SNBEP_PCI_PMON_CTL0 0xd8
85/* SNB-EP pci counter register */
86#define SNBEP_PCI_PMON_CTR0 0xa0
87
88/* SNB-EP home agent register */
89#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40
90#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44
91#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48
92/* SNB-EP memory controller register */
93#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0
94#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0
95/* SNB-EP QPI register */
96#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228
97#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c
98#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238
99#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c
100
101/* SNB-EP Ubox register */
102#define SNBEP_U_MSR_PMON_CTR0 0xc16
103#define SNBEP_U_MSR_PMON_CTL0 0xc10
104
105#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08
106#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09
107
108/* SNB-EP Cbo register */
109#define SNBEP_C0_MSR_PMON_CTR0 0xd16
110#define SNBEP_C0_MSR_PMON_CTL0 0xd10
111#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
112#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
113#define SNBEP_CBO_MSR_OFFSET 0x20
114
115#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f
116#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00
117#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000
118#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000
119
120#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \
121 .event = (e), \
122 .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \
123 .config_mask = (m), \
124 .idx = (i) \
125}
126
127/* SNB-EP PCU register */
128#define SNBEP_PCU_MSR_PMON_CTR0 0xc36
129#define SNBEP_PCU_MSR_PMON_CTL0 0xc30
130#define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24
131#define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34
132#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff
133#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
134#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
135
136/* IVT event control */
137#define IVT_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
138 SNBEP_PMON_BOX_CTL_RST_CTRS)
139#define IVT_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
140 SNBEP_PMON_CTL_UMASK_MASK | \
141 SNBEP_PMON_CTL_EDGE_DET | \
142 SNBEP_PMON_CTL_TRESH_MASK)
143/* IVT Ubox */
144#define IVT_U_MSR_PMON_GLOBAL_CTL 0xc00
145#define IVT_U_PMON_GLOBAL_FRZ_ALL (1 << 31)
146#define IVT_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29)
147
148#define IVT_U_MSR_PMON_RAW_EVENT_MASK \
149 (SNBEP_PMON_CTL_EV_SEL_MASK | \
150 SNBEP_PMON_CTL_UMASK_MASK | \
151 SNBEP_PMON_CTL_EDGE_DET | \
152 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
153/* IVT Cbo */
154#define IVT_CBO_MSR_PMON_RAW_EVENT_MASK (IVT_PMON_RAW_EVENT_MASK | \
155 SNBEP_CBO_PMON_CTL_TID_EN)
156
157#define IVT_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0)
158#define IVT_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5)
159#define IVT_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17)
160#define IVT_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
161#define IVT_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
162#define IVT_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
163#define IVT_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
164#define IVT_CB0_MSR_PMON_BOX_FILTER_IOSC (0x1ULL << 63)
165
166/* IVT home agent */
167#define IVT_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16)
168#define IVT_HA_PCI_PMON_RAW_EVENT_MASK \
169 (IVT_PMON_RAW_EVENT_MASK | \
170 IVT_HA_PCI_PMON_CTL_Q_OCC_RST)
171/* IVT PCU */
172#define IVT_PCU_MSR_PMON_RAW_EVENT_MASK \
173 (SNBEP_PMON_CTL_EV_SEL_MASK | \
174 SNBEP_PMON_CTL_EV_SEL_EXT | \
175 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
176 SNBEP_PMON_CTL_EDGE_DET | \
177 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
178 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
179 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
180/* IVT QPI */
181#define IVT_QPI_PCI_PMON_RAW_EVENT_MASK \
182 (IVT_PMON_RAW_EVENT_MASK | \
183 SNBEP_PMON_CTL_EV_SEL_EXT)
184
185/* NHM-EX event control */ 27/* NHM-EX event control */
186#define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff 28#define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff
187#define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00 29#define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00
@@ -666,3 +508,9 @@ int ivb_uncore_pci_init(void);
666int hsw_uncore_pci_init(void); 508int hsw_uncore_pci_init(void);
667void snb_uncore_cpu_init(void); 509void snb_uncore_cpu_init(void);
668void nhm_uncore_cpu_init(void); 510void nhm_uncore_cpu_init(void);
511
512/* perf_event_intel_uncore_snbep.c */
513int snbep_uncore_pci_init(void);
514void snbep_uncore_cpu_init(void);
515int ivt_uncore_pci_init(void);
516void ivt_uncore_cpu_init(void);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
new file mode 100644
index 000000000000..30468f434e9e
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -0,0 +1,1644 @@
1/* SandyBridge-EP/IvyTown uncore support */
2#include "perf_event_intel_uncore.h"
3
4
5/* SNB-EP Box level control */
6#define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
7#define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1)
8#define SNBEP_PMON_BOX_CTL_FRZ (1 << 8)
9#define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16)
10#define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
11 SNBEP_PMON_BOX_CTL_RST_CTRS | \
12 SNBEP_PMON_BOX_CTL_FRZ_EN)
13/* SNB-EP event control */
14#define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff
15#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
16#define SNBEP_PMON_CTL_RST (1 << 17)
17#define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
18#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21)
19#define SNBEP_PMON_CTL_EN (1 << 22)
20#define SNBEP_PMON_CTL_INVERT (1 << 23)
21#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
22#define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
23 SNBEP_PMON_CTL_UMASK_MASK | \
24 SNBEP_PMON_CTL_EDGE_DET | \
25 SNBEP_PMON_CTL_INVERT | \
26 SNBEP_PMON_CTL_TRESH_MASK)
27
28/* SNB-EP Ubox event control */
29#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000
30#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \
31 (SNBEP_PMON_CTL_EV_SEL_MASK | \
32 SNBEP_PMON_CTL_UMASK_MASK | \
33 SNBEP_PMON_CTL_EDGE_DET | \
34 SNBEP_PMON_CTL_INVERT | \
35 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
36
37#define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19)
38#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
39 SNBEP_CBO_PMON_CTL_TID_EN)
40
41/* SNB-EP PCU event control */
42#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000
43#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000
44#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30)
45#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31)
46#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
47 (SNBEP_PMON_CTL_EV_SEL_MASK | \
48 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
49 SNBEP_PMON_CTL_EDGE_DET | \
50 SNBEP_PMON_CTL_EV_SEL_EXT | \
51 SNBEP_PMON_CTL_INVERT | \
52 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
53 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
54 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
55
56#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
57 (SNBEP_PMON_RAW_EVENT_MASK | \
58 SNBEP_PMON_CTL_EV_SEL_EXT)
59
60/* SNB-EP pci control register */
61#define SNBEP_PCI_PMON_BOX_CTL 0xf4
62#define SNBEP_PCI_PMON_CTL0 0xd8
63/* SNB-EP pci counter register */
64#define SNBEP_PCI_PMON_CTR0 0xa0
65
66/* SNB-EP home agent register */
67#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40
68#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44
69#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48
70/* SNB-EP memory controller register */
71#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0
72#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0
73/* SNB-EP QPI register */
74#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228
75#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c
76#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238
77#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c
78
79/* SNB-EP Ubox register */
80#define SNBEP_U_MSR_PMON_CTR0 0xc16
81#define SNBEP_U_MSR_PMON_CTL0 0xc10
82
83#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08
84#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09
85
86/* SNB-EP Cbo register */
87#define SNBEP_C0_MSR_PMON_CTR0 0xd16
88#define SNBEP_C0_MSR_PMON_CTL0 0xd10
89#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
90#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
91#define SNBEP_CBO_MSR_OFFSET 0x20
92
93#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f
94#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00
95#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000
96#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000
97
98#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \
99 .event = (e), \
100 .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \
101 .config_mask = (m), \
102 .idx = (i) \
103}
104
105/* SNB-EP PCU register */
106#define SNBEP_PCU_MSR_PMON_CTR0 0xc36
107#define SNBEP_PCU_MSR_PMON_CTL0 0xc30
108#define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24
109#define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34
110#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff
111#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
112#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
113
114/* IVT event control */
115#define IVT_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
116 SNBEP_PMON_BOX_CTL_RST_CTRS)
117#define IVT_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
118 SNBEP_PMON_CTL_UMASK_MASK | \
119 SNBEP_PMON_CTL_EDGE_DET | \
120 SNBEP_PMON_CTL_TRESH_MASK)
121/* IVT Ubox */
122#define IVT_U_MSR_PMON_GLOBAL_CTL 0xc00
123#define IVT_U_PMON_GLOBAL_FRZ_ALL (1 << 31)
124#define IVT_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29)
125
126#define IVT_U_MSR_PMON_RAW_EVENT_MASK \
127 (SNBEP_PMON_CTL_EV_SEL_MASK | \
128 SNBEP_PMON_CTL_UMASK_MASK | \
129 SNBEP_PMON_CTL_EDGE_DET | \
130 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
131/* IVT Cbo */
132#define IVT_CBO_MSR_PMON_RAW_EVENT_MASK (IVT_PMON_RAW_EVENT_MASK | \
133 SNBEP_CBO_PMON_CTL_TID_EN)
134
135#define IVT_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0)
136#define IVT_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5)
137#define IVT_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17)
138#define IVT_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
139#define IVT_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
140#define IVT_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
141#define IVT_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
142#define IVT_CB0_MSR_PMON_BOX_FILTER_IOSC (0x1ULL << 63)
143
144/* IVT home agent */
145#define IVT_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16)
146#define IVT_HA_PCI_PMON_RAW_EVENT_MASK \
147 (IVT_PMON_RAW_EVENT_MASK | \
148 IVT_HA_PCI_PMON_CTL_Q_OCC_RST)
149/* IVT PCU */
150#define IVT_PCU_MSR_PMON_RAW_EVENT_MASK \
151 (SNBEP_PMON_CTL_EV_SEL_MASK | \
152 SNBEP_PMON_CTL_EV_SEL_EXT | \
153 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
154 SNBEP_PMON_CTL_EDGE_DET | \
155 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
156 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
157 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
158/* IVT QPI */
159#define IVT_QPI_PCI_PMON_RAW_EVENT_MASK \
160 (IVT_PMON_RAW_EVENT_MASK | \
161 SNBEP_PMON_CTL_EV_SEL_EXT)
162
163#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
164 ((1ULL << (n)) - 1)))
165
166DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
167DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
168DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
169DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
170DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
171DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
172DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
173DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
174DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
175DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
176DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
177DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
178DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
179DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
180DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
181DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
182DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
183DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
184DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
185DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
186DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
187DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
188DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
189DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
190DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
191DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
192DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
193DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
194DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
195DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
196DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
197DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
198DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
199DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
200DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
201DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
202DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
203DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
204DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
205DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
206DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
207
208static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
209{
210 struct pci_dev *pdev = box->pci_dev;
211 int box_ctl = uncore_pci_box_ctl(box);
212 u32 config = 0;
213
214 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
215 config |= SNBEP_PMON_BOX_CTL_FRZ;
216 pci_write_config_dword(pdev, box_ctl, config);
217 }
218}
219
220static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
221{
222 struct pci_dev *pdev = box->pci_dev;
223 int box_ctl = uncore_pci_box_ctl(box);
224 u32 config = 0;
225
226 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
227 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
228 pci_write_config_dword(pdev, box_ctl, config);
229 }
230}
231
232static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
233{
234 struct pci_dev *pdev = box->pci_dev;
235 struct hw_perf_event *hwc = &event->hw;
236
237 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
238}
239
240static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
241{
242 struct pci_dev *pdev = box->pci_dev;
243 struct hw_perf_event *hwc = &event->hw;
244
245 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
246}
247
248static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
249{
250 struct pci_dev *pdev = box->pci_dev;
251 struct hw_perf_event *hwc = &event->hw;
252 u64 count = 0;
253
254 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
255 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
256
257 return count;
258}
259
260static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
261{
262 struct pci_dev *pdev = box->pci_dev;
263
264 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
265}
266
267static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
268{
269 u64 config;
270 unsigned msr;
271
272 msr = uncore_msr_box_ctl(box);
273 if (msr) {
274 rdmsrl(msr, config);
275 config |= SNBEP_PMON_BOX_CTL_FRZ;
276 wrmsrl(msr, config);
277 }
278}
279
280static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
281{
282 u64 config;
283 unsigned msr;
284
285 msr = uncore_msr_box_ctl(box);
286 if (msr) {
287 rdmsrl(msr, config);
288 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
289 wrmsrl(msr, config);
290 }
291}
292
/*
 * Enable an MSR-based uncore event.  If the event uses a shared extra
 * (filter) register, that register is programmed first so the filter is
 * in place before the counter starts counting.
 */
293static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
294{
295	struct hw_perf_event *hwc = &event->hw;
296	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
297
	/* Program the shared filter register (shared_regs[0]) if in use. */
298	if (reg1->idx != EXTRA_REG_NONE)
299		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
300
	/* Finally set the enable bit in the event control MSR. */
301	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
302}
303
304static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
305 struct perf_event *event)
306{
307 struct hw_perf_event *hwc = &event->hw;
308
309 wrmsrl(hwc->config_base, hwc->config);
310}
311
312static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
313{
314 unsigned msr = uncore_msr_box_ctl(box);
315
316 if (msr)
317 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
318}
319
320static struct attribute *snbep_uncore_formats_attr[] = {
321 &format_attr_event.attr,
322 &format_attr_umask.attr,
323 &format_attr_edge.attr,
324 &format_attr_inv.attr,
325 &format_attr_thresh8.attr,
326 NULL,
327};
328
329static struct attribute *snbep_uncore_ubox_formats_attr[] = {
330 &format_attr_event.attr,
331 &format_attr_umask.attr,
332 &format_attr_edge.attr,
333 &format_attr_inv.attr,
334 &format_attr_thresh5.attr,
335 NULL,
336};
337
338static struct attribute *snbep_uncore_cbox_formats_attr[] = {
339 &format_attr_event.attr,
340 &format_attr_umask.attr,
341 &format_attr_edge.attr,
342 &format_attr_tid_en.attr,
343 &format_attr_inv.attr,
344 &format_attr_thresh8.attr,
345 &format_attr_filter_tid.attr,
346 &format_attr_filter_nid.attr,
347 &format_attr_filter_state.attr,
348 &format_attr_filter_opc.attr,
349 NULL,
350};
351
352static struct attribute *snbep_uncore_pcu_formats_attr[] = {
353 &format_attr_event_ext.attr,
354 &format_attr_occ_sel.attr,
355 &format_attr_edge.attr,
356 &format_attr_inv.attr,
357 &format_attr_thresh5.attr,
358 &format_attr_occ_invert.attr,
359 &format_attr_occ_edge.attr,
360 &format_attr_filter_band0.attr,
361 &format_attr_filter_band1.attr,
362 &format_attr_filter_band2.attr,
363 &format_attr_filter_band3.attr,
364 NULL,
365};
366
367static struct attribute *snbep_uncore_qpi_formats_attr[] = {
368 &format_attr_event_ext.attr,
369 &format_attr_umask.attr,
370 &format_attr_edge.attr,
371 &format_attr_inv.attr,
372 &format_attr_thresh8.attr,
373 &format_attr_match_rds.attr,
374 &format_attr_match_rnid30.attr,
375 &format_attr_match_rnid4.attr,
376 &format_attr_match_dnid.attr,
377 &format_attr_match_mc.attr,
378 &format_attr_match_opc.attr,
379 &format_attr_match_vnw.attr,
380 &format_attr_match0.attr,
381 &format_attr_match1.attr,
382 &format_attr_mask_rds.attr,
383 &format_attr_mask_rnid30.attr,
384 &format_attr_mask_rnid4.attr,
385 &format_attr_mask_dnid.attr,
386 &format_attr_mask_mc.attr,
387 &format_attr_mask_opc.attr,
388 &format_attr_mask_vnw.attr,
389 &format_attr_mask0.attr,
390 &format_attr_mask1.attr,
391 NULL,
392};
393
394static struct uncore_event_desc snbep_uncore_imc_events[] = {
395 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
396 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
397 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
398 { /* end: all zeroes */ },
399};
400
401static struct uncore_event_desc snbep_uncore_qpi_events[] = {
402 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
403 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
404 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
405 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
406 { /* end: all zeroes */ },
407};
408
409static struct attribute_group snbep_uncore_format_group = {
410 .name = "format",
411 .attrs = snbep_uncore_formats_attr,
412};
413
414static struct attribute_group snbep_uncore_ubox_format_group = {
415 .name = "format",
416 .attrs = snbep_uncore_ubox_formats_attr,
417};
418
419static struct attribute_group snbep_uncore_cbox_format_group = {
420 .name = "format",
421 .attrs = snbep_uncore_cbox_formats_attr,
422};
423
424static struct attribute_group snbep_uncore_pcu_format_group = {
425 .name = "format",
426 .attrs = snbep_uncore_pcu_formats_attr,
427};
428
429static struct attribute_group snbep_uncore_qpi_format_group = {
430 .name = "format",
431 .attrs = snbep_uncore_qpi_formats_attr,
432};
433
434#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
435 .init_box = snbep_uncore_msr_init_box, \
436 .disable_box = snbep_uncore_msr_disable_box, \
437 .enable_box = snbep_uncore_msr_enable_box, \
438 .disable_event = snbep_uncore_msr_disable_event, \
439 .enable_event = snbep_uncore_msr_enable_event, \
440 .read_counter = uncore_msr_read_counter
441
442static struct intel_uncore_ops snbep_uncore_msr_ops = {
443 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
444};
445
446#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
447 .init_box = snbep_uncore_pci_init_box, \
448 .disable_box = snbep_uncore_pci_disable_box, \
449 .enable_box = snbep_uncore_pci_enable_box, \
450 .disable_event = snbep_uncore_pci_disable_event, \
451 .read_counter = snbep_uncore_pci_read_counter
452
453static struct intel_uncore_ops snbep_uncore_pci_ops = {
454 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
455 .enable_event = snbep_uncore_pci_enable_event, \
456};
457
458static struct event_constraint snbep_uncore_cbox_constraints[] = {
459 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
460 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
461 UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
462 UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
463 UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
464 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
465 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
466 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
467 UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
468 UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
469 UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
470 UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
471 UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
472 EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
473 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
474 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
475 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
476 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
477 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
478 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
479 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
480 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
481 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
482 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
483 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
484 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
485 EVENT_CONSTRAINT_END
486};
487
488static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
489 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
490 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
491 UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
492 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
493 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
494 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
495 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
496 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
497 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
498 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
499 EVENT_CONSTRAINT_END
500};
501
502static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
503 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
504 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
505 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
506 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
507 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
508 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
509 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
510 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
511 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
512 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
513 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
514 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
515 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
516 UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
517 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
518 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
519 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
520 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
521 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
522 UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
523 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
524 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
525 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
526 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
527 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
528 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
529 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
530 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
531 EVENT_CONSTRAINT_END
532};
533
534static struct intel_uncore_type snbep_uncore_ubox = {
535 .name = "ubox",
536 .num_counters = 2,
537 .num_boxes = 1,
538 .perf_ctr_bits = 44,
539 .fixed_ctr_bits = 48,
540 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
541 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
542 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
543 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
544 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
545 .ops = &snbep_uncore_msr_ops,
546 .format_group = &snbep_uncore_ubox_format_group,
547};
548
549static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
550 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
551 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
552 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
553 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
554 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
555 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
556 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
557 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
558 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
559 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
560 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
561 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
562 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
563 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
564 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
565 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
566 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
567 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
568 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
569 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
570 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
571 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
572 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
573 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
574 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
575 EVENT_EXTRA_END
576};
577
/*
 * Release the Cbox filter-field references this event acquired in
 * __snbep_cbox_get_constraint().  er->ref packs one 6-bit reference
 * count per filter field (5 fields), so field i is released by
 * subtracting 1 << (i * 6).
 */
578static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
579{
580	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
581	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
582	int i;
583
	/* Fake boxes (event validation path) never take references. */
584	if (uncore_box_is_fake(box))
585		return;
586
	/* Drop one reference for every field recorded in reg1->alloc. */
587	for (i = 0; i < 5; i++) {
588		if (reg1->alloc & (0x1 << i))
589			atomic_sub(1 << (i * 6), &er->ref);
590	}
591	reg1->alloc = 0;
592}
593
/*
 * Try to claim the Cbox filter fields this event needs in the box's
 * shared filter register.  reg1->idx is a bitmap of required fields;
 * @cbox_filter_mask translates a field bit into the corresponding bit
 * mask in the filter MSR (SNB-EP and IVT have different layouts, hence
 * the callback).  A field can be shared if its current contents match
 * (reg1->config ^ er->config masked) or if it is unreferenced.
 *
 * Returns NULL on success, or &uncore_constraint_empty if some field is
 * already claimed with a conflicting value.
 */
594static struct event_constraint *
595__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
596			    u64 (*cbox_filter_mask)(int fields))
597{
598	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
599	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
600	int i, alloc = 0;
601	unsigned long flags;
602	u64 mask;
603
	/* No filter fields requested: nothing to claim. */
604	if (reg1->idx == EXTRA_REG_NONE)
605		return NULL;
606
607	raw_spin_lock_irqsave(&er->lock, flags);
608	for (i = 0; i < 5; i++) {
609		if (!(reg1->idx & (0x1 << i)))
610			continue;
		/* Real (non-fake) events skip fields they already hold. */
611		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
612			continue;
613
614		mask = cbox_filter_mask(0x1 << i);
		/*
		 * Claimable if unreferenced (6-bit refcount slice is 0) or
		 * if the requested value matches what is already programmed.
		 */
615		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
616		    !((reg1->config ^ er->config) & mask)) {
617			atomic_add(1 << (i * 6), &er->ref);
618			er->config &= ~mask;
619			er->config |= reg1->config & mask;
620			alloc |= (0x1 << i);
621		} else {
622			break;
623		}
624	}
625	raw_spin_unlock_irqrestore(&er->lock, flags);
	/* Loop broke early: a field conflicted — roll back and fail. */
626	if (i < 5)
627		goto fail;
628
	/* Record what we hold so put_constraint() can release it later. */
629	if (!uncore_box_is_fake(box))
630		reg1->alloc |= alloc;
631
632	return NULL;
633fail:
	/* Undo every reference taken in this call. */
634	for (; i >= 0; i--) {
635		if (alloc & (0x1 << i))
636			atomic_sub(1 << (i * 6), &er->ref);
637	}
638	return &uncore_constraint_empty;
639}
640
641static u64 snbep_cbox_filter_mask(int fields)
642{
643 u64 mask = 0;
644
645 if (fields & 0x1)
646 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
647 if (fields & 0x2)
648 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
649 if (fields & 0x4)
650 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
651 if (fields & 0x8)
652 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
653
654 return mask;
655}
656
657static struct event_constraint *
658snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
659{
660 return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
661}
662
/*
 * Decide which Cbox filter fields an event needs.  The extra_regs table
 * maps event encodings to the filter fields they require; matching
 * entries OR their idx bitmaps together.  If any field is needed, point
 * reg1 at this box's filter MSR and stash the masked config1 value.
 */
663static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
664{
665	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
666	struct extra_reg *er;
667	int idx = 0;
668
	/* Accumulate required filter fields from all matching table rows. */
669	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
670		if (er->event != (event->hw.config & er->config_mask))
671			continue;
672		idx |= er->idx;
673	}
674
675	if (idx) {
		/* Each Cbox's MSR bank is SNBEP_CBO_MSR_OFFSET apart. */
676		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
677			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
678		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
679		reg1->idx = idx;
680	}
681	return 0;
682}
683
684static struct intel_uncore_ops snbep_uncore_cbox_ops = {
685 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
686 .hw_config = snbep_cbox_hw_config,
687 .get_constraint = snbep_cbox_get_constraint,
688 .put_constraint = snbep_cbox_put_constraint,
689};
690
691static struct intel_uncore_type snbep_uncore_cbox = {
692 .name = "cbox",
693 .num_counters = 4,
694 .num_boxes = 8,
695 .perf_ctr_bits = 44,
696 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
697 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
698 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
699 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
700 .msr_offset = SNBEP_CBO_MSR_OFFSET,
701 .num_shared_regs = 1,
702 .constraints = snbep_uncore_cbox_constraints,
703 .ops = &snbep_uncore_cbox_ops,
704 .format_group = &snbep_uncore_cbox_format_group,
705};
706
/*
 * Compute (and optionally commit) the event's filter value relocated to
 * a different PCU filter-band slot.  Each band occupies one byte of the
 * filter register, so moving between slots shifts the value by 8 bits
 * per slot.  When @modify is true the event itself is updated: the
 * event select (0xb..0xe, one per band) is bumped by the slot delta and
 * the relocated filter value and new index are stored.
 */
707static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
708{
709	struct hw_perf_event *hwc = &event->hw;
710	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
711	u64 config = reg1->config;
712
	/* Slide the one-byte band value to its new byte position. */
713	if (new_idx > reg1->idx)
714		config <<= 8 * (new_idx - reg1->idx);
715	else
716		config >>= 8 * (reg1->idx - new_idx);
717
718	if (modify) {
		/* Event selects 0xb-0xe map 1:1 to band slots 0-3. */
719		hwc->config += new_idx - reg1->idx;
720		reg1->config = config;
721		reg1->idx = new_idx;
722	}
723	return config;
724}
725
/*
 * Claim a one-byte slot in the shared PCU filter register.  If the
 * event's preferred slot is taken with a conflicting value, rotate
 * through the other three band slots (relocating the filter value with
 * snbep_pcu_alter_er) before giving up.  er->ref packs an 8-bit
 * reference count per slot.
 *
 * Returns NULL on success, &uncore_constraint_empty if all slots
 * conflict.
 */
726static struct event_constraint *
727snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
728{
729	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
730	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
731	unsigned long flags;
732	int idx = reg1->idx;
733	u64 mask, config1 = reg1->config;
734	bool ok = false;
735
	/* Nothing to do without a filter, or if we already hold a slot. */
736	if (reg1->idx == EXTRA_REG_NONE ||
737	    (!uncore_box_is_fake(box) && reg1->alloc))
738		return NULL;
739again:
	/* Each band slot is one byte wide in the filter register. */
740	mask = 0xffULL << (idx * 8);
741	raw_spin_lock_irqsave(&er->lock, flags);
	/* Claimable if the slot is unreferenced or holds a matching value. */
742	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
743	    !((config1 ^ er->config) & mask)) {
744		atomic_add(1 << (idx * 8), &er->ref);
745		er->config &= ~mask;
746		er->config |= config1 & mask;
747		ok = true;
748	}
749	raw_spin_unlock_irqrestore(&er->lock, flags);
750
751	if (!ok) {
		/* Try the next slot; stop once we wrap back to the start. */
752		idx = (idx + 1) % 4;
753		if (idx != reg1->idx) {
754			config1 = snbep_pcu_alter_er(event, idx, false);
755			goto again;
756		}
757		return &uncore_constraint_empty;
758	}
759
760	if (!uncore_box_is_fake(box)) {
		/* Commit the relocation if we landed on a different slot. */
761		if (idx != reg1->idx)
762			snbep_pcu_alter_er(event, idx, true);
763		reg1->alloc = 1;
764	}
765	return NULL;
766}
767
768static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
769{
770 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
771 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
772
773 if (uncore_box_is_fake(box) || !reg1->alloc)
774 return;
775
776 atomic_sub(1 << (reg1->idx * 8), &er->ref);
777 reg1->alloc = 0;
778}
779
780static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
781{
782 struct hw_perf_event *hwc = &event->hw;
783 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
784 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
785
786 if (ev_sel >= 0xb && ev_sel <= 0xe) {
787 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
788 reg1->idx = ev_sel - 0xb;
789 reg1->config = event->attr.config1 & (0xff << reg1->idx);
790 }
791 return 0;
792}
793
794static struct intel_uncore_ops snbep_uncore_pcu_ops = {
795 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
796 .hw_config = snbep_pcu_hw_config,
797 .get_constraint = snbep_pcu_get_constraint,
798 .put_constraint = snbep_pcu_put_constraint,
799};
800
801static struct intel_uncore_type snbep_uncore_pcu = {
802 .name = "pcu",
803 .num_counters = 4,
804 .num_boxes = 1,
805 .perf_ctr_bits = 48,
806 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
807 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
808 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
809 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
810 .num_shared_regs = 1,
811 .ops = &snbep_uncore_pcu_ops,
812 .format_group = &snbep_uncore_pcu_format_group,
813};
814
815static struct intel_uncore_type *snbep_msr_uncores[] = {
816 &snbep_uncore_ubox,
817 &snbep_uncore_cbox,
818 &snbep_uncore_pcu,
819 NULL,
820};
821
/*
 * Register the SNB-EP MSR-based uncore PMUs (Ubox, Cbox, PCU) with the
 * generic uncore layer.
 */
822void snbep_uncore_cpu_init(void)
823{
	/* There is one Cbox per core; clamp to the actual core count. */
824	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
825		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
826	uncore_msr_uncores = snbep_msr_uncores;
827}
828
829enum {
830 SNBEP_PCI_QPI_PORT0_FILTER,
831 SNBEP_PCI_QPI_PORT1_FILTER,
832};
833
/*
 * Configure QPI packet-matching events.  Event select 0x38 uses the
 * per-port match/mask registers, which live on a separate PCI function
 * (programmed later in snbep_qpi_enable_event).  reg1 carries the
 * 64-bit match value (config1), reg2 the 64-bit mask value (config2).
 */
834static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
835{
836	struct hw_perf_event *hwc = &event->hw;
837	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
838	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
839
	/* NOTE(review): only the low 8 event-select bits are compared
	 * here; ev_sel_ext (bit 21) is not checked — confirm intended. */
840	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
841		reg1->idx = 0;
842		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
843		reg1->config = event->attr.config1;
844		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
845		reg2->config = event->attr.config2;
846	}
847	return 0;
848}
849
/*
 * Enable a QPI event.  If the event uses packet match/mask filtering
 * (set up in snbep_qpi_hw_config), first program the match/mask
 * registers, which live on a separate per-port filter PCI device
 * (looked up via uncore_extra_pci_dev), then set the enable bit in the
 * event control register.
 */
850static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
851{
852	struct pci_dev *pdev = box->pci_dev;
853	struct hw_perf_event *hwc = &event->hw;
854	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
855	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
856
857	if (reg1->idx != EXTRA_REG_NONE) {
		/* pmu_idx selects the PORT0/PORT1 filter device. */
858		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
859		struct pci_dev *filter_pdev = uncore_extra_pci_dev[box->phys_id][idx];
860		WARN_ON_ONCE(!filter_pdev);
861		if (filter_pdev) {
			/* 64-bit match/mask values, written as dword pairs. */
862			pci_write_config_dword(filter_pdev, reg1->reg,
863					       (u32)reg1->config);
864			pci_write_config_dword(filter_pdev, reg1->reg + 4,
865					       (u32)(reg1->config >> 32));
866			pci_write_config_dword(filter_pdev, reg2->reg,
867					       (u32)reg2->config);
868			pci_write_config_dword(filter_pdev, reg2->reg + 4,
869					       (u32)(reg2->config >> 32));
870		}
871	}
872
873	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
874}
875
876static struct intel_uncore_ops snbep_uncore_qpi_ops = {
877 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
878 .enable_event = snbep_qpi_enable_event,
879 .hw_config = snbep_qpi_hw_config,
880 .get_constraint = uncore_get_constraint,
881 .put_constraint = uncore_put_constraint,
882};
883
884#define SNBEP_UNCORE_PCI_COMMON_INIT() \
885 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
886 .event_ctl = SNBEP_PCI_PMON_CTL0, \
887 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
888 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
889 .ops = &snbep_uncore_pci_ops, \
890 .format_group = &snbep_uncore_format_group
891
892static struct intel_uncore_type snbep_uncore_ha = {
893 .name = "ha",
894 .num_counters = 4,
895 .num_boxes = 1,
896 .perf_ctr_bits = 48,
897 SNBEP_UNCORE_PCI_COMMON_INIT(),
898};
899
900static struct intel_uncore_type snbep_uncore_imc = {
901 .name = "imc",
902 .num_counters = 4,
903 .num_boxes = 4,
904 .perf_ctr_bits = 48,
905 .fixed_ctr_bits = 48,
906 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
907 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
908 .event_descs = snbep_uncore_imc_events,
909 SNBEP_UNCORE_PCI_COMMON_INIT(),
910};
911
912static struct intel_uncore_type snbep_uncore_qpi = {
913 .name = "qpi",
914 .num_counters = 4,
915 .num_boxes = 2,
916 .perf_ctr_bits = 48,
917 .perf_ctr = SNBEP_PCI_PMON_CTR0,
918 .event_ctl = SNBEP_PCI_PMON_CTL0,
919 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
920 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
921 .num_shared_regs = 1,
922 .ops = &snbep_uncore_qpi_ops,
923 .event_descs = snbep_uncore_qpi_events,
924 .format_group = &snbep_uncore_qpi_format_group,
925};
926
927
/* R2PCIe ring agent: one box, 44-bit counters, with counter constraints. */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* R3QPI ring-to-QPI agents: two boxes, only three usable counters each. */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
945
/* Indices into snbep_pci_uncores[]; encoded into pci_device_id driver_data. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

/* NULL-terminated table of all SNB-EP PCI-based uncore PMU types. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
962
963static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
964 { /* Home Agent */
965 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
966 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
967 },
968 { /* MC Channel 0 */
969 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
970 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
971 },
972 { /* MC Channel 1 */
973 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
974 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
975 },
976 { /* MC Channel 2 */
977 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
978 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
979 },
980 { /* MC Channel 3 */
981 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
982 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
983 },
984 { /* QPI Port 0 */
985 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
986 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
987 },
988 { /* QPI Port 1 */
989 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
990 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
991 },
992 { /* R2PCIe */
993 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
994 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
995 },
996 { /* R3QPI Link 0 */
997 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
998 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
999 },
1000 { /* R3QPI Link 1 */
1001 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
1002 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
1003 },
1004 { /* QPI Port 0 filter */
1005 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
1006 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1007 SNBEP_PCI_QPI_PORT0_FILTER),
1008 },
1009 { /* QPI Port 0 filter */
1010 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
1011 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1012 SNBEP_PCI_QPI_PORT1_FILTER),
1013 },
1014 { /* end: all zeroes */ }
1015};
1016
/*
 * Driver skeleton (name + ID table only); published to the common uncore
 * PCI layer by snbep_uncore_pci_init().
 */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1021
/*
 * Build the PCI-bus -> physical-socket mapping (uncore_pcibus_to_physid)
 * by walking every UBOX device with PCI device id @devid.
 *
 * Returns 0 on success or a -errno translated from the failed PCI config
 * read.
 */
static int snbep_pci2phy_map_init(int devid)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, 0x40, &config);
		if (err)
			break;
		/*
		 * NOTE(review): the full 32-bit register value is used here
		 * but compared against 3-bit fields below — assumes the
		 * upper bits of config reg 0x40 read as zero; confirm
		 * against the uncore datasheet.
		 */
		nodeid = config;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, 0x54, &config);
		if (err)
			break;
		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				uncore_pcibus_to_physid[bus] = i;
				break;
			}
		}
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.  Walk the bus
		 * numbers downward, propagating the last mapping seen into
		 * every unmapped slot.
		 */
		i = -1;
		for (bus = 255; bus >= 0; bus--) {
			if (uncore_pcibus_to_physid[bus] >= 0)
				i = uncore_pcibus_to_physid[bus];
			else
				uncore_pcibus_to_physid[bus] = i;
		}
	}

	/* drop the reference left by the final pci_get_device() iteration */
	if (ubox_dev)
		pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
1078
1079int snbep_uncore_pci_init(void)
1080{
1081 int ret = snbep_pci2phy_map_init(0x3ce0);
1082 if (ret)
1083 return ret;
1084 uncore_pci_uncores = snbep_pci_uncores;
1085 uncore_pci_driver = &snbep_uncore_pci_driver;
1086 return 0;
1087}
1088/* end of Sandy Bridge-EP uncore support */
1089
1090/* IvyTown uncore support */
1091static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
1092{
1093 unsigned msr = uncore_msr_box_ctl(box);
1094 if (msr)
1095 wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
1096}
1097
1098static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
1099{
1100 struct pci_dev *pdev = box->pci_dev;
1101
1102 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
1103}
1104
/*
 * Common MSR box ops for IvyTown: IVT-specific init_box, everything else
 * reuses the SNB-EP / generic handlers.
 */
#define IVT_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivt_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

/* Default MSR ops (no extra hw_config/constraint hooks). */
static struct intel_uncore_ops ivt_uncore_msr_ops = {
	IVT_UNCORE_MSR_OPS_COMMON_INIT(),
};
1116
/* Default PCI box ops for IvyTown: IVT init_box, SNB-EP for the rest. */
static struct intel_uncore_ops ivt_uncore_pci_ops = {
	.init_box	= ivt_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
1125
/*
 * Fields shared by the generic IvyTown PCI PMON box types: same register
 * offsets as SNB-EP, but the IVT raw event mask, ops and format group.
 */
#define IVT_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVT_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivt_uncore_pci_ops,			\
	.format_group	= &ivt_uncore_format_group
1133
/* Generic IVT event format: event/umask/edge/inv + 8-bit threshold. */
static struct attribute *ivt_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* UBOX format: as generic but only a 5-bit threshold. */
static struct attribute *ivt_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* C-box format: tid_en plus the filter fields (tid/link/state/nid/opc). */
static struct attribute *ivt_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	NULL,
};

/* PCU format: occupancy selection/edge/invert plus four filter bands. */
static struct attribute *ivt_uncore_pcu_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI format: generic fields plus the full match/mask register layout. */
static struct attribute *ivt_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1205
/* sysfs "format" groups wrapping the attribute arrays above. */
static struct attribute_group ivt_uncore_format_group = {
	.name = "format",
	.attrs = ivt_uncore_formats_attr,
};

static struct attribute_group ivt_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivt_uncore_ubox_formats_attr,
};

static struct attribute_group ivt_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivt_uncore_cbox_formats_attr,
};

static struct attribute_group ivt_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivt_uncore_pcu_formats_attr,
};

static struct attribute_group ivt_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivt_uncore_qpi_formats_attr,
};
1230
/*
 * IVT UBOX: one box, two 44-bit general counters plus a 48-bit fixed
 * (UCLK) counter; uses the SNB-EP MSR offsets with the IVT event mask.
 */
static struct intel_uncore_type ivt_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVT_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivt_uncore_msr_ops,
	.format_group	= &ivt_uncore_ubox_format_group,
};
1245
/*
 * C-box events that need extra filter-register state.  Each entry matches
 * an event encoding (under config_mask) and contributes filter-field
 * selector bits (idx) that ivt_cbox_hw_config() ORs together and
 * ivt_cbox_filter_mask() translates into filter-register bits.
 */
static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1286
1287static u64 ivt_cbox_filter_mask(int fields)
1288{
1289 u64 mask = 0;
1290
1291 if (fields & 0x1)
1292 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
1293 if (fields & 0x2)
1294 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
1295 if (fields & 0x4)
1296 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
1297 if (fields & 0x8)
1298 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
1299 if (fields & 0x10)
1300 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;
1301
1302 return mask;
1303}
1304
/*
 * Delegate to the common SNB-EP C-box constraint logic, parameterized with
 * the IvyTown filter-field mask translator.
 */
static struct event_constraint *
ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
}
1310
1311static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1312{
1313 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1314 struct extra_reg *er;
1315 int idx = 0;
1316
1317 for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
1318 if (er->event != (event->hw.config & er->config_mask))
1319 continue;
1320 idx |= er->idx;
1321 }
1322
1323 if (idx) {
1324 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1325 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1326 reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
1327 reg1->idx = idx;
1328 }
1329 return 0;
1330}
1331
/*
 * Enable a C-box event: program the box filter first (when the event uses
 * one), then set the enable bit in the event's control MSR.
 */
static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		/*
		 * NOTE(review): the high half is written to reg + 6 —
		 * presumably a second filter MSR six MSRs above the first;
		 * confirm against the IVT uncore register layout.
		 */
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1345
/*
 * C-box ops: IVT-specific enable (filter programming), hw_config and
 * get_constraint; disable/box handling shared with SNB-EP.
 */
static struct intel_uncore_ops ivt_uncore_cbox_ops = {
	.init_box		= ivt_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivt_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivt_cbox_hw_config,
	.get_constraint		= ivt_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1357
/*
 * IVT C-box (LLC slice): up to 15 boxes (clamped to the core count in
 * ivt_uncore_cpu_init), one shared (filter) register per box.
 */
static struct intel_uncore_type ivt_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivt_uncore_cbox_ops,
	.format_group		= &ivt_uncore_cbox_format_group,
};
1373
/* PCU ops: common IVT MSR ops plus the SNB-EP PCU config/constraint hooks. */
static struct intel_uncore_ops ivt_uncore_pcu_ops = {
	IVT_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* Power Control Unit: one box, one shared (filter) register. */
static struct intel_uncore_type ivt_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivt_uncore_pcu_ops,
	.format_group		= &ivt_uncore_pcu_format_group,
};
1394
/* NULL-terminated table of all IvyTown MSR-based uncore PMU types. */
static struct intel_uncore_type *ivt_msr_uncores[] = {
	&ivt_uncore_ubox,
	&ivt_uncore_cbox,
	&ivt_uncore_pcu,
	NULL,
};
1401
1402void ivt_uncore_cpu_init(void)
1403{
1404 if (ivt_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1405 ivt_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1406 uncore_msr_uncores = ivt_msr_uncores;
1407}
1408
/* IVT Home Agents: two boxes, four 48-bit general counters each. */
static struct intel_uncore_type ivt_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVT_UNCORE_PCI_COMMON_INIT(),
};

/*
 * IVT IMC: eight channel boxes with a fixed counter/control pair.
 * NOTE(review): unlike snbep_uncore_imc, no .event_descs alias table is
 * attached here.
 */
static struct intel_uncore_type ivt_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	IVT_UNCORE_PCI_COMMON_INIT(),
};
1427
/*
 * IRP box registers are not at a regular stride, so the per-counter
 * control and counter offsets are table-driven, indexed by counter number.
 */
static unsigned ivt_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivt_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1431
1432static void ivt_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1433{
1434 struct pci_dev *pdev = box->pci_dev;
1435 struct hw_perf_event *hwc = &event->hw;
1436
1437 pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx],
1438 hwc->config | SNBEP_PMON_CTL_EN);
1439}
1440
1441static void ivt_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1442{
1443 struct pci_dev *pdev = box->pci_dev;
1444 struct hw_perf_event *hwc = &event->hw;
1445
1446 pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], hwc->config);
1447}
1448
/*
 * Read a 64-bit IRP counter as two 32-bit PCI config reads, assembling
 * low and high dwords directly into @count via the (u32 *) casts (relies
 * on x86 little-endian layout).  Read errors leave the half as zero.
 */
static u64 ivt_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
1460
/* IRP ops: IRP-specific event enable/disable/read for the irregular offsets. */
static struct intel_uncore_ops ivt_uncore_irp_ops = {
	.init_box	= ivt_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivt_uncore_irp_disable_event,
	.enable_event	= ivt_uncore_irp_enable_event,
	.read_counter	= ivt_uncore_irp_read_counter,
};

/*
 * IRP (IIO ring port) box: no perf_ctr/event_ctl bases — the per-counter
 * offsets come from ivt_uncore_irp_ctls/ctrs via the ops above.
 */
static struct intel_uncore_type ivt_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVT_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivt_uncore_irp_ops,
	.format_group		= &ivt_uncore_format_group,
};
1480
/*
 * QPI ops: IVT init_box plus the SNB-EP QPI enable/config hooks and
 * shared-register constraint management.
 */
static struct intel_uncore_ops ivt_uncore_qpi_ops = {
	.init_box	= ivt_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

/* IVT QPI ports: three boxes; one shared register for match/mask state. */
static struct intel_uncore_type ivt_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivt_uncore_qpi_ops,
	.format_group		= &ivt_uncore_qpi_format_group,
};
1506
/* IVT R2PCIe ring agent: one box, 44-bit counters, with constraints. */
static struct intel_uncore_type ivt_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVT_UNCORE_PCI_COMMON_INIT(),
};

/* IVT R3QPI ring-to-QPI agents: two boxes, three usable counters each. */
static struct intel_uncore_type ivt_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVT_UNCORE_PCI_COMMON_INIT(),
};
1524
/* Indices into ivt_pci_uncores[]; encoded into pci_device_id driver_data. */
enum {
	IVT_PCI_UNCORE_HA,
	IVT_PCI_UNCORE_IMC,
	IVT_PCI_UNCORE_IRP,
	IVT_PCI_UNCORE_QPI,
	IVT_PCI_UNCORE_R2PCIE,
	IVT_PCI_UNCORE_R3QPI,
};

/* NULL-terminated table of all IvyTown PCI-based uncore PMU types. */
static struct intel_uncore_type *ivt_pci_uncores[] = {
	[IVT_PCI_UNCORE_HA]	= &ivt_uncore_ha,
	[IVT_PCI_UNCORE_IMC]	= &ivt_uncore_imc,
	[IVT_PCI_UNCORE_IRP]	= &ivt_uncore_irp,
	[IVT_PCI_UNCORE_QPI]	= &ivt_uncore_qpi,
	[IVT_PCI_UNCORE_R2PCIE]	= &ivt_uncore_r2pcie,
	[IVT_PCI_UNCORE_R3QPI]	= &ivt_uncore_r3qpi,
	NULL,
};
1543
1544static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
1545 { /* Home Agent 0 */
1546 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1547 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
1548 },
1549 { /* Home Agent 1 */
1550 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1551 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
1552 },
1553 { /* MC0 Channel 0 */
1554 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1555 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
1556 },
1557 { /* MC0 Channel 1 */
1558 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1559 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
1560 },
1561 { /* MC0 Channel 3 */
1562 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1563 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
1564 },
1565 { /* MC0 Channel 4 */
1566 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1567 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
1568 },
1569 { /* MC1 Channel 0 */
1570 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1571 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
1572 },
1573 { /* MC1 Channel 1 */
1574 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1575 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
1576 },
1577 { /* MC1 Channel 3 */
1578 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1579 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
1580 },
1581 { /* MC1 Channel 4 */
1582 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1583 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
1584 },
1585 { /* IRP */
1586 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1587 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IRP, 0),
1588 },
1589 { /* QPI0 Port 0 */
1590 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1591 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
1592 },
1593 { /* QPI0 Port 1 */
1594 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1595 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
1596 },
1597 { /* QPI1 Port 2 */
1598 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1599 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
1600 },
1601 { /* R2PCIe */
1602 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1603 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
1604 },
1605 { /* R3QPI0 Link 0 */
1606 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1607 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
1608 },
1609 { /* R3QPI0 Link 1 */
1610 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1611 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
1612 },
1613 { /* R3QPI1 Link 2 */
1614 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1615 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
1616 },
1617 { /* QPI Port 0 filter */
1618 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
1619 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1620 SNBEP_PCI_QPI_PORT0_FILTER),
1621 },
1622 { /* QPI Port 0 filter */
1623 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
1624 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1625 SNBEP_PCI_QPI_PORT1_FILTER),
1626 },
1627 { /* end: all zeroes */ }
1628};
1629
/*
 * Driver skeleton (name + ID table only); published to the common uncore
 * PCI layer by ivt_uncore_pci_init().
 */
static struct pci_driver ivt_uncore_pci_driver = {
	.name		= "ivt_uncore",
	.id_table	= ivt_uncore_pci_ids,
};
1634
1635int ivt_uncore_pci_init(void)
1636{
1637 int ret = snbep_pci2phy_map_init(0x0e1e);
1638 if (ret)
1639 return ret;
1640 uncore_pci_uncores = ivt_pci_uncores;
1641 uncore_pci_driver = &ivt_uncore_pci_driver;
1642 return 0;
1643}
1644/* end of IvyTown uncore support */