author	Yan, Zheng <zheng.z.yan@intel.com>	2013-04-16 07:51:06 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-04-21 05:01:23 -0400
commit	46bdd905987199febdef611bab40e2b1ac0036b8 (patch)
tree	5186d2c27d7fa9a63e285984746b007a099db882
parent	22cc4ccf63e10e361531bf61e6e6c96c53a2f665 (diff)
perf/x86/intel: Fix SNB-EP CBO and PCU uncore PMU filter management
The existing code assumes that all Cbox and PCU events use the filter
register, but the filter is actually event-specific. Furthermore, the
filter register is sub-divided into multiple fields which are used by
different events.

Signed-off-by: Yan, Zheng <zheng.z.yan@intel.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: peterz@infradead.org
Cc: ak@linux.intel.com
Link: http://lkml.kernel.org/r/1366113067-3262-3-git-send-email-zheng.z.yan@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reported-by: Stephane Eranian <eranian@google.com>
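Note: the per-field reference counting at the heart of this fix packs one
small counter per filter sub-field into a single atomic_t (er->ref), and the
__BITS_VALUE() macro extracts an individual counter. Below is a minimal
userspace sketch of that packing scheme; the main() harness is hypothetical
and only illustrates the bit arithmetic, it is not part of the patch.

/*
 * Illustrative sketch, not part of the patch: the Cbox code keeps one
 * 6-bit reference count per filter sub-field inside a single value, and
 * __BITS_VALUE() extracts the i-th n-bit counter.
 */
#include <stdio.h>

#define __BITS_VALUE(x, i, n)	((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))

int main(void)
{
	unsigned long long ref = 0;
	int i;

	/* take one reference on field 0 (TID) and two on field 2 (STATE) */
	ref += 1ULL << (0 * 6);
	ref += 2ULL << (2 * 6);

	for (i = 0; i < 5; i++)
		printf("field %d: refcount %llu\n", i,
		       __BITS_VALUE(ref, i, 6));
	return 0;
}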
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_uncore.c	| 326
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_uncore.h	|  13
2 files changed, 280 insertions(+), 59 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 50d4a1c58106..8f590ea9ece0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -17,6 +17,9 @@ static struct event_constraint constraint_fixed =
 static struct event_constraint constraint_empty =
 	EVENT_CONSTRAINT(0, 0, 0);
 
+#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
+				((1ULL << (n)) - 1)))
+
 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
@@ -110,6 +113,21 @@ static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
 	reg1->alloc = 0;
 }
 
+static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
+{
+	struct intel_uncore_extra_reg *er;
+	unsigned long flags;
+	u64 config;
+
+	er = &box->shared_regs[idx];
+
+	raw_spin_lock_irqsave(&er->lock, flags);
+	config = er->config;
+	raw_spin_unlock_irqrestore(&er->lock, flags);
+
+	return config;
+}
+
 /* Sandy Bridge-EP uncore support */
 static struct intel_uncore_type snbep_uncore_cbox;
 static struct intel_uncore_type snbep_uncore_pcu;
@@ -205,7 +223,7 @@ static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
 
 	if (reg1->idx != EXTRA_REG_NONE)
-		wrmsrl(reg1->reg, reg1->config);
+		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
 
 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
 }
@@ -226,29 +244,6 @@ static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
 	wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
 }
 
-static int snbep_uncore_hw_config(struct intel_uncore_box *box, struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
-
-	if (box->pmu->type == &snbep_uncore_cbox) {
-		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
-			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
-		reg1->config = event->attr.config1 &
-			SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK;
-	} else {
-		if (box->pmu->type == &snbep_uncore_pcu) {
-			reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
-			reg1->config = event->attr.config1 & SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
-		} else {
-			return 0;
-		}
-	}
-	reg1->idx = 0;
-
-	return 0;
-}
-
 static struct attribute *snbep_uncore_formats_attr[] = {
 	&format_attr_event.attr,
 	&format_attr_umask.attr,
@@ -345,16 +340,16 @@ static struct attribute_group snbep_uncore_qpi_format_group = {
 	.attrs = snbep_uncore_qpi_formats_attr,
 };
 
+#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
+	.init_box	= snbep_uncore_msr_init_box,		\
+	.disable_box	= snbep_uncore_msr_disable_box,		\
+	.enable_box	= snbep_uncore_msr_enable_box,		\
+	.disable_event	= snbep_uncore_msr_disable_event,	\
+	.enable_event	= snbep_uncore_msr_enable_event,	\
+	.read_counter	= uncore_msr_read_counter
+
 static struct intel_uncore_ops snbep_uncore_msr_ops = {
-	.init_box	= snbep_uncore_msr_init_box,
-	.disable_box	= snbep_uncore_msr_disable_box,
-	.enable_box	= snbep_uncore_msr_enable_box,
-	.disable_event	= snbep_uncore_msr_disable_event,
-	.enable_event	= snbep_uncore_msr_enable_event,
-	.read_counter	= uncore_msr_read_counter,
-	.get_constraint	= uncore_get_constraint,
-	.put_constraint	= uncore_put_constraint,
-	.hw_config	= snbep_uncore_hw_config,
+	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
 };
 
 static struct intel_uncore_ops snbep_uncore_pci_ops = {
@@ -446,6 +441,145 @@ static struct intel_uncore_type snbep_uncore_ubox = {
 	.format_group = &snbep_uncore_ubox_format_group,
 };
 
+static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
+	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
+				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
+	EVENT_EXTRA_END
+};
+
+static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
+	int i;
+
+	if (uncore_box_is_fake(box))
+		return;
+
+	for (i = 0; i < 5; i++) {
+		if (reg1->alloc & (0x1 << i))
+			atomic_sub(1 << (i * 6), &er->ref);
+	}
+	reg1->alloc = 0;
+}
+
+static struct event_constraint *
+__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
+			    u64 (*cbox_filter_mask)(int fields))
+{
+	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
+	int i, alloc = 0;
+	unsigned long flags;
+	u64 mask;
+
+	if (reg1->idx == EXTRA_REG_NONE)
+		return NULL;
+
+	raw_spin_lock_irqsave(&er->lock, flags);
+	for (i = 0; i < 5; i++) {
+		if (!(reg1->idx & (0x1 << i)))
+			continue;
+		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
+			continue;
+
+		mask = cbox_filter_mask(0x1 << i);
+		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
+		    !((reg1->config ^ er->config) & mask)) {
+			atomic_add(1 << (i * 6), &er->ref);
+			er->config &= ~mask;
+			er->config |= reg1->config & mask;
+			alloc |= (0x1 << i);
+		} else {
+			break;
+		}
+	}
+	raw_spin_unlock_irqrestore(&er->lock, flags);
+	if (i < 5)
+		goto fail;
+
+	if (!uncore_box_is_fake(box))
+		reg1->alloc |= alloc;
+
+	return 0;
+fail:
+	for (; i >= 0; i--) {
+		if (alloc & (0x1 << i))
+			atomic_sub(1 << (i * 6), &er->ref);
+	}
+	return &constraint_empty;
+}
+
+static u64 snbep_cbox_filter_mask(int fields)
+{
+	u64 mask = 0;
+
+	if (fields & 0x1)
+		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
+	if (fields & 0x2)
+		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
+	if (fields & 0x4)
+		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
+	if (fields & 0x8)
+		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
+
+	return mask;
+}
+
+static struct event_constraint *
+snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
+}
+
+static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+	struct extra_reg *er;
+	int idx = 0;
+
+	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
+		if (er->event != (event->hw.config & er->config_mask))
+			continue;
+		idx |= er->idx;
+	}
+
+	if (idx) {
+		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
+			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
+		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
+		reg1->idx = idx;
+	}
+	return 0;
+}
+
+static struct intel_uncore_ops snbep_uncore_cbox_ops = {
+	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
+	.hw_config	= snbep_cbox_hw_config,
+	.get_constraint	= snbep_cbox_get_constraint,
+	.put_constraint	= snbep_cbox_put_constraint,
+};
+
 static struct intel_uncore_type snbep_uncore_cbox = {
 	.name		= "cbox",
 	.num_counters	= 4,
@@ -458,10 +592,104 @@ static struct intel_uncore_type snbep_uncore_cbox = {
 	.msr_offset	= SNBEP_CBO_MSR_OFFSET,
 	.num_shared_regs = 1,
 	.constraints	= snbep_uncore_cbox_constraints,
-	.ops		= &snbep_uncore_msr_ops,
+	.ops		= &snbep_uncore_cbox_ops,
 	.format_group	= &snbep_uncore_cbox_format_group,
 };
 
+static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+	u64 config = reg1->config;
+
+	if (new_idx > reg1->idx)
+		config <<= 8 * (new_idx - reg1->idx);
+	else
+		config >>= 8 * (reg1->idx - new_idx);
+
+	if (modify) {
+		hwc->config += new_idx - reg1->idx;
+		reg1->config = config;
+		reg1->idx = new_idx;
+	}
+	return config;
+}
+
+static struct event_constraint *
+snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
+	unsigned long flags;
+	int idx = reg1->idx;
+	u64 mask, config1 = reg1->config;
+	bool ok = false;
+
+	if (reg1->idx == EXTRA_REG_NONE ||
+	    (!uncore_box_is_fake(box) && reg1->alloc))
+		return NULL;
+again:
+	mask = 0xff << (idx * 8);
+	raw_spin_lock_irqsave(&er->lock, flags);
+	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
+	    !((config1 ^ er->config) & mask)) {
+		atomic_add(1 << (idx * 8), &er->ref);
+		er->config &= ~mask;
+		er->config |= config1 & mask;
+		ok = true;
+	}
+	raw_spin_unlock_irqrestore(&er->lock, flags);
+
+	if (!ok) {
+		idx = (idx + 1) % 4;
+		if (idx != reg1->idx) {
+			config1 = snbep_pcu_alter_er(event, idx, false);
+			goto again;
+		}
+		return &constraint_empty;
+	}
+
+	if (!uncore_box_is_fake(box)) {
+		if (idx != reg1->idx)
+			snbep_pcu_alter_er(event, idx, true);
+		reg1->alloc = 1;
+	}
+	return NULL;
+}
+
+static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
+
+	if (uncore_box_is_fake(box) || !reg1->alloc)
+		return;
+
+	atomic_sub(1 << (reg1->idx * 8), &er->ref);
+	reg1->alloc = 0;
+}
+
+static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
+
+	if (ev_sel >= 0xb && ev_sel <= 0xe) {
+		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
+		reg1->idx = ev_sel - 0xb;
+		reg1->config = event->attr.config1 & (0xff << reg1->idx);
+	}
+	return 0;
+}
+
+static struct intel_uncore_ops snbep_uncore_pcu_ops = {
+	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
+	.hw_config	= snbep_pcu_hw_config,
+	.get_constraint	= snbep_pcu_get_constraint,
+	.put_constraint	= snbep_pcu_put_constraint,
+};
+
 static struct intel_uncore_type snbep_uncore_pcu = {
 	.name		= "pcu",
 	.num_counters	= 4,
@@ -472,7 +700,7 @@ static struct intel_uncore_type snbep_uncore_pcu = {
 	.event_mask	= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
 	.box_ctl	= SNBEP_PCU_MSR_PMON_BOX_CTL,
 	.num_shared_regs = 1,
-	.ops		= &snbep_uncore_msr_ops,
+	.ops		= &snbep_uncore_pcu_ops,
 	.format_group	= &snbep_uncore_pcu_format_group,
 };
 
@@ -808,9 +1036,6 @@ static struct intel_uncore_type *nhm_msr_uncores[] = {
 /* end of Nehalem uncore support */
 
 /* Nehalem-EX uncore support */
-#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
-				((1ULL << (n)) - 1)))
-
 DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
 DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
 DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
@@ -1161,7 +1386,7 @@ static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
 };
 
 /* Nehalem-EX or Westmere-EX ? */
-bool uncore_nhmex;
+static bool uncore_nhmex;
 
 static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
 {
@@ -1239,7 +1464,7 @@ static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
 	atomic_sub(1 << (idx * 8), &er->ref);
 }
 
-u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
+static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
@@ -1554,7 +1779,7 @@ static struct intel_uncore_type nhmex_uncore_mbox = {
 	.format_group = &nhmex_uncore_mbox_format_group,
 };
 
-void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
+static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
@@ -1724,21 +1949,6 @@ static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
 	return 0;
 }
 
-static u64 nhmex_rbox_shared_reg_config(struct intel_uncore_box *box, int idx)
-{
-	struct intel_uncore_extra_reg *er;
-	unsigned long flags;
-	u64 config;
-
-	er = &box->shared_regs[idx];
-
-	raw_spin_lock_irqsave(&er->lock, flags);
-	config = er->config;
-	raw_spin_unlock_irqrestore(&er->lock, flags);
-
-	return config;
-}
-
 static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
@@ -1759,7 +1969,7 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
 	case 2:
 	case 3:
 		wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
-			nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5));
+			uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
 		break;
 	case 4:
 		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
@@ -2285,7 +2495,7 @@ out:
 	return ret;
 }
 
-int uncore_pmu_event_init(struct perf_event *event)
+static int uncore_pmu_event_init(struct perf_event *event)
 {
 	struct intel_uncore_pmu *pmu;
 	struct intel_uncore_box *box;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index e68a4550e952..f14a3413a85d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -148,9 +148,20 @@
 #define SNBEP_C0_MSR_PMON_CTL0			0xd10
 #define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
 #define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
-#define SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK	0xfffffc1f
 #define SNBEP_CBO_MSR_OFFSET			0x20
 
+#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
+#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
+#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
+#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000
+
+#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
+	.event = (e),				\
+	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
+	.config_mask = (m),			\
+	.idx = (i)				\
+}
+
 /* SNB-EP PCU register */
 #define SNBEP_PCU_MSR_PMON_CTR0			0xc36
 #define SNBEP_PCU_MSR_PMON_CTL0			0xc30