author    Linus Torvalds <torvalds@linux-foundation.org> 2012-10-12 21:20:11 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2012-10-12 21:20:11 -0400
commit    ade0899b298ba2c43bfd6abd8cbc2545944cde0c (patch)
tree      a448dfb440b3b958b6306bb43620cd5d76f504bf /arch/x86
parent    871a0596cb2f51b57dc583d1a7c4be0186582fe7 (diff)
parent    95cf59ea72331d0093010543b8951bb43f262cac (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
 "This tree includes some late perf items that missed the first round:

  tools:

   - Bash auto-completion improvements: we can now auto-complete the
     tools' long options, tracepoint event names, etc., from Namhyung Kim.

   - Look up threads using tid instead of pid in 'perf sched'.

   - Move global variables into a perf_kvm struct, from David Ahern.

   - Hists refactorings, preparatory for an improved 'diff' command,
     from Jiri Olsa.

   - Hists refactorings, preparatory for event group viewing work,
     from Namhyung Kim.

   - Remove double negation on optional feature macro definitions,
     from Namhyung Kim.

   - Remove several cases of needless global variables, in most
     builtins.

   - Misc fixes.

  kernel:

   - sysfs support for IBS on AMD CPUs, from Robert Richter.

   - Support for an upcoming Intel CPU, the Xeon Phi / Knights Corner
     HPC blade PMU, from Vince Weaver.

   - Misc fixes."

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (46 commits)
  perf: Fix perf_cgroup_switch for sw-events
  perf: Clarify perf_cpu_context::active_pmu usage by renaming it to ::unique_pmu
  perf/AMD/IBS: Add sysfs support
  perf hists: Add more helpers for hist entry stat
  perf hists: Move he->stat.nr_events initialization to a template
  perf hists: Introduce struct he_stat
  perf diff: Removing the total_period argument from output code
  perf tool: Add hpp interface to enable/disable hpp column
  perf tools: Removing hists pair argument from output path
  perf hists: Separate overhead and baseline columns
  perf diff: Refactor diff displacement possition info
  perf hists: Add struct hists pointer to struct hist_entry
  perf tools: Complete tracepoint event names
  perf/x86: Add support for Intel Xeon-Phi Knights Corner PMU
  perf evlist: Remove some unused methods
  perf evlist: Introduce add_newtp method
  perf kvm: Move global variables into a perf_kvm struct
  perf tools: Convert to BACKTRACE_SUPPORT
  perf tools: Long option completion support for each subcommands
  perf tools: Complete long option names of perf command
  ...
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/msr-index.h          |   5
-rw-r--r--  arch/x86/kernel/cpu/Makefile              |   2
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h          |   2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_ibs.c  |  61
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c    |   2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_knc.c      | 248
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c    |   4
7 files changed, 311 insertions(+), 13 deletions(-)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index fbee9714d9ab..7f0edceb7563 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -121,6 +121,11 @@
 #define MSR_P6_EVNTSEL0			0x00000186
 #define MSR_P6_EVNTSEL1			0x00000187
 
+#define MSR_KNC_PERFCTR0		0x00000020
+#define MSR_KNC_PERFCTR1		0x00000021
+#define MSR_KNC_EVNTSEL0		0x00000028
+#define MSR_KNC_EVNTSEL1		0x00000029
+
 /* AMD64 MSRs. Not complete. See the architecture manual for a more
    complete list. */
 
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index d30a6a9a0121..a0e067d3d96c 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -32,7 +32,7 @@ obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 
 ifdef CONFIG_PERF_EVENTS
 obj-$(CONFIG_CPU_SUP_AMD)		+= perf_event_amd.o
-obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_p6.o perf_event_p4.o
+obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_p6.o perf_event_knc.o perf_event_p4.o
 obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
 obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_intel_uncore.o
 endif
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 8b6defe7eefc..271d25700297 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -626,6 +626,8 @@ int p4_pmu_init(void);
 
 int p6_pmu_init(void);
 
+int knc_pmu_init(void);
+
 #else /* CONFIG_CPU_SUP_INTEL */
 
 static inline void reserve_ds_buffers(void)
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index eebd5ffe1bba..6336bcbd0618 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -41,17 +41,22 @@ struct cpu_perf_ibs {
 };
 
 struct perf_ibs {
 	struct pmu			pmu;
 	unsigned int			msr;
 	u64				config_mask;
 	u64				cnt_mask;
 	u64				enable_mask;
 	u64				valid_mask;
 	u64				max_period;
 	unsigned long			offset_mask[1];
 	int				offset_max;
 	struct cpu_perf_ibs __percpu	*pcpu;
-	u64				(*get_count)(u64 config);
+
+	struct attribute		**format_attrs;
+	struct attribute_group		format_group;
+	const struct attribute_group	*attr_groups[2];
+
+	u64				(*get_count)(u64 config);
 };
 
 struct perf_ibs_data {
@@ -446,6 +451,19 @@ static void perf_ibs_del(struct perf_event *event, int flags)
 
 static void perf_ibs_read(struct perf_event *event) { }
 
+PMU_FORMAT_ATTR(rand_en,	"config:57");
+PMU_FORMAT_ATTR(cnt_ctl,	"config:19");
+
+static struct attribute *ibs_fetch_format_attrs[] = {
+	&format_attr_rand_en.attr,
+	NULL,
+};
+
+static struct attribute *ibs_op_format_attrs[] = {
+	NULL,	/* &format_attr_cnt_ctl.attr if IBS_CAPS_OPCNT */
+	NULL,
+};
+
 static struct perf_ibs perf_ibs_fetch = {
 	.pmu = {
 		.task_ctx_nr = perf_invalid_context,
@@ -465,6 +483,7 @@ static struct perf_ibs perf_ibs_fetch = {
 	.max_period		= IBS_FETCH_MAX_CNT << 4,
 	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
 	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,
+	.format_attrs		= ibs_fetch_format_attrs,
 
 	.get_count		= get_ibs_fetch_count,
 };
@@ -488,6 +507,7 @@ static struct perf_ibs perf_ibs_op = {
 	.max_period		= IBS_OP_MAX_CNT << 4,
 	.offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
 	.offset_max		= MSR_AMD64_IBSOP_REG_COUNT,
+	.format_attrs		= ibs_op_format_attrs,
 
 	.get_count		= get_ibs_op_count,
 };
@@ -597,6 +617,17 @@ static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
 
 	perf_ibs->pcpu = pcpu;
 
+	/* register attributes */
+	if (perf_ibs->format_attrs[0]) {
+		memset(&perf_ibs->format_group, 0, sizeof(perf_ibs->format_group));
+		perf_ibs->format_group.name	= "format";
+		perf_ibs->format_group.attrs	= perf_ibs->format_attrs;
+
+		memset(&perf_ibs->attr_groups, 0, sizeof(perf_ibs->attr_groups));
+		perf_ibs->attr_groups[0]	= &perf_ibs->format_group;
+		perf_ibs->pmu.attr_groups	= perf_ibs->attr_groups;
+	}
+
 	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
 	if (ret) {
 		perf_ibs->pcpu = NULL;
@@ -608,13 +639,19 @@ static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
 
 static __init int perf_event_ibs_init(void)
 {
+	struct attribute **attr = ibs_op_format_attrs;
+
 	if (!ibs_caps)
 		return -ENODEV;	/* ibs not supported by the cpu */
 
 	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
-	if (ibs_caps & IBS_CAPS_OPCNT)
+
+	if (ibs_caps & IBS_CAPS_OPCNT) {
 		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
+		*attr++ = &format_attr_cnt_ctl.attr;
+	}
 	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
+
 	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
 	printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);
 
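
[Editor's note] With the format group registered, each IBS PMU is visible from user space under /sys/bus/event_source/devices/<name>/, including a format/ directory describing its config bits. A minimal user-space sketch of consuming this follows; the sysfs path and the config:19 (cnt_ctl) bit come from the hunks above, while the sampling period and error handling are illustrative assumptions, not part of the patch. Note cnt_ctl is only exported when the CPU advertises IBS_CAPS_OPCNT.

/* Hedged sketch: open an ibs_op sampling event with the cnt_ctl bit set,
 * using the dynamic PMU type id that perf_pmu_register() exports in sysfs. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	FILE *f;
	int type = -1;
	long fd;

	/* Type id assigned when the "ibs_op" PMU was registered. */
	f = fopen("/sys/bus/event_source/devices/ibs_op/type", "r");
	if (f) {
		if (fscanf(f, "%d", &type) != 1)
			type = -1;
		fclose(f);
	}
	if (type < 0) {
		fprintf(stderr, "ibs_op PMU not available\n");
		return 1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.size          = sizeof(attr);
	attr.type          = type;
	attr.config        = 1ULL << 19;  /* cnt_ctl, per the "config:19" format above */
	attr.sample_period = 100000;      /* illustrative period, not from the patch */

	fd = syscall(__NR_perf_event_open, &attr, -1 /* any task */,
		     0 /* cpu 0 */, -1 /* no group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	printf("ibs_op event opened (fd %ld)\n", fd);
	close(fd);
	return 0;
}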
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 6bca492b8547..324bb523d9d9 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1906,6 +1906,8 @@ __init int intel_pmu_init(void)
 		switch (boot_cpu_data.x86) {
 		case 0x6:
 			return p6_pmu_init();
+		case 0xb:
+			return knc_pmu_init();
 		case 0xf:
 			return p4_pmu_init();
 		}
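
[Editor's note] Knights Corner reports CPUID family 0xb, which is why the new case sits between the family-6 (P6) and family-15 (P4) dispatches. A small user-space sketch of the display-family computation that feeds boot_cpu_data.x86, assuming GCC/Clang's <cpuid.h> (the kernel's own detection lives elsewhere):

/* Hedged sketch: compute the x86 display family that intel_pmu_init()
 * switches on. The extended-family bits only contribute when the base
 * family is 0xf, so KNC's 0xb needs no adjustment. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, family;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	family = (eax >> 8) & 0xf;
	if (family == 0xf)
		family += (eax >> 20) & 0xff;

	printf("x86 family: 0x%x%s\n", family,
	       family == 0xb ? " (Knights Corner)" : "");
	return 0;
}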
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
new file mode 100644
index 000000000000..7c46bfdbc373
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_knc.c
@@ -0,0 +1,248 @@
+/* Driver for Intel Xeon Phi "Knights Corner" PMU */
+
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+#include "perf_event.h"
+
+static const u64 knc_perfmon_event_map[] =
+{
+	[PERF_COUNT_HW_CPU_CYCLES]		= 0x002a,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x0016,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0028,
+	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0029,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x0012,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x002b,
+};
+
+static __initconst u64 knc_hw_cache_event_ids
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+	[ C(L1D) ] = {
+		[ C(OP_READ) ] = {
+			/* On Xeon Phi event "0" is a valid DATA_READ */
+			/* (L1 Data Cache Reads) Instruction. */
+			/* We code this as ARCH_PERFMON_EVENTSEL_INT as this */
+			/* bit will always be set in x86_pmu_hw_config(). */
+			[ C(RESULT_ACCESS) ] = ARCH_PERFMON_EVENTSEL_INT,
+						/* DATA_READ */
+			[ C(RESULT_MISS)   ] = 0x0003,	/* DATA_READ_MISS */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0001,	/* DATA_WRITE */
+			[ C(RESULT_MISS)   ] = 0x0004,	/* DATA_WRITE_MISS */
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0011,	/* L1_DATA_PF1 */
+			[ C(RESULT_MISS)   ] = 0x001c,	/* L1_DATA_PF1_MISS */
+		},
+	},
+	[ C(L1I ) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x000c,	/* CODE_READ */
+			[ C(RESULT_MISS)   ] = 0x000e,	/* CODE_CACHE_MISS */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0,
+			[ C(RESULT_MISS)   ] = 0x0,
+		},
+	},
+	[ C(LL  ) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = 0x10cb,	/* L2_READ_MISS */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0x10cc,	/* L2_WRITE_HIT */
+			[ C(RESULT_MISS)   ] = 0,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0x10fc,	/* L2_DATA_PF2 */
+			[ C(RESULT_MISS)   ] = 0x10fe,	/* L2_DATA_PF2_MISS */
+		},
+	},
+	[ C(DTLB) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = ARCH_PERFMON_EVENTSEL_INT,
+						/* DATA_READ */
+						/* see note on L1 OP_READ */
+			[ C(RESULT_MISS)   ] = 0x0002,	/* DATA_PAGE_WALK */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0001,	/* DATA_WRITE */
+			[ C(RESULT_MISS)   ] = 0x0002,	/* DATA_PAGE_WALK */
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0,
+			[ C(RESULT_MISS)   ] = 0x0,
+		},
+	},
+	[ C(ITLB) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x000c,	/* CODE_READ */
+			[ C(RESULT_MISS)   ] = 0x000d,	/* CODE_PAGE_WALK */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+	},
+	[ C(BPU ) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0012,	/* BRANCHES */
+			[ C(RESULT_MISS)   ] = 0x002b,	/* BRANCHES_MISPREDICTED */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+	},
+};
+
+
+static u64 knc_pmu_event_map(int hw_event)
+{
+	return knc_perfmon_event_map[hw_event];
+}
+
+static struct event_constraint knc_event_constraints[] =
+{
+	INTEL_EVENT_CONSTRAINT(0xc3, 0x1),	/* HWP_L2HIT */
+	INTEL_EVENT_CONSTRAINT(0xc4, 0x1),	/* HWP_L2MISS */
+	INTEL_EVENT_CONSTRAINT(0xc8, 0x1),	/* L2_READ_HIT_E */
+	INTEL_EVENT_CONSTRAINT(0xc9, 0x1),	/* L2_READ_HIT_M */
+	INTEL_EVENT_CONSTRAINT(0xca, 0x1),	/* L2_READ_HIT_S */
+	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),	/* L2_READ_MISS */
+	INTEL_EVENT_CONSTRAINT(0xcc, 0x1),	/* L2_WRITE_HIT */
+	INTEL_EVENT_CONSTRAINT(0xce, 0x1),	/* L2_STRONGLY_ORDERED_STREAMING_VSTORES_MISS */
+	INTEL_EVENT_CONSTRAINT(0xcf, 0x1),	/* L2_WEAKLY_ORDERED_STREAMING_VSTORE_MISS */
+	INTEL_EVENT_CONSTRAINT(0xd7, 0x1),	/* L2_VICTIM_REQ_WITH_DATA */
+	INTEL_EVENT_CONSTRAINT(0xe3, 0x1),	/* SNP_HITM_BUNIT */
+	INTEL_EVENT_CONSTRAINT(0xe6, 0x1),	/* SNP_HIT_L2 */
+	INTEL_EVENT_CONSTRAINT(0xe7, 0x1),	/* SNP_HITM_L2 */
+	INTEL_EVENT_CONSTRAINT(0xf1, 0x1),	/* L2_DATA_READ_MISS_CACHE_FILL */
+	INTEL_EVENT_CONSTRAINT(0xf2, 0x1),	/* L2_DATA_WRITE_MISS_CACHE_FILL */
+	INTEL_EVENT_CONSTRAINT(0xf6, 0x1),	/* L2_DATA_READ_MISS_MEM_FILL */
+	INTEL_EVENT_CONSTRAINT(0xf7, 0x1),	/* L2_DATA_WRITE_MISS_MEM_FILL */
+	INTEL_EVENT_CONSTRAINT(0xfc, 0x1),	/* L2_DATA_PF2 */
+	INTEL_EVENT_CONSTRAINT(0xfd, 0x1),	/* L2_DATA_PF2_DROP */
+	INTEL_EVENT_CONSTRAINT(0xfe, 0x1),	/* L2_DATA_PF2_MISS */
+	INTEL_EVENT_CONSTRAINT(0xff, 0x1),	/* L2_DATA_HIT_INFLIGHT_PF2 */
+	EVENT_CONSTRAINT_END
+};
+
+#define MSR_KNC_IA32_PERF_GLOBAL_STATUS		0x0000002d
+#define MSR_KNC_IA32_PERF_GLOBAL_OVF_CONTROL	0x0000002e
+#define MSR_KNC_IA32_PERF_GLOBAL_CTRL		0x0000002f
+
+#define KNC_ENABLE_COUNTER0			0x00000001
+#define KNC_ENABLE_COUNTER1			0x00000002
+
+static void knc_pmu_disable_all(void)
+{
+	u64 val;
+
+	rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
+	val &= ~(KNC_ENABLE_COUNTER0|KNC_ENABLE_COUNTER1);
+	wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
+}
+
+static void knc_pmu_enable_all(int added)
+{
+	u64 val;
+
+	rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
+	val |= (KNC_ENABLE_COUNTER0|KNC_ENABLE_COUNTER1);
+	wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
+}
+
+static inline void
+knc_pmu_disable_event(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	u64 val;
+
+	val = hwc->config;
+	if (cpuc->enabled)
+		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
+
+	(void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
+}
+
+static void knc_pmu_enable_event(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	u64 val;
+
+	val = hwc->config;
+	if (cpuc->enabled)
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
+
+	(void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
+}
+
+PMU_FORMAT_ATTR(event,	"config:0-7"	);
+PMU_FORMAT_ATTR(umask,	"config:8-15"	);
+PMU_FORMAT_ATTR(edge,	"config:18"	);
+PMU_FORMAT_ATTR(inv,	"config:23"	);
+PMU_FORMAT_ATTR(cmask,	"config:24-31"	);
+
+static struct attribute *intel_knc_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_inv.attr,
+	&format_attr_cmask.attr,
+	NULL,
+};
+
+static __initconst struct x86_pmu knc_pmu = {
+	.name			= "knc",
+	.handle_irq		= x86_pmu_handle_irq,
+	.disable_all		= knc_pmu_disable_all,
+	.enable_all		= knc_pmu_enable_all,
+	.enable			= knc_pmu_enable_event,
+	.disable		= knc_pmu_disable_event,
+	.hw_config		= x86_pmu_hw_config,
+	.schedule_events	= x86_schedule_events,
+	.eventsel		= MSR_KNC_EVNTSEL0,
+	.perfctr		= MSR_KNC_PERFCTR0,
+	.event_map		= knc_pmu_event_map,
+	.max_events		= ARRAY_SIZE(knc_perfmon_event_map),
+	.apic			= 1,
+	.max_period		= (1ULL << 31) - 1,
+	.version		= 0,
+	.num_counters		= 2,
+	/* in theory 40 bits, early silicon is buggy though */
+	.cntval_bits		= 32,
+	.cntval_mask		= (1ULL << 32) - 1,
+	.get_event_constraints	= x86_get_event_constraints,
+	.event_constraints	= knc_event_constraints,
+	.format_attrs		= intel_knc_formats_attr,
+};
+
+__init int knc_pmu_init(void)
+{
+	x86_pmu = knc_pmu;
+
+	memcpy(hw_cache_event_ids, knc_hw_cache_event_ids,
+		sizeof(hw_cache_event_ids));
+
+	return 0;
+}
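
[Editor's note] The intel_knc_formats_attr strings above tell the perf core how a raw event is packed into the KNC event-select register. A self-contained sketch of that packing; the bit positions are taken from the PMU_FORMAT_ATTR definitions, while the helper name and example values are illustrative:

/* Hedged sketch: assemble a KNC config value per the advertised layout
 * (event 0-7, umask 8-15, edge 18, inv 23, cmask 24-31). */
#include <stdint.h>
#include <stdio.h>

static uint64_t knc_config(uint64_t event, uint64_t umask, int edge,
			   int inv, uint64_t cmask)
{
	return (event & 0xff) |
	       ((umask & 0xff) << 8) |
	       ((uint64_t)!!edge << 18) |
	       ((uint64_t)!!inv << 23) |
	       ((cmask & 0xff) << 24);
}

int main(void)
{
	/* PERF_COUNT_HW_BRANCH_INSTRUCTIONS maps to 0x0012 in the table above. */
	uint64_t cfg = knc_config(0x12, 0x00, 0, 0, 0);

	printf("config = 0x%llx\n", (unsigned long long)cfg);
	return 0;
}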
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 966512b2cacf..2e8caf03f593 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -56,6 +56,8 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
 	switch (boot_cpu_data.x86) {
 	case 6:
 		return msr - MSR_P6_PERFCTR0;
+	case 11:
+		return msr - MSR_KNC_PERFCTR0;
 	case 15:
 		return msr - MSR_P4_BPU_PERFCTR0;
 	}
@@ -82,6 +84,8 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
 	switch (boot_cpu_data.x86) {
 	case 6:
 		return msr - MSR_P6_EVNTSEL0;
+	case 11:
+		return msr - MSR_KNC_EVNTSEL0;
 	case 15:
 		return msr - MSR_P4_BSU_ESCR0;
 	}
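
[Editor's note] As with P6, the new family-11 cases reduce an MSR address to a counter bit by subtracting the base register. A trivial stand-alone sketch of that arithmetic; the MSR values are copied from the msr-index.h hunk above, and the function names are illustrative, not the kernel's:

/* Hedged sketch: the family-11 (KNC) mapping used by the NMI watchdog
 * reservation code, reduced to plain arithmetic. */
#include <stdio.h>

#define MSR_KNC_PERFCTR0	0x00000020
#define MSR_KNC_EVNTSEL0	0x00000028

static unsigned int knc_perfctr_msr_to_bit(unsigned int msr)
{
	return msr - MSR_KNC_PERFCTR0;	/* 0x20 -> bit 0, 0x21 -> bit 1 */
}

static unsigned int knc_evntsel_msr_to_bit(unsigned int msr)
{
	return msr - MSR_KNC_EVNTSEL0;	/* 0x28 -> bit 0, 0x29 -> bit 1 */
}

int main(void)
{
	printf("PERFCTR1 -> bit %u, EVNTSEL1 -> bit %u\n",
	       knc_perfctr_msr_to_bit(0x21), knc_evntsel_msr_to_bit(0x29));
	return 0;
}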