aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2015-08-06 11:26:58 -0400
committerIngo Molnar <mingo@kernel.org>2015-08-12 05:43:20 -0400
commit19b3340cf58d14decf2898fc795cc2b1fa49e79e (patch)
tree527fff56ba3f0c79f29ed0787bc5e61b3d5251d9
parent3d325bf0da91ca5d22f2525a72308dafd4fc0977 (diff)
perf/x86: Fix MSR PMU driver
Currently we only update the sysfs event files per available MSR, we didn't actually disallow creating unlisted events. Rework things such that the detection, sysfs listing and event creation are better coordinated. Sadly it appears it's impossible to probe R/O MSRs under virt. This means we have to do the full model table to avoid listing all MSRs all the time. Tested-by: Kan Liang <kan.liang@intel.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Acked-by: Andy Lutomirski <luto@amacapital.net> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/kernel/cpu/perf_event_msr.c168
1 files changed, 84 insertions, 84 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c
index af216e9223e8..b0dd2e8a6d12 100644
--- a/arch/x86/kernel/cpu/perf_event_msr.c
+++ b/arch/x86/kernel/cpu/perf_event_msr.c
@@ -10,17 +10,63 @@ enum perf_msr_id {
10 PERF_MSR_EVENT_MAX, 10 PERF_MSR_EVENT_MAX,
11}; 11};
12 12
13bool test_aperfmperf(int idx)
14{
15 return boot_cpu_has(X86_FEATURE_APERFMPERF);
16}
17
18bool test_intel(int idx)
19{
20 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
21 boot_cpu_data.x86 != 6)
22 return false;
23
24 switch (boot_cpu_data.x86_model) {
25 case 30: /* 45nm Nehalem */
26 case 26: /* 45nm Nehalem-EP */
27 case 46: /* 45nm Nehalem-EX */
28
29 case 37: /* 32nm Westmere */
30 case 44: /* 32nm Westmere-EP */
31 case 47: /* 32nm Westmere-EX */
32
33 case 42: /* 32nm SandyBridge */
34 case 45: /* 32nm SandyBridge-E/EN/EP */
35
36 case 58: /* 22nm IvyBridge */
37 case 62: /* 22nm IvyBridge-EP/EX */
38
39 case 60: /* 22nm Haswell Core */
40 case 63: /* 22nm Haswell Server */
41 case 69: /* 22nm Haswell ULT */
42 case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
43
44 case 61: /* 14nm Broadwell Core-M */
45 case 86: /* 14nm Broadwell Xeon D */
46 case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
47 case 79: /* 14nm Broadwell Server */
48
49 case 55: /* 22nm Atom "Silvermont" */
50 case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
51 case 76: /* 14nm Atom "Airmont" */
52 if (idx == PERF_MSR_SMI)
53 return true;
54 break;
55
56 case 78: /* 14nm Skylake Mobile */
57 case 94: /* 14nm Skylake Desktop */
58 if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
59 return true;
60 break;
61 }
62
63 return false;
64}
65
13struct perf_msr { 66struct perf_msr {
14 int id;
15 u64 msr; 67 u64 msr;
16}; 68 struct perf_pmu_events_attr *attr;
17 69 bool (*test)(int idx);
18static struct perf_msr msr[] = {
19 { PERF_MSR_TSC, 0 },
20 { PERF_MSR_APERF, MSR_IA32_APERF },
21 { PERF_MSR_MPERF, MSR_IA32_MPERF },
22 { PERF_MSR_PPERF, MSR_PPERF },
23 { PERF_MSR_SMI, MSR_SMI_COUNT },
24}; 70};
25 71
26PMU_EVENT_ATTR_STRING(tsc, evattr_tsc, "event=0x00"); 72PMU_EVENT_ATTR_STRING(tsc, evattr_tsc, "event=0x00");
@@ -29,8 +75,16 @@ PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02");
29PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03"); 75PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03");
30PMU_EVENT_ATTR_STRING(smi, evattr_smi, "event=0x04"); 76PMU_EVENT_ATTR_STRING(smi, evattr_smi, "event=0x04");
31 77
78static struct perf_msr msr[] = {
79 [PERF_MSR_TSC] = { 0, &evattr_tsc, NULL, },
80 [PERF_MSR_APERF] = { MSR_IA32_APERF, &evattr_aperf, test_aperfmperf, },
81 [PERF_MSR_MPERF] = { MSR_IA32_MPERF, &evattr_mperf, test_aperfmperf, },
82 [PERF_MSR_PPERF] = { MSR_PPERF, &evattr_pperf, test_intel, },
83 [PERF_MSR_SMI] = { MSR_SMI_COUNT, &evattr_smi, test_intel, },
84};
85
32static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = { 86static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
33 &evattr_tsc.attr.attr, 87 NULL,
34}; 88};
35 89
36static struct attribute_group events_attr_group = { 90static struct attribute_group events_attr_group = {
@@ -74,6 +128,9 @@ static int msr_event_init(struct perf_event *event)
74 event->attr.sample_period) /* no sampling */ 128 event->attr.sample_period) /* no sampling */
75 return -EINVAL; 129 return -EINVAL;
76 130
131 if (!msr[cfg].attr)
132 return -EINVAL;
133
77 event->hw.idx = -1; 134 event->hw.idx = -1;
78 event->hw.event_base = msr[cfg].msr; 135 event->hw.event_base = msr[cfg].msr;
79 event->hw.config = cfg; 136 event->hw.config = cfg;
@@ -151,89 +208,32 @@ static struct pmu pmu_msr = {
151 .capabilities = PERF_PMU_CAP_NO_INTERRUPT, 208 .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
152}; 209};
153 210
154static int __init intel_msr_init(int idx)
155{
156 if (boot_cpu_data.x86 != 6)
157 return 0;
158
159 switch (boot_cpu_data.x86_model) {
160 case 30: /* 45nm Nehalem */
161 case 26: /* 45nm Nehalem-EP */
162 case 46: /* 45nm Nehalem-EX */
163
164 case 37: /* 32nm Westmere */
165 case 44: /* 32nm Westmere-EP */
166 case 47: /* 32nm Westmere-EX */
167
168 case 42: /* 32nm SandyBridge */
169 case 45: /* 32nm SandyBridge-E/EN/EP */
170
171 case 58: /* 22nm IvyBridge */
172 case 62: /* 22nm IvyBridge-EP/EX */
173
174 case 60: /* 22nm Haswell Core */
175 case 63: /* 22nm Haswell Server */
176 case 69: /* 22nm Haswell ULT */
177 case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
178
179 case 61: /* 14nm Broadwell Core-M */
180 case 86: /* 14nm Broadwell Xeon D */
181 case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
182 case 79: /* 14nm Broadwell Server */
183 events_attrs[idx++] = &evattr_smi.attr.attr;
184 break;
185
186 case 78: /* 14nm Skylake Mobile */
187 case 94: /* 14nm Skylake Desktop */
188 events_attrs[idx++] = &evattr_pperf.attr.attr;
189 events_attrs[idx++] = &evattr_smi.attr.attr;
190 break;
191
192 case 55: /* 22nm Atom "Silvermont" */
193 case 76: /* 14nm Atom "Airmont" */
194 case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
195 events_attrs[idx++] = &evattr_smi.attr.attr;
196 break;
197 }
198
199 events_attrs[idx] = NULL;
200
201 return 0;
202}
203
204static int __init amd_msr_init(int idx)
205{
206 return 0;
207}
208
209static int __init msr_init(void) 211static int __init msr_init(void)
210{ 212{
211 int err; 213 int i, j = 0;
212 int idx = 1;
213 214
214 if (boot_cpu_has(X86_FEATURE_APERFMPERF)) { 215 if (!boot_cpu_has(X86_FEATURE_TSC)) {
215 events_attrs[idx++] = &evattr_aperf.attr.attr; 216 pr_cont("no MSR PMU driver.\n");
216 events_attrs[idx++] = &evattr_mperf.attr.attr; 217 return 0;
217 events_attrs[idx] = NULL;
218 } 218 }
219 219
220 switch (boot_cpu_data.x86_vendor) { 220 /* Probe the MSRs. */
221 case X86_VENDOR_INTEL: 221 for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
222 err = intel_msr_init(idx); 222 u64 val;
223 break;
224
225 case X86_VENDOR_AMD:
226 err = amd_msr_init(idx);
227 break;
228 223
229 default: 224 /*
230 err = -ENOTSUPP; 225 * Virt sucks arse; you cannot tell if a R/O MSR is present :/
226 */
227 if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
228 msr[i].attr = NULL;
231 } 229 }
232 230
233 if (err != 0) { 231 /* List remaining MSRs in the sysfs attrs. */
234 pr_cont("no msr PMU driver.\n"); 232 for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
235 return 0; 233 if (msr[i].attr)
234 events_attrs[j++] = &msr[i].attr->attr.attr;
236 } 235 }
236 events_attrs[j] = NULL;
237 237
238 perf_pmu_register(&pmu_msr, "msr", -1); 238 perf_pmu_register(&pmu_msr, "msr", -1);
239 239