author	Thomas Gleixner <tglx@linutronix.de>	2016-03-20 14:59:03 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-03-31 04:30:37 -0400
commit	424646eeadab64da959f960928804e5289417819 (patch)
tree	806b355b80a2d4be276c8d0e612c91360ed3bf27
parent	49de0493e5f67a8023fa6fa5c89097c1f77de74e (diff)
x86/perf/intel/cstate: Sanitize probing
The whole probing functionality can simply be expressed with model
matching and a bunch of structures describing the variants. This is a
first step to make that driver modular.

While at it, get rid of completely pointless comments and name the
enums so they are self explaining.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[ Reworked probing to clear msr[].attr for all !present msrs. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Link: http://lkml.kernel.org/r/20160320185623.500381872@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/events/intel/cstate.c	359
1 file changed, 160 insertions(+), 199 deletions(-)
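[ Editor's note: for readers who want the matching pattern in isolation, the
sketch below shows the shape this patch adopts: a per-model parameter struct
is hung off the driver_data member of an x86_cpu_id table entry, and
x86_match_cpu() picks the entry for the running CPU at init time. All demo_*
names are hypothetical; struct x86_cpu_id, x86_match_cpu(), X86_FEATURE_ANY
and MODULE_DEVICE_TABLE() are the real kernel interfaces the patch uses. ]

	/* Minimal sketch, not part of the patch. */
	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <asm/cpu_device_id.h>

	struct demo_params {			/* hypothetical per-model data */
		unsigned long	events;		/* bitmask of supported events */
	};

	static const struct demo_params demo_nhm __initconst = {
		.events	= 0x06,			/* e.g. two residency events */
	};

	/* { vendor, family, model, feature, driver_data } */
	#define DEMO_MODEL(model, params) \
		{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(params) }

	static const struct x86_cpu_id demo_match[] __initconst = {
		DEMO_MODEL(30, demo_nhm),	/* 45nm Nehalem */
		{ },
	};
	MODULE_DEVICE_TABLE(x86cpu, demo_match);

	static int __init demo_init(void)
	{
		const struct x86_cpu_id *id = x86_match_cpu(demo_match);
		const struct demo_params *params;

		if (!id)			/* model not listed: stay out */
			return -ENODEV;

		/* Per-model data travels via driver_data */
		params = (const struct demo_params *) id->driver_data;
		pr_info("demo: supported event mask %#lx\n", params->events);
		return 0;
	}
	device_initcall(demo_init);

[ Compared with the removed test_core()/test_pkg() switch statements, a new
CPU model then needs only one table line plus, at most, a new variant
structure — which is what makes the driver modular. ]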
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 5c2f55fe142a..1aac40f1e4fe 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -106,22 +106,27 @@ static ssize_t cstate_get_attr_cpumask(struct device *dev,
 				       struct device_attribute *attr,
 				       char *buf);
 
+/* Model -> events mapping */
+struct cstate_model {
+	unsigned long		core_events;
+	unsigned long		pkg_events;
+	unsigned long		quirks;
+};
+
+/* Quirk flags */
+#define SLM_PKG_C6_USE_C7_MSR		(1UL << 0)
+
 struct perf_cstate_msr {
 	u64	msr;
 	struct	perf_pmu_events_attr *attr;
-	bool	(*test)(int idx);
 };
 
 
 /* cstate_core PMU */
-
 static struct pmu cstate_core_pmu;
 static bool has_cstate_core;
 
-enum perf_cstate_core_id {
-	/*
-	 * cstate_core events
-	 */
+enum perf_cstate_core_events {
 	PERF_CSTATE_CORE_C1_RES = 0,
 	PERF_CSTATE_CORE_C3_RES,
 	PERF_CSTATE_CORE_C6_RES,
@@ -130,69 +135,16 @@ enum perf_cstate_core_id {
 	PERF_CSTATE_CORE_EVENT_MAX,
 };
 
-bool test_core(int idx)
-{
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
-	    boot_cpu_data.x86 != 6)
-		return false;
-
-	switch (boot_cpu_data.x86_model) {
-	case 30: /* 45nm Nehalem */
-	case 26: /* 45nm Nehalem-EP */
-	case 46: /* 45nm Nehalem-EX */
-
-	case 37: /* 32nm Westmere */
-	case 44: /* 32nm Westmere-EP */
-	case 47: /* 32nm Westmere-EX */
-		if (idx == PERF_CSTATE_CORE_C3_RES ||
-		    idx == PERF_CSTATE_CORE_C6_RES)
-			return true;
-		break;
-	case 42: /* 32nm SandyBridge */
-	case 45: /* 32nm SandyBridge-E/EN/EP */
-
-	case 58: /* 22nm IvyBridge */
-	case 62: /* 22nm IvyBridge-EP/EX */
-
-	case 60: /* 22nm Haswell Core */
-	case 63: /* 22nm Haswell Server */
-	case 69: /* 22nm Haswell ULT */
-	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
-
-	case 61: /* 14nm Broadwell Core-M */
-	case 86: /* 14nm Broadwell Xeon D */
-	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
-	case 79: /* 14nm Broadwell Server */
-
-	case 78: /* 14nm Skylake Mobile */
-	case 94: /* 14nm Skylake Desktop */
-		if (idx == PERF_CSTATE_CORE_C3_RES ||
-		    idx == PERF_CSTATE_CORE_C6_RES ||
-		    idx == PERF_CSTATE_CORE_C7_RES)
-			return true;
-		break;
-	case 55: /* 22nm Atom "Silvermont" */
-	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
-	case 76: /* 14nm Atom "Airmont" */
-		if (idx == PERF_CSTATE_CORE_C1_RES ||
-		    idx == PERF_CSTATE_CORE_C6_RES)
-			return true;
-		break;
-	}
-
-	return false;
-}
-
 PMU_EVENT_ATTR_STRING(c1-residency, evattr_cstate_core_c1, "event=0x00");
 PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_core_c3, "event=0x01");
 PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_core_c6, "event=0x02");
 PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_core_c7, "event=0x03");
 
 static struct perf_cstate_msr core_msr[] = {
-	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&evattr_cstate_core_c1,	test_core, },
-	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&evattr_cstate_core_c3,	test_core, },
-	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&evattr_cstate_core_c6,	test_core, },
-	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&evattr_cstate_core_c7,	test_core, },
+	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&evattr_cstate_core_c1 },
+	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&evattr_cstate_core_c3 },
+	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&evattr_cstate_core_c6 },
+	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&evattr_cstate_core_c7 },
 };
 
 static struct attribute *core_events_attrs[PERF_CSTATE_CORE_EVENT_MAX + 1] = {
@@ -234,18 +186,11 @@ static const struct attribute_group *core_attr_groups[] = {
 	NULL,
 };
 
-/* cstate_core PMU end */
-
-
 /* cstate_pkg PMU */
-
 static struct pmu cstate_pkg_pmu;
 static bool has_cstate_pkg;
 
-enum perf_cstate_pkg_id {
-	/*
-	 * cstate_pkg events
-	 */
+enum perf_cstate_pkg_events {
 	PERF_CSTATE_PKG_C2_RES = 0,
 	PERF_CSTATE_PKG_C3_RES,
 	PERF_CSTATE_PKG_C6_RES,
@@ -257,69 +202,6 @@ enum perf_cstate_pkg_id {
 	PERF_CSTATE_PKG_EVENT_MAX,
 };
 
-bool test_pkg(int idx)
-{
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
-	    boot_cpu_data.x86 != 6)
-		return false;
-
-	switch (boot_cpu_data.x86_model) {
-	case 30: /* 45nm Nehalem */
-	case 26: /* 45nm Nehalem-EP */
-	case 46: /* 45nm Nehalem-EX */
-
-	case 37: /* 32nm Westmere */
-	case 44: /* 32nm Westmere-EP */
-	case 47: /* 32nm Westmere-EX */
-		if (idx == PERF_CSTATE_CORE_C3_RES ||
-		    idx == PERF_CSTATE_CORE_C6_RES ||
-		    idx == PERF_CSTATE_CORE_C7_RES)
-			return true;
-		break;
-	case 42: /* 32nm SandyBridge */
-	case 45: /* 32nm SandyBridge-E/EN/EP */
-
-	case 58: /* 22nm IvyBridge */
-	case 62: /* 22nm IvyBridge-EP/EX */
-
-	case 60: /* 22nm Haswell Core */
-	case 63: /* 22nm Haswell Server */
-	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
-
-	case 61: /* 14nm Broadwell Core-M */
-	case 86: /* 14nm Broadwell Xeon D */
-	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
-	case 79: /* 14nm Broadwell Server */
-
-	case 78: /* 14nm Skylake Mobile */
-	case 94: /* 14nm Skylake Desktop */
-		if (idx == PERF_CSTATE_PKG_C2_RES ||
-		    idx == PERF_CSTATE_PKG_C3_RES ||
-		    idx == PERF_CSTATE_PKG_C6_RES ||
-		    idx == PERF_CSTATE_PKG_C7_RES)
-			return true;
-		break;
-	case 55: /* 22nm Atom "Silvermont" */
-	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
-	case 76: /* 14nm Atom "Airmont" */
-		if (idx == PERF_CSTATE_CORE_C6_RES)
-			return true;
-		break;
-	case 69: /* 22nm Haswell ULT */
-		if (idx == PERF_CSTATE_PKG_C2_RES ||
-		    idx == PERF_CSTATE_PKG_C3_RES ||
-		    idx == PERF_CSTATE_PKG_C6_RES ||
-		    idx == PERF_CSTATE_PKG_C7_RES ||
-		    idx == PERF_CSTATE_PKG_C8_RES ||
-		    idx == PERF_CSTATE_PKG_C9_RES ||
-		    idx == PERF_CSTATE_PKG_C10_RES)
-			return true;
-		break;
-	}
-
-	return false;
-}
-
 PMU_EVENT_ATTR_STRING(c2-residency, evattr_cstate_pkg_c2, "event=0x00");
 PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_pkg_c3, "event=0x01");
 PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_pkg_c6, "event=0x02");
@@ -329,13 +211,13 @@ PMU_EVENT_ATTR_STRING(c9-residency, evattr_cstate_pkg_c9, "event=0x05");
 PMU_EVENT_ATTR_STRING(c10-residency, evattr_cstate_pkg_c10, "event=0x06");
 
 static struct perf_cstate_msr pkg_msr[] = {
-	[PERF_CSTATE_PKG_C2_RES]  = { MSR_PKG_C2_RESIDENCY,	&evattr_cstate_pkg_c2,	test_pkg, },
-	[PERF_CSTATE_PKG_C3_RES]  = { MSR_PKG_C3_RESIDENCY,	&evattr_cstate_pkg_c3,	test_pkg, },
-	[PERF_CSTATE_PKG_C6_RES]  = { MSR_PKG_C6_RESIDENCY,	&evattr_cstate_pkg_c6,	test_pkg, },
-	[PERF_CSTATE_PKG_C7_RES]  = { MSR_PKG_C7_RESIDENCY,	&evattr_cstate_pkg_c7,	test_pkg, },
-	[PERF_CSTATE_PKG_C8_RES]  = { MSR_PKG_C8_RESIDENCY,	&evattr_cstate_pkg_c8,	test_pkg, },
-	[PERF_CSTATE_PKG_C9_RES]  = { MSR_PKG_C9_RESIDENCY,	&evattr_cstate_pkg_c9,	test_pkg, },
-	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&evattr_cstate_pkg_c10,	test_pkg, },
+	[PERF_CSTATE_PKG_C2_RES]  = { MSR_PKG_C2_RESIDENCY,	&evattr_cstate_pkg_c2 },
+	[PERF_CSTATE_PKG_C3_RES]  = { MSR_PKG_C3_RESIDENCY,	&evattr_cstate_pkg_c3 },
+	[PERF_CSTATE_PKG_C6_RES]  = { MSR_PKG_C6_RESIDENCY,	&evattr_cstate_pkg_c6 },
+	[PERF_CSTATE_PKG_C7_RES]  = { MSR_PKG_C7_RESIDENCY,	&evattr_cstate_pkg_c7 },
+	[PERF_CSTATE_PKG_C8_RES]  = { MSR_PKG_C8_RESIDENCY,	&evattr_cstate_pkg_c8 },
+	[PERF_CSTATE_PKG_C9_RES]  = { MSR_PKG_C9_RESIDENCY,	&evattr_cstate_pkg_c9 },
+	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&evattr_cstate_pkg_c10 },
 };
 
 static struct attribute *pkg_events_attrs[PERF_CSTATE_PKG_EVENT_MAX + 1] = {
@@ -366,8 +248,6 @@ static const struct attribute_group *pkg_attr_groups[] = {
 	NULL,
 };
 
-/* cstate_pkg PMU end*/
-
 static ssize_t cstate_get_attr_cpumask(struct device *dev,
 				       struct device_attribute *attr,
 				       char *buf)
@@ -552,48 +432,151 @@ static int cstate_cpu_notifier(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
+static struct pmu cstate_core_pmu = {
+	.attr_groups	= core_attr_groups,
+	.name		= "cstate_core",
+	.task_ctx_nr	= perf_invalid_context,
+	.event_init	= cstate_pmu_event_init,
+	.add		= cstate_pmu_event_add,
+	.del		= cstate_pmu_event_del,
+	.start		= cstate_pmu_event_start,
+	.stop		= cstate_pmu_event_stop,
+	.read		= cstate_pmu_event_update,
+	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
+};
+
+static struct pmu cstate_pkg_pmu = {
+	.attr_groups	= pkg_attr_groups,
+	.name		= "cstate_pkg",
+	.task_ctx_nr	= perf_invalid_context,
+	.event_init	= cstate_pmu_event_init,
+	.add		= cstate_pmu_event_add,
+	.del		= cstate_pmu_event_del,
+	.start		= cstate_pmu_event_start,
+	.stop		= cstate_pmu_event_stop,
+	.read		= cstate_pmu_event_update,
+	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
+};
+
+static const struct cstate_model nhm_cstates __initconst = {
+	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
+				  BIT(PERF_CSTATE_CORE_C6_RES),
+
+	.pkg_events		= BIT(PERF_CSTATE_PKG_C3_RES) |
+				  BIT(PERF_CSTATE_PKG_C6_RES) |
+				  BIT(PERF_CSTATE_PKG_C7_RES),
+};
+
+static const struct cstate_model snb_cstates __initconst = {
+	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
+				  BIT(PERF_CSTATE_CORE_C6_RES) |
+				  BIT(PERF_CSTATE_CORE_C7_RES),
+
+	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
+				  BIT(PERF_CSTATE_PKG_C3_RES) |
+				  BIT(PERF_CSTATE_PKG_C6_RES) |
+				  BIT(PERF_CSTATE_PKG_C7_RES),
+};
+
+static const struct cstate_model hswult_cstates __initconst = {
+	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
+				  BIT(PERF_CSTATE_CORE_C6_RES) |
+				  BIT(PERF_CSTATE_CORE_C7_RES),
+
+	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
+				  BIT(PERF_CSTATE_PKG_C3_RES) |
+				  BIT(PERF_CSTATE_PKG_C6_RES) |
+				  BIT(PERF_CSTATE_PKG_C7_RES) |
+				  BIT(PERF_CSTATE_PKG_C8_RES) |
+				  BIT(PERF_CSTATE_PKG_C9_RES) |
+				  BIT(PERF_CSTATE_PKG_C10_RES),
+};
+
+static const struct cstate_model slm_cstates __initconst = {
+	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
+				  BIT(PERF_CSTATE_CORE_C6_RES),
+
+	.pkg_events		= BIT(PERF_CSTATE_PKG_C6_RES),
+	.quirks			= SLM_PKG_C6_USE_C7_MSR,
+};
+
+#define X86_CSTATES_MODEL(model, states)				\
+	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
+
+static const struct x86_cpu_id intel_cstates_match[] __initconst = {
+	X86_CSTATES_MODEL(30, nhm_cstates),    /* 45nm Nehalem             */
+	X86_CSTATES_MODEL(26, nhm_cstates),    /* 45nm Nehalem-EP          */
+	X86_CSTATES_MODEL(46, nhm_cstates),    /* 45nm Nehalem-EX          */
+
+	X86_CSTATES_MODEL(37, nhm_cstates),    /* 32nm Westmere            */
+	X86_CSTATES_MODEL(44, nhm_cstates),    /* 32nm Westmere-EP         */
+	X86_CSTATES_MODEL(47, nhm_cstates),    /* 32nm Westmere-EX         */
+
+	X86_CSTATES_MODEL(42, snb_cstates),    /* 32nm SandyBridge         */
+	X86_CSTATES_MODEL(45, snb_cstates),    /* 32nm SandyBridge-E/EN/EP */
+
+	X86_CSTATES_MODEL(58, snb_cstates),    /* 22nm IvyBridge           */
+	X86_CSTATES_MODEL(62, snb_cstates),    /* 22nm IvyBridge-EP/EX     */
+
+	X86_CSTATES_MODEL(60, snb_cstates),    /* 22nm Haswell Core        */
+	X86_CSTATES_MODEL(63, snb_cstates),    /* 22nm Haswell Server      */
+	X86_CSTATES_MODEL(70, snb_cstates),    /* 22nm Haswell + GT3e      */
+
+	X86_CSTATES_MODEL(69, hswult_cstates), /* 22nm Haswell ULT         */
+
+	X86_CSTATES_MODEL(55, slm_cstates),    /* 22nm Atom Silvermont     */
+	X86_CSTATES_MODEL(77, slm_cstates),    /* 22nm Atom Avoton/Rangely */
+	X86_CSTATES_MODEL(76, slm_cstates),    /* 22nm Atom Airmont        */
+
+	X86_CSTATES_MODEL(61, snb_cstates),    /* 14nm Broadwell Core-M    */
+	X86_CSTATES_MODEL(86, snb_cstates),    /* 14nm Broadwell Xeon D    */
+	X86_CSTATES_MODEL(71, snb_cstates),    /* 14nm Broadwell + GT3e    */
+	X86_CSTATES_MODEL(79, snb_cstates),    /* 14nm Broadwell Server    */
+
+	X86_CSTATES_MODEL(78, snb_cstates),    /* 14nm Skylake Mobile      */
+	X86_CSTATES_MODEL(94, snb_cstates),    /* 14nm Skylake Desktop     */
+	{ },
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
+
 /*
  * Probe the cstate events and insert the available one into sysfs attrs
- * Return false if there is no available events.
+ * Return false if there are no available events.
  */
-static bool cstate_probe_msr(struct perf_cstate_msr *msr,
-			     struct attribute **events_attrs,
-			     int max_event_nr)
+static bool __init cstate_probe_msr(const unsigned long evmsk, int max,
+				    struct perf_cstate_msr *msr,
+				    struct attribute **attrs)
 {
-	int i, j = 0;
+	bool found = false;
+	unsigned int bit;
 	u64 val;
 
-	/* Probe the cstate events. */
-	for (i = 0; i < max_event_nr; i++) {
-		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
-			msr[i].attr = NULL;
-	}
-
-	/* List remaining events in the sysfs attrs. */
-	for (i = 0; i < max_event_nr; i++) {
-		if (msr[i].attr)
-			events_attrs[j++] = &msr[i].attr->attr.attr;
+	for (bit = 0; bit < max; bit++) {
+		if (test_bit(bit, &evmsk) && !rdmsrl_safe(msr[bit].msr, &val)) {
+			*attrs++ = &msr[bit].attr->attr.attr;
+			found = true;
+		} else {
+			msr[bit].attr = NULL;
+		}
 	}
-	events_attrs[j] = NULL;
+	*attrs = NULL;
 
-	return (j > 0) ? true : false;
+	return found;
 }
 
-static int __init cstate_init(void)
+static int __init cstate_probe(const struct cstate_model *cm)
 {
 	/* SLM has different MSR for PKG C6 */
-	switch (boot_cpu_data.x86_model) {
-	case 55:
-	case 76:
-	case 77:
+	if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
 		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
-	}
 
-	if (cstate_probe_msr(core_msr, core_events_attrs, PERF_CSTATE_CORE_EVENT_MAX))
-		has_cstate_core = true;
+	has_cstate_core = cstate_probe_msr(cm->core_events,
+					   PERF_CSTATE_CORE_EVENT_MAX,
+					   core_msr, core_events_attrs);
 
-	if (cstate_probe_msr(pkg_msr, pkg_events_attrs, PERF_CSTATE_PKG_EVENT_MAX))
-		has_cstate_pkg = true;
+	has_cstate_pkg = cstate_probe_msr(cm->pkg_events,
+					  PERF_CSTATE_PKG_EVENT_MAX,
+					  pkg_msr, pkg_events_attrs);
 
 	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
 }
@@ -612,32 +595,6 @@ static void __init cstate_cpumask_init(void)
 	cpu_notifier_register_done();
 }
 
-static struct pmu cstate_core_pmu = {
-	.attr_groups	= core_attr_groups,
-	.name		= "cstate_core",
-	.task_ctx_nr	= perf_invalid_context,
-	.event_init	= cstate_pmu_event_init,
-	.add		= cstate_pmu_event_add, /* must have */
-	.del		= cstate_pmu_event_del, /* must have */
-	.start		= cstate_pmu_event_start,
-	.stop		= cstate_pmu_event_stop,
-	.read		= cstate_pmu_event_update,
-	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
-};
-
-static struct pmu cstate_pkg_pmu = {
-	.attr_groups	= pkg_attr_groups,
-	.name		= "cstate_pkg",
-	.task_ctx_nr	= perf_invalid_context,
-	.event_init	= cstate_pmu_event_init,
-	.add		= cstate_pmu_event_add, /* must have */
-	.del		= cstate_pmu_event_del, /* must have */
-	.start		= cstate_pmu_event_start,
-	.stop		= cstate_pmu_event_stop,
-	.read		= cstate_pmu_event_update,
-	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
-};
-
 static void __init cstate_pmus_register(void)
 {
 	int err;
640
641static void __init cstate_pmus_register(void) 598static void __init cstate_pmus_register(void)
642{ 599{
643 int err; 600 int err;
@@ -659,12 +616,17 @@ static void __init cstate_pmus_register(void)
 
 static int __init cstate_pmu_init(void)
 {
+	const struct x86_cpu_id *id;
 	int err;
 
-	if (cpu_has_hypervisor)
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return -ENODEV;
 
-	err = cstate_init();
+	id = x86_match_cpu(intel_cstates_match);
+	if (!id)
+		return -ENODEV;
+
+	err = cstate_probe((const struct cstate_model *) id->driver_data);
 	if (err)
 		return err;
 
@@ -674,5 +636,4 @@ static int __init cstate_pmu_init(void)
 
 	return 0;
 }
-
 device_initcall(cstate_pmu_init);