author		Robert Richter <robert.richter@amd.com>	2012-06-20 14:46:34 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-07-05 15:19:40 -0400
commit		a1eac7ac903ea9afbd4f133659710a0588c8eca5 (patch)
tree		37e4be441c02d01815d8b9a309d466abe937a8b9 /arch/x86/kernel
parent		15c7ad51ad58cbd3b46112c1840bc7228bd354bf (diff)
perf/x86: Move Intel specific code to intel_pmu_init()
There is some Intel-specific code in the generic x86 path. Move it to
intel_pmu_init(). Since the p4 and p6 PMUs don't have fixed counters, we
may skip the check in case such a PMU is detected.

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1340217996-2254-3-git-send-email-robert.richter@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
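As background for the mask arithmetic being relocated below: a minimal, self-contained sketch of how a global enable mask such as x86_pmu.intel_ctrl is composed from the generic and fixed counter counts. The counter counts here are made-up example values (the kernel reads the real ones from CPUID), and the program is purely illustrative, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the kernel obtains the real counts via CPUID. */
#define NUM_COUNTERS        4    /* generic counters (example value)       */
#define NUM_COUNTERS_FIXED  3    /* fixed counters (example value)         */
#define IDX_FIXED           32   /* fixed counters start at bit 32         */

int main(void)
{
	/* One enable bit per generic counter: bits 0 .. NUM_COUNTERS-1. */
	uint64_t ctrl = (1ULL << NUM_COUNTERS) - 1;

	/* Fixed counters occupy a separate bit range starting at IDX_FIXED;
	 * a PMU without fixed counters (p4, p6) simply skips this step. */
	ctrl |= ((1ULL << NUM_COUNTERS_FIXED) - 1) << IDX_FIXED;

	printf("ctrl = 0x%llx\n", (unsigned long long)ctrl); /* prints 0x70000000f */
	return 0;
}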
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	34
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	33
2 files changed, 35 insertions(+), 32 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 66805000260..7b4f1e871f7 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1307,7 +1307,6 @@ static struct attribute_group x86_pmu_format_group = {
 static int __init init_hw_perf_events(void)
 {
 	struct x86_pmu_quirk *quirk;
-	struct event_constraint *c;
 	int err;
 
 	pr_info("Performance Events: ");
@@ -1338,21 +1337,8 @@ static int __init init_hw_perf_events(void)
 	for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
 		quirk->func();
 
-	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
-		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
-		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
-		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
-	}
-	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
-
-	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
-		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
-		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
-		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
-	}
-
-	x86_pmu.intel_ctrl |=
-		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+	if (!x86_pmu.intel_ctrl)
+		x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
 
 	perf_events_lapic_init();
 	register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
@@ -1361,22 +1347,6 @@ static int __init init_hw_perf_events(void)
 		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
 				   0, x86_pmu.num_counters, 0);
 
-	if (x86_pmu.event_constraints) {
-		/*
-		 * event on fixed counter2 (REF_CYCLES) only works on this
-		 * counter, so do not extend mask to generic counters
-		 */
-		for_each_event_constraint(c, x86_pmu.event_constraints) {
-			if (c->cmask != X86_RAW_EVENT_MASK
-			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
-				continue;
-			}
-
-			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
-			c->weight += x86_pmu.num_counters;
-		}
-	}
-
 	x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
 	x86_pmu_format_group.attrs = x86_pmu.format_attrs;
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 5b0b362c7ae..2e9444c8014 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1765,6 +1765,7 @@ __init int intel_pmu_init(void)
 	union cpuid10_edx edx;
 	union cpuid10_eax eax;
 	union cpuid10_ebx ebx;
+	struct event_constraint *c;
 	unsigned int unused;
 	int version;
 
@@ -1953,5 +1954,37 @@ __init int intel_pmu_init(void)
 	}
 	}
 
+	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
+		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
+		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
+		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
+	}
+	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+
+	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
+		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
+		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
+	}
+
+	x86_pmu.intel_ctrl |=
+		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+
+	if (x86_pmu.event_constraints) {
+		/*
+		 * event on fixed counter2 (REF_CYCLES) only works on this
+		 * counter, so do not extend mask to generic counters
+		 */
+		for_each_event_constraint(c, x86_pmu.event_constraints) {
+			if (c->cmask != X86_RAW_EVENT_MASK
+			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
+				continue;
+			}
+
+			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+			c->weight += x86_pmu.num_counters;
+		}
+	}
+
 	return 0;
 }