 arch/x86/kernel/cpu/perf_counter.c | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 22eb3a1d4f9c..a310d19faca3 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -969,13 +969,6 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 	if (!x86_pmu.num_counters_fixed)
 		return -1;
 
-	/*
-	 * Quirk, IA32_FIXED_CTRs do not work on current Atom processors:
-	 */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-	    boot_cpu_data.x86_model == 28)
-		return -1;
-
 	event = hwc->config & ARCH_PERFMON_EVENT_MASK;
 
 	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
@@ -1428,8 +1421,6 @@ static int intel_pmu_init(void)
 	 */
 	x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
 
-	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
-
 	/*
 	 * Install the hw-cache-events table:
 	 */
@@ -1514,6 +1505,7 @@ void __init init_hw_perf_counters(void)
 
 	perf_counter_mask |=
 		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+	x86_pmu.intel_ctrl = perf_counter_mask;
 
 	perf_counters_lapic_init();
 	register_die_notifier(&perf_counter_nmi_notifier);