author	Yong Wang <yong.y.wang@linux.intel.com>	2009-06-23 22:13:24 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-06-24 04:51:24 -0400
commit	c14dab5c0782ef632742963a66276a195418a63c (patch)
tree	441e3da9c7346f010b129a8826d21323b7d41d0e /arch
parent	f7679dabfaf69840b000d238a020cee7157aca17 (diff)
perf_counter, x86: Set global control MSR correctly
The previous code assumed that the power-on value of the global control MSR
had all fixed and general-purpose counters enabled properly. However, this is
not the case for certain Intel processors, such as Atom - and it might also be
firmware dependent.

Each enable bit in IA32_PERF_GLOBAL_CTRL is AND'ed with the enable bits for
all privilege levels in the respective IA32_PERFEVTSELx or
IA32_PERF_FIXED_CTR_CTRL MSRs to start/stop the counting of the respective
counters. Counting is enabled if the AND'ed result is true; counting is
disabled when the result is false.

The end result is that all fixed counters are always disabled on Atom
processors, because the assumption is simply invalid there.

Fix this by not initializing the ctrl-mask from the global MSR, but setting
it to perf_counter_mask instead.

Reported-by: Stephane Eranian <eranian@googlemail.com>
Signed-off-by: Yong Wang <yong.y.wang@intel.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <20090624021324.GA2788@ywang-moblin2.bj.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
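For illustration, a minimal C sketch of the enable semantics described above;
the helper and macro names are made up here and are not part of the patch:

/* Illustrative sketch only - not part of this patch; names are hypothetical. */
#include <stdbool.h>
#include <stdint.h>

/* IA32_PERFEVTSELx bit 22 is the per-counter enable (EN) bit. */
#define PERFEVTSEL_EN (1ULL << 22)

static bool gp_counter_is_counting(uint64_t global_ctrl, uint64_t evtsel, int idx)
{
	/* Bit idx of IA32_PERF_GLOBAL_CTRL gates counter idx globally. */
	bool global_en = global_ctrl & (1ULL << idx);
	/* The EN bit in IA32_PERFEVTSELx enables the counter locally. */
	bool local_en  = evtsel & PERFEVTSEL_EN;

	/*
	 * Per the commit message, counting happens only when both enable
	 * bits are set (they are AND'ed); if the firmware powers on with
	 * the IA32_PERF_GLOBAL_CTRL bits clear, the counter never counts.
	 */
	return global_en && local_en;
}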
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	10
1 file changed, 1 insertion(+), 9 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 22eb3a1d4f9c..a310d19faca3 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -969,13 +969,6 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 	if (!x86_pmu.num_counters_fixed)
 		return -1;
 
-	/*
-	 * Quirk, IA32_FIXED_CTRs do not work on current Atom processors:
-	 */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-	    boot_cpu_data.x86_model == 28)
-		return -1;
-
 	event = hwc->config & ARCH_PERFMON_EVENT_MASK;
 
 	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
@@ -1428,8 +1421,6 @@ static int intel_pmu_init(void)
 	 */
 	x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
 
-	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
-
 	/*
 	 * Install the hw-cache-events table:
 	 */
@@ -1514,6 +1505,7 @@ void __init init_hw_perf_counters(void)
 
 	perf_counter_mask |=
 		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+	x86_pmu.intel_ctrl = perf_counter_mask;
 
 	perf_counters_lapic_init();
 	register_die_notifier(&perf_counter_nmi_notifier);
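For context, a sketch of why the seeded value matters, assuming the Intel
enable path in perf_counter.c of this era wrote x86_pmu.intel_ctrl straight
back to the global control MSR (not part of this diff):

/*
 * Context sketch, not part of this diff: if the enable path looks
 * roughly like this, then seeding intel_ctrl from perf_counter_mask
 * ensures every implemented fixed and general-purpose counter can
 * actually be turned on, regardless of the MSR's power-on value.
 */
static void intel_pmu_enable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
}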