about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2009-03-18 03:59:21 -0400
committerIngo Molnar <mingo@elte.hu>2009-04-06 03:30:14 -0400
commit7bb497bd885eedd0f56dfe3cc1b5ff20710d33b9 (patch)
tree4f743d26a10f0a978ad2cfd0ef23c936c5ea0034
parent4e193bd4dfdc983d12969b51439b4a1fbaf2daad (diff)
perf_counter: fix crash on perfmon v1 systems
Impact: fix boot crash on Intel Perfmon Version 1 systems

Intel Perfmon v1 does not support the global MSRs, nor does it offer the
generalized MSR ranges. So support v2 and later CPUs only.

Also mark pmc_ops as read-mostly - to avoid false cacheline sharing.

Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--arch/x86/kernel/cpu/perf_counter.c16
1 file changed, 11 insertions, 5 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 22dab06c08a4..6cba9d47b711 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -57,12 +57,14 @@ struct pmc_x86_ops {
57 int max_events; 57 int max_events;
58}; 58};
59 59
60static struct pmc_x86_ops *pmc_ops; 60static struct pmc_x86_ops *pmc_ops __read_mostly;
61 61
62static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { 62static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
63 .enabled = 1, 63 .enabled = 1,
64}; 64};
65 65
66static __read_mostly int intel_perfmon_version;
67
66/* 68/*
67 * Intel PerfMon v3. Used on Core2 and later. 69 * Intel PerfMon v3. Used on Core2 and later.
68 */ 70 */
@@ -613,7 +615,7 @@ void perf_counter_print_debug(void)
613 cpu = smp_processor_id(); 615 cpu = smp_processor_id();
614 cpuc = &per_cpu(cpu_hw_counters, cpu); 616 cpuc = &per_cpu(cpu_hw_counters, cpu);
615 617
616 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 618 if (intel_perfmon_version >= 2) {
617 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); 619 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
618 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); 620 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
619 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); 621 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
@@ -930,10 +932,10 @@ static struct pmc_x86_ops pmc_amd_ops = {
930 932
931static struct pmc_x86_ops *pmc_intel_init(void) 933static struct pmc_x86_ops *pmc_intel_init(void)
932{ 934{
935 union cpuid10_edx edx;
933 union cpuid10_eax eax; 936 union cpuid10_eax eax;
934 unsigned int ebx;
935 unsigned int unused; 937 unsigned int unused;
936 union cpuid10_edx edx; 938 unsigned int ebx;
937 939
938 /* 940 /*
939 * Check whether the Architectural PerfMon supports 941 * Check whether the Architectural PerfMon supports
@@ -943,8 +945,12 @@ static struct pmc_x86_ops *pmc_intel_init(void)
943 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) 945 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
944 return NULL; 946 return NULL;
945 947
948 intel_perfmon_version = eax.split.version_id;
949 if (intel_perfmon_version < 2)
950 return NULL;
951
946 pr_info("Intel Performance Monitoring support detected.\n"); 952 pr_info("Intel Performance Monitoring support detected.\n");
947 pr_info("... version: %d\n", eax.split.version_id); 953 pr_info("... version: %d\n", intel_perfmon_version);
948 pr_info("... bit width: %d\n", eax.split.bit_width); 954 pr_info("... bit width: %d\n", eax.split.bit_width);
949 pr_info("... mask length: %d\n", eax.split.mask_length); 955 pr_info("... mask length: %d\n", eax.split.mask_length);
950 956