about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
author    Robert Richter <robert.richter@amd.com>  2009-04-29 06:47:13 -0400
committer Ingo Molnar <mingo@elte.hu>              2009-04-29 08:51:08 -0400
commit    faa28ae018ed004a22aa4a7704e04ccdde4a941e (patch)
tree      0cfe5d6a13fcc52acc65913304431986cef7dcfd /arch
parent    0933e5c6a680ba8d8d786a6f7fa377b7ec0d1e49 (diff)
perf_counter, x86: make pmu version generic
This makes the use of the version variable generic. Also, some debug
messages have been generalized.

[ Impact: refactor and generalize code ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-17-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c | 24 ++++++++++++------------
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 75dbb1f0900e..15d2c03e16f1 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -39,6 +39,8 @@ struct cpu_hw_counters {
  * struct x86_pmu - generic x86 pmu
  */
 struct x86_pmu {
+	const char	*name;
+	int		version;
 	int		(*handle_irq)(struct pt_regs *, int);
 	u64		(*save_disable_all)(void);
 	void		(*restore_all)(u64);
@@ -61,8 +63,6 @@ static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
 	.enabled = 1,
 };
 
-static __read_mostly int intel_perfmon_version;
-
 /*
  * Intel PerfMon v3. Used on Core2 and later.
  */
@@ -658,7 +658,7 @@ void perf_counter_print_debug(void)
 	cpu = smp_processor_id();
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
-	if (intel_perfmon_version >= 2) {
+	if (x86_pmu.version >= 2) {
 		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
 		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
 		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
@@ -884,6 +884,7 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
 };
 
 static struct x86_pmu intel_pmu = {
+	.name			= "Intel",
 	.handle_irq		= intel_pmu_handle_irq,
 	.save_disable_all	= intel_pmu_save_disable_all,
 	.restore_all		= intel_pmu_restore_all,
@@ -897,6 +898,7 @@ static struct x86_pmu intel_pmu = {
 };
 
 static struct x86_pmu amd_pmu = {
+	.name			= "AMD",
 	.handle_irq		= amd_pmu_handle_irq,
 	.save_disable_all	= amd_pmu_save_disable_all,
 	.restore_all		= amd_pmu_restore_all,
@@ -918,6 +920,7 @@ static int intel_pmu_init(void)
 	union cpuid10_eax eax;
 	unsigned int unused;
 	unsigned int ebx;
+	int version;
 
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
 		return -ENODEV;
@@ -930,16 +933,12 @@ static int intel_pmu_init(void)
 	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
 		return -ENODEV;
 
-	intel_perfmon_version = eax.split.version_id;
-	if (intel_perfmon_version < 2)
+	version = eax.split.version_id;
+	if (version < 2)
 		return -ENODEV;
 
-	pr_info("Intel Performance Monitoring support detected.\n");
-	pr_info("... version:         %d\n", intel_perfmon_version);
-	pr_info("... bit width:       %d\n", eax.split.bit_width);
-	pr_info("... mask length:     %d\n", eax.split.mask_length);
-
 	x86_pmu = intel_pmu;
+	x86_pmu.version = version;
 	x86_pmu.num_counters = eax.split.num_counters;
 	x86_pmu.num_counters_fixed = edx.split.num_counters_fixed;
 	x86_pmu.counter_bits = eax.split.bit_width;
@@ -951,7 +950,6 @@ static int intel_pmu_init(void)
 static int amd_pmu_init(void)
 {
 	x86_pmu = amd_pmu;
-	pr_info("AMD Performance Monitoring support detected.\n");
 	return 0;
 }
 
@@ -972,6 +970,10 @@ void __init init_hw_perf_counters(void)
 	if (err != 0)
 		return;
 
+	pr_info("%s Performance Monitoring support detected.\n", x86_pmu.name);
+	pr_info("... version:         %d\n", x86_pmu.version);
+	pr_info("... bit width:       %d\n", x86_pmu.counter_bits);
+
 	pr_info("... num counters: %d\n", x86_pmu.num_counters);
 	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
 		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;