Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c | 27
1 files changed, 16 insertions, 11 deletions
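For orientation, the sketch below (ordinary userspace C, not the kernel source; struct pmu, active_pmu, intel_init, amd_init and the vendor enum are simplified stand-ins) illustrates the initialization pattern this patch converts to: each vendor-specific init sets the shared pmu pointer itself and reports success or failure as an int (0 or -ENODEV), and the common init path only checks the error code.

/*
 * Standalone sketch of the init pattern used after this patch: vendor init
 * functions set a shared pmu pointer as a side effect and return 0 or a
 * negative errno instead of returning the pointer (or NULL) directly.
 * All identifiers here are simplified stand-ins, not kernel symbols.
 */
#include <errno.h>
#include <stdio.h>

struct pmu { const char *name; };

static const struct pmu intel_pmu = { "intel" };
static const struct pmu amd_pmu   = { "amd" };

static const struct pmu *active_pmu;    /* plays the role of x86_pmu */

static int intel_init(int has_arch_perfmon)
{
	if (!has_arch_perfmon)
		return -ENODEV;         /* error code instead of NULL */

	active_pmu = &intel_pmu;        /* pointer assignment moved into init */
	return 0;
}

static int amd_init(void)
{
	active_pmu = &amd_pmu;
	return 0;
}

int main(void)
{
	enum { VENDOR_INTEL, VENDOR_AMD, VENDOR_OTHER } vendor = VENDOR_INTEL;
	int err;

	switch (vendor) {
	case VENDOR_INTEL:
		err = intel_init(1);
		break;
	case VENDOR_AMD:
		err = amd_init();
		break;
	default:
		return 0;
	}
	if (err != 0)                   /* bail out if vendor init failed */
		return 0;

	printf("active pmu: %s\n", active_pmu->name);
	return 0;
}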
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 5a52d73ccfa7..7c72a9423636 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -913,7 +913,7 @@ static struct x86_pmu amd_pmu = {
 	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
 };
 
-static struct x86_pmu *intel_pmu_init(void)
+static int intel_pmu_init(void)
 {
 	union cpuid10_edx edx;
 	union cpuid10_eax eax;
@@ -921,7 +921,7 @@ static struct x86_pmu *intel_pmu_init(void)
 	unsigned int ebx;
 
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-		return NULL;
+		return -ENODEV;
 
 	/*
 	 * Check whether the Architectural PerfMon supports
@@ -929,49 +929,54 @@ static struct x86_pmu *intel_pmu_init(void)
 	 */
 	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
 	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
-		return NULL;
+		return -ENODEV;
 
 	intel_perfmon_version = eax.split.version_id;
 	if (intel_perfmon_version < 2)
-		return NULL;
+		return -ENODEV;
 
 	pr_info("Intel Performance Monitoring support detected.\n");
 	pr_info("... version: %d\n", intel_perfmon_version);
 	pr_info("... bit width: %d\n", eax.split.bit_width);
 	pr_info("... mask length: %d\n", eax.split.mask_length);
 
+	x86_pmu = &intel_pmu;
+
 	nr_counters_generic = eax.split.num_counters;
 	nr_counters_fixed = edx.split.num_counters_fixed;
 	counter_value_mask = (1ULL << eax.split.bit_width) - 1;
 
-	return &intel_pmu;
+	return 0;
 }
 
-static struct x86_pmu *amd_pmu_init(void)
+static int amd_pmu_init(void)
 {
+	x86_pmu = &amd_pmu;
+
 	nr_counters_generic = 4;
 	nr_counters_fixed = 0;
 	counter_value_mask = 0x0000FFFFFFFFFFFFULL;
 	counter_value_bits = 48;
 
 	pr_info("AMD Performance Monitoring support detected.\n");
-
-	return &amd_pmu;
+	return 0;
 }
 
 void __init init_hw_perf_counters(void)
 {
+	int err;
+
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_INTEL:
-		x86_pmu = intel_pmu_init();
+		err = intel_pmu_init();
 		break;
 	case X86_VENDOR_AMD:
-		x86_pmu = amd_pmu_init();
+		err = amd_pmu_init();
 		break;
 	default:
 		return;
 	}
-	if (!x86_pmu)
+	if (err != 0)
 		return;
 
 	pr_info("... num counters: %d\n", nr_counters_generic);