Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c | 34
1 files changed, 13 insertions, 21 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index df9012bbd211..2d3681bbb522 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -23,7 +23,6 @@
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
 
-static bool perf_counters_initialized __read_mostly;
 static u64 perf_counter_mask __read_mostly;
 
 struct cpu_hw_counters {
@@ -227,6 +226,11 @@ static void hw_perf_counter_destroy(struct perf_counter *counter)
         }
 }
 
+static inline int x86_pmu_initialized(void)
+{
+        return x86_pmu.handle_irq != NULL;
+}
+
 /*
  * Setup the hardware configuration for a given hw_event_type
  */
@@ -240,8 +244,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
         if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
                 return -ENOSYS;
 
-        if (unlikely(!perf_counters_initialized))
-                return -EINVAL;
+        if (!x86_pmu_initialized())
+                return -ENODEV;
 
         err = 0;
         if (atomic_inc_not_zero(&num_counters)) {
@@ -348,9 +352,8 @@ static u64 amd_pmu_save_disable_all(void)
 
 u64 hw_perf_save_disable(void)
 {
-        if (unlikely(!perf_counters_initialized))
+        if (!x86_pmu_initialized())
                 return 0;
-
         return x86_pmu.save_disable_all();
 }
 /*
@@ -388,9 +391,8 @@ static void amd_pmu_restore_all(u64 ctrl)
 
 void hw_perf_restore(u64 ctrl)
 {
-        if (unlikely(!perf_counters_initialized))
+        if (!x86_pmu_initialized())
                 return;
-
         x86_pmu.restore_all(ctrl);
 }
 /*
@@ -402,8 +404,6 @@ static inline u64 intel_pmu_get_status(u64 mask)
 {
         u64 status;
 
-        if (unlikely(!perf_counters_initialized))
-                return 0;
         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
 
         return status;
@@ -417,10 +417,6 @@ static inline void intel_pmu_ack_status(u64 ack)
 static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
         int err;
-
-        if (unlikely(!perf_counters_initialized))
-                return;
-
         err = checking_wrmsrl(hwc->config_base + idx,
                               hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
@@ -428,10 +424,6 @@ static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
         int err;
-
-        if (unlikely(!perf_counters_initialized))
-                return;
-
         err = checking_wrmsrl(hwc->config_base + idx,
                               hwc->config);
 }
@@ -787,10 +779,10 @@ void perf_counter_unthrottle(void)
 {
         struct cpu_hw_counters *cpuc;
 
-        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+        if (!x86_pmu_initialized())
                 return;
 
-        if (unlikely(!perf_counters_initialized))
+        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                 return;
 
         cpuc = &__get_cpu_var(cpu_hw_counters);
@@ -829,8 +821,9 @@ void perf_counters_lapic_init(int nmi)
 {
         u32 apic_val;
 
-        if (!perf_counters_initialized)
+        if (!x86_pmu_initialized())
                 return;
+
         /*
          * Enable the performance counter vector in the APIC LVT:
          */
@@ -988,7 +981,6 @@ void __init init_hw_perf_counters(void)
                 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
 
         pr_info("... counter mask: %016Lx\n", perf_counter_mask);
-        perf_counters_initialized = true;
 
         perf_counters_lapic_init(0);
         register_die_notifier(&perf_counter_nmi_notifier);
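
Taken together, the hunks above drop the global perf_counters_initialized flag and instead treat the PMU as initialized once its dispatch struct is populated, using x86_pmu.handle_irq as the sentinel. A minimal stand-alone C sketch of that pattern (hypothetical names and helpers, not the kernel code itself) is:

#include <stdio.h>

/* Hypothetical miniature of an x86_pmu-style dispatch struct. */
struct pmu_ops {
        int  (*handle_irq)(void);    /* set only once the PMU has been probed */
        void (*disable_all)(void);
};

static struct pmu_ops pmu;           /* zero-initialized: handle_irq == NULL */

/* Mirrors x86_pmu_initialized(): "initialized" means the ops are wired up. */
static inline int pmu_initialized(void)
{
        return pmu.handle_irq != NULL;
}

static void pmu_disable_all(void)
{
        if (!pmu_initialized())
                return;               /* replaces the separate bool flag */
        pmu.disable_all();
}

static int  dummy_irq(void)     { return 0; }
static void dummy_disable(void) { puts("disable all counters"); }

int main(void)
{
        pmu_disable_all();            /* no-op: ops not wired up yet */
        pmu.handle_irq  = dummy_irq;
        pmu.disable_all = dummy_disable;
        pmu_disable_all();            /* now dispatches through the struct */
        return 0;
}

The state the callers care about (are the per-vendor ops installed?) becomes the test itself, so the flag and the ops can never disagree; that is the same reasoning the patch applies to __hw_perf_counter_init, hw_perf_save_disable, hw_perf_restore, perf_counter_unthrottle and perf_counters_lapic_init.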