author	Robert Richter <robert.richter@amd.com>	2009-04-29 06:47:20 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-29 08:51:12 -0400
commit	85cf9dba92152bb4edec118b2f4f0be1ae7fdcab (patch)
tree	0fac17369fe20b2510b19748c70a61af5298c7db
parent	d43698918bd46c71d494555fb92195fbea1fcb6c (diff)
perf_counter, x86: change and remove pmu initialization checks
Some functions are only called if the pmu was properly initialized, so the initialization checks in those functions can be removed. The way initialization is checked has also changed: the pointer to the interrupt handler is now checked, and if it is set the pmu is initialized. This also removes a static variable and uses struct x86_pmu as the only data source for the check.

[ Impact: simplify code ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-24-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	34
1 files changed, 13 insertions, 21 deletions
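The pattern the patch switches to can be sketched outside the kernel. Below is a minimal, hypothetical user-space analogue (illustrative names only, not the kernel API): rather than keeping a separate "initialized" boolean next to the descriptor, the presence of a mandatory function pointer in the descriptor itself is treated as the initialization flag, much like x86_pmu.handle_irq is used in the diff that follows.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for struct x86_pmu: the handle_irq pointer
 * doubles as the "is the PMU initialized?" marker. */
struct pmu {
	int (*handle_irq)(int irq);
};

static struct pmu pmu;	/* static storage, so handle_irq starts out NULL */

static inline int pmu_initialized(void)
{
	/* No separate bool: a non-NULL handler means init has run. */
	return pmu.handle_irq != NULL;
}

static int demo_handle_irq(int irq)
{
	printf("handled irq %d\n", irq);
	return 1;
}

static void pmu_init(void)
{
	pmu.handle_irq = demo_handle_irq;	/* installing the handler marks the pmu ready */
}

int main(void)
{
	printf("before init: %d\n", pmu_initialized());	/* prints 0 */
	pmu_init();
	printf("after init:  %d\n", pmu_initialized());	/* prints 1 */
	return 0;
}

In the kernel patch itself, the failing check in __hw_perf_counter_init() now returns -ENODEV, and struct x86_pmu is the only data source consulted.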
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index df9012bbd211..2d3681bbb522 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -23,7 +23,6 @@
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
 
-static bool perf_counters_initialized __read_mostly;
 static u64 perf_counter_mask __read_mostly;
 
 struct cpu_hw_counters {
@@ -227,6 +226,11 @@ static void hw_perf_counter_destroy(struct perf_counter *counter)
 	}
 }
 
+static inline int x86_pmu_initialized(void)
+{
+	return x86_pmu.handle_irq != NULL;
+}
+
 /*
  * Setup the hardware configuration for a given hw_event_type
  */
@@ -240,8 +244,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
 		return -ENOSYS;
 
-	if (unlikely(!perf_counters_initialized))
-		return -EINVAL;
+	if (!x86_pmu_initialized())
+		return -ENODEV;
 
 	err = 0;
 	if (atomic_inc_not_zero(&num_counters)) {
@@ -348,9 +352,8 @@ static u64 amd_pmu_save_disable_all(void)
 
 u64 hw_perf_save_disable(void)
 {
-	if (unlikely(!perf_counters_initialized))
+	if (!x86_pmu_initialized())
 		return 0;
-
 	return x86_pmu.save_disable_all();
 }
 /*
@@ -388,9 +391,8 @@ static void amd_pmu_restore_all(u64 ctrl)
 
 void hw_perf_restore(u64 ctrl)
 {
-	if (unlikely(!perf_counters_initialized))
+	if (!x86_pmu_initialized())
 		return;
-
 	x86_pmu.restore_all(ctrl);
 }
 /*
@@ -402,8 +404,6 @@ static inline u64 intel_pmu_get_status(u64 mask)
 {
 	u64 status;
 
-	if (unlikely(!perf_counters_initialized))
-		return 0;
 	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
 
 	return status;
@@ -417,10 +417,6 @@ static inline void intel_pmu_ack_status(u64 ack)
 static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
 	int err;
-
-	if (unlikely(!perf_counters_initialized))
-		return;
-
 	err = checking_wrmsrl(hwc->config_base + idx,
 			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
@@ -428,10 +424,6 @@ static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
 	int err;
-
-	if (unlikely(!perf_counters_initialized))
-		return;
-
 	err = checking_wrmsrl(hwc->config_base + idx,
 			      hwc->config);
 }
@@ -787,10 +779,10 @@ void perf_counter_unthrottle(void)
 {
 	struct cpu_hw_counters *cpuc;
 
-	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+	if (!x86_pmu_initialized())
 		return;
 
-	if (unlikely(!perf_counters_initialized))
+	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
 		return;
 
 	cpuc = &__get_cpu_var(cpu_hw_counters);
@@ -829,8 +821,9 @@ void perf_counters_lapic_init(int nmi)
 {
 	u32 apic_val;
 
-	if (!perf_counters_initialized)
+	if (!x86_pmu_initialized())
 		return;
+
 	/*
 	 * Enable the performance counter vector in the APIC LVT:
 	 */
@@ -988,7 +981,6 @@ void __init init_hw_perf_counters(void)
 		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
 
 	pr_info("... counter mask: %016Lx\n", perf_counter_mask);
-	perf_counters_initialized = true;
 
 	perf_counters_lapic_init(0);
 	register_die_notifier(&perf_counter_nmi_notifier);