about | summary | refs | log | tree | commit | diff | stats
path: root/arch
diff options
context:
space:
mode:
authorRobert Richter <robert.richter@amd.com>2009-04-29 06:47:07 -0400
committerIngo Molnar <mingo@elte.hu>2009-04-29 08:51:05 -0400
commitdee5d9067ca78b317538fd67930be4e09a83dbc5 (patch)
treeba0a1053401ca5a8967bff67bd6fd3ef87da4b76 /arch
parentb7f8859a8ed1937e2139c17b84878f1d413fa659 (diff)
perf_counter, x86: remove ack_status() from struct x86_pmu
This function is Intel only and not necessary for AMD cpus.

[ Impact: simplify code ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-11-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c | 19 ++-----------------
 1 file changed, 2 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index d0bb02919c63..6bbdc16cc69f 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -51,7 +51,6 @@ struct x86_pmu {
 	int		(*handle_irq)(struct pt_regs *, int);
 	u64		(*save_disable_all)(void);
 	void		(*restore_all)(u64);
-	void		(*ack_status)(u64);
 	void		(*enable)(int, u64);
 	void		(*disable)(int, u64);
 	unsigned	eventsel;
@@ -415,23 +414,11 @@ static inline u64 intel_pmu_get_status(u64 mask)
 	return status;
 }
 
-static void intel_pmu_ack_status(u64 ack)
+static inline void intel_pmu_ack_status(u64 ack)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
 }
 
-static void amd_pmu_ack_status(u64 ack)
-{
-}
-
-static void hw_perf_ack_status(u64 ack)
-{
-	if (unlikely(!perf_counters_initialized))
-		return;
-
-	x86_pmu->ack_status(ack);
-}
-
 static void intel_pmu_enable_counter(int idx, u64 config)
 {
 	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx,
@@ -788,7 +775,7 @@ again:
 		__x86_pmu_disable(counter, &counter->hw, bit);
 	}
 
-	hw_perf_ack_status(ack);
+	intel_pmu_ack_status(ack);
 
 	/*
 	 * Repeat if there is more work to be done:
@@ -904,7 +891,6 @@ static struct x86_pmu intel_pmu = {
 	.handle_irq		= intel_pmu_handle_irq,
 	.save_disable_all	= intel_pmu_save_disable_all,
 	.restore_all		= intel_pmu_restore_all,
-	.ack_status		= intel_pmu_ack_status,
 	.enable			= intel_pmu_enable_counter,
 	.disable		= intel_pmu_disable_counter,
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
@@ -918,7 +904,6 @@ static struct x86_pmu amd_pmu = {
 	.handle_irq		= amd_pmu_handle_irq,
 	.save_disable_all	= amd_pmu_save_disable_all,
 	.restore_all		= amd_pmu_restore_all,
-	.ack_status		= amd_pmu_ack_status,
 	.enable			= amd_pmu_enable_counter,
 	.disable		= amd_pmu_disable_counter,
 	.eventsel		= MSR_K7_EVNTSEL0,