author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-03-08 07:51:31 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-03-10 07:23:39 -0500
commit	7645a24cbd01cbf4865d1273d5ddaa8d8c2ccb3a (patch)
tree	60cbc260be0756d14224142b5773e1760557b826 /arch/x86
parent	b83a46e7da4a948cc852ba7805dfb1a392dec861 (diff)
perf, x86: Remove checking_{wr,rd}msr() usage
We don't need the checking_{wr,rd}msr() calls, since we should know what CPU we're running on and should not blindly poke at MSRs.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
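For context, the two MSR-write helpers differ only in error handling, not in the write they perform. The sketch below is an illustrative approximation of how they were defined in the x86 msr headers of that era (paraphrased, not the exact kernel source): checking_wrmsrl() goes through wrmsr_safe(), which catches the #GP fault raised by a write to an invalid MSR and returns an error code, while wrmsrl() expands to a plain native_write_msr() and simply faults. Since the perf code only programs counter MSRs it already knows exist on the running CPU, swallowing such faults would merely hide bugs, which is why the plain variant is used and the now-unused err/ret plumbing in the callers is dropped.

/* Illustrative sketch only; the real definitions live in asm/msr.h. */

/* Plain write: a write to a bad MSR raises #GP and the bug stays visible. */
#define wrmsrl(msr, val)						\
	native_write_msr((msr), (u32)((u64)(val)),			\
			 (u32)((u64)(val) >> 32))

/* Checked write: wrmsr_safe() traps the #GP and returns non-zero instead. */
#define checking_wrmsrl(msr, val)					\
	wrmsr_safe((msr), (u32)(val), (u32)((val) >> 32))

The #if 0 block added at the top of perf_event.c below follows the same native_write_msr() expansion, with a trace_printk() in front, so MSR writes can be traced when the block is enabled by hand.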
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	24
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	5
2 files changed, 20 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 335ee1d38b7..e24f6374f9f 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -29,6 +29,17 @@
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
 
+#if 0
+#undef wrmsrl
+#define wrmsrl(msr, val)					\
+do {								\
+	trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
+			(unsigned long)(val));			\
+	native_write_msr((msr), (u32)((u64)(val)),		\
+			(u32)((u64)(val) >> 32));		\
+} while (0)
+#endif
+
 /*
  * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
  */
@@ -821,14 +832,15 @@ void hw_perf_enable(void)
 
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
 {
-	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
-			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
+	wrmsrl(hwc->config_base + hwc->idx,
+	       hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
 static inline void x86_pmu_disable_event(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
-	(void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
+
+	wrmsrl(hwc->config_base + hwc->idx, hwc->config);
 }
 
 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -843,7 +855,7 @@ x86_perf_event_set_period(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	s64 left = atomic64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
-	int err, ret = 0, idx = hwc->idx;
+	int ret = 0, idx = hwc->idx;
 
 	if (idx == X86_PMC_IDX_FIXED_BTS)
 		return 0;
@@ -881,8 +893,8 @@ x86_perf_event_set_period(struct perf_event *event)
 	 */
 	atomic64_set(&hwc->prev_count, (u64)-left);
 
-	err = checking_wrmsrl(hwc->event_base + idx,
-			     (u64)(-left) & x86_pmu.event_mask);
+	wrmsrl(hwc->event_base + idx,
+			(u64)(-left) & x86_pmu.event_mask);
 
 	perf_event_update_userpage(event);
 
@@ -987,7 +999,7 @@ void perf_event_print_debug(void)
 		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
 		pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
 	}
-	pr_info("CPU#%d: active:       %016llx\n", cpu, *(u64 *)cpuc->active_mask);
+	pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
 	for (idx = 0; idx < x86_pmu.num_events; idx++) {
 		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index d3e2424069a..971dc6e7d54 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -525,7 +525,7 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 
 	rdmsrl(hwc->config_base, ctrl_val);
 	ctrl_val &= ~mask;
-	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
+	wrmsrl(hwc->config_base, ctrl_val);
 }
 
 static void intel_pmu_disable_event(struct perf_event *event)
@@ -553,7 +553,6 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
 	int idx = hwc->idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, bits, mask;
-	int err;
 
 	/*
 	 * Enable IRQ generation (0x8),
@@ -578,7 +577,7 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 	rdmsrl(hwc->config_base, ctrl_val);
 	ctrl_val &= ~mask;
 	ctrl_val |= bits;
-	err = checking_wrmsrl(hwc->config_base, ctrl_val);
+	wrmsrl(hwc->config_base, ctrl_val);
 }
 
 static void intel_pmu_enable_event(struct perf_event *event)