diff options
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r-- | arch/x86/kernel/cpu/perf_event.c | 31 |
1 file changed, 15 insertions, 16 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 086127ba580f..2dd704fa1299 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -133,8 +133,8 @@ struct x86_pmu { | |||
133 | int (*handle_irq)(struct pt_regs *); | 133 | int (*handle_irq)(struct pt_regs *); |
134 | void (*disable_all)(void); | 134 | void (*disable_all)(void); |
135 | void (*enable_all)(void); | 135 | void (*enable_all)(void); |
136 | void (*enable)(struct hw_perf_event *, int); | 136 | void (*enable)(struct perf_event *); |
137 | void (*disable)(struct hw_perf_event *, int); | 137 | void (*disable)(struct perf_event *); |
138 | unsigned eventsel; | 138 | unsigned eventsel; |
139 | unsigned perfctr; | 139 | unsigned perfctr; |
140 | u64 (*event_map)(int); | 140 | u64 (*event_map)(int); |
@@ -845,7 +845,7 @@ void hw_perf_enable(void) | |||
845 | set_bit(hwc->idx, cpuc->active_mask); | 845 | set_bit(hwc->idx, cpuc->active_mask); |
846 | cpuc->events[hwc->idx] = event; | 846 | cpuc->events[hwc->idx] = event; |
847 | 847 | ||
848 | x86_pmu.enable(hwc, hwc->idx); | 848 | x86_pmu.enable(event); |
849 | perf_event_update_userpage(event); | 849 | perf_event_update_userpage(event); |
850 | } | 850 | } |
851 | cpuc->n_added = 0; | 851 | cpuc->n_added = 0; |
@@ -858,15 +858,16 @@ void hw_perf_enable(void) | |||
858 | x86_pmu.enable_all(); | 858 | x86_pmu.enable_all(); |
859 | } | 859 | } |
860 | 860 | ||
861 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) | 861 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc) |
862 | { | 862 | { |
863 | (void)checking_wrmsrl(hwc->config_base + idx, | 863 | (void)checking_wrmsrl(hwc->config_base + hwc->idx, |
864 | hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE); | 864 | hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE); |
865 | } | 865 | } |
866 | 866 | ||
867 | static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx) | 867 | static inline void x86_pmu_disable_event(struct perf_event *event) |
868 | { | 868 | { |
869 | (void)checking_wrmsrl(hwc->config_base + idx, hwc->config); | 869 | struct hw_perf_event *hwc = &event->hw; |
870 | (void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config); | ||
870 | } | 871 | } |
871 | 872 | ||
872 | static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); | 873 | static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); |
@@ -927,11 +928,11 @@ x86_perf_event_set_period(struct perf_event *event) | |||
927 | return ret; | 928 | return ret; |
928 | } | 929 | } |
929 | 930 | ||
930 | static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) | 931 | static void x86_pmu_enable_event(struct perf_event *event) |
931 | { | 932 | { |
932 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 933 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
933 | if (cpuc->enabled) | 934 | if (cpuc->enabled) |
934 | __x86_pmu_enable_event(hwc, idx); | 935 | __x86_pmu_enable_event(&event->hw); |
935 | } | 936 | } |
936 | 937 | ||
937 | /* | 938 | /* |
@@ -974,13 +975,11 @@ static int x86_pmu_enable(struct perf_event *event) | |||
974 | 975 | ||
975 | static int x86_pmu_start(struct perf_event *event) | 976 | static int x86_pmu_start(struct perf_event *event) |
976 | { | 977 | { |
977 | struct hw_perf_event *hwc = &event->hw; | 978 | if (event->hw.idx == -1) |
978 | |||
979 | if (hwc->idx == -1) | ||
980 | return -EAGAIN; | 979 | return -EAGAIN; |
981 | 980 | ||
982 | x86_perf_event_set_period(event); | 981 | x86_perf_event_set_period(event); |
983 | x86_pmu.enable(hwc, hwc->idx); | 982 | x86_pmu.enable(event); |
984 | 983 | ||
985 | return 0; | 984 | return 0; |
986 | } | 985 | } |
@@ -994,7 +993,7 @@ static void x86_pmu_unthrottle(struct perf_event *event) | |||
994 | cpuc->events[hwc->idx] != event)) | 993 | cpuc->events[hwc->idx] != event)) |
995 | return; | 994 | return; |
996 | 995 | ||
997 | x86_pmu.enable(hwc, hwc->idx); | 996 | x86_pmu.enable(event); |
998 | } | 997 | } |
999 | 998 | ||
1000 | void perf_event_print_debug(void) | 999 | void perf_event_print_debug(void) |
@@ -1059,7 +1058,7 @@ static void x86_pmu_stop(struct perf_event *event) | |||
1059 | * could reenable again: | 1058 | * could reenable again: |
1060 | */ | 1059 | */ |
1061 | clear_bit(idx, cpuc->active_mask); | 1060 | clear_bit(idx, cpuc->active_mask); |
1062 | x86_pmu.disable(hwc, idx); | 1061 | x86_pmu.disable(event); |
1063 | 1062 | ||
1064 | /* | 1063 | /* |
1065 | * Drain the remaining delta count out of a event | 1064 | * Drain the remaining delta count out of a event |
@@ -1127,7 +1126,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) | |||
1127 | continue; | 1126 | continue; |
1128 | 1127 | ||
1129 | if (perf_event_overflow(event, 1, &data, regs)) | 1128 | if (perf_event_overflow(event, 1, &data, regs)) |
1130 | x86_pmu.disable(hwc, idx); | 1129 | x86_pmu.disable(event); |
1131 | } | 1130 | } |
1132 | 1131 | ||
1133 | if (handled) | 1132 | if (handled) |