Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c        | 31
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c  | 30
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c     | 10
3 files changed, 38 insertions, 33 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 086127ba580f..2dd704fa1299 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -133,8 +133,8 @@ struct x86_pmu {
 	int		(*handle_irq)(struct pt_regs *);
 	void		(*disable_all)(void);
 	void		(*enable_all)(void);
-	void		(*enable)(struct hw_perf_event *, int);
-	void		(*disable)(struct hw_perf_event *, int);
+	void		(*enable)(struct perf_event *);
+	void		(*disable)(struct perf_event *);
 	unsigned	eventsel;
 	unsigned	perfctr;
 	u64		(*event_map)(int);
@@ -845,7 +845,7 @@ void hw_perf_enable(void)
 			set_bit(hwc->idx, cpuc->active_mask);
 			cpuc->events[hwc->idx] = event;
 
-			x86_pmu.enable(hwc, hwc->idx);
+			x86_pmu.enable(event);
 			perf_event_update_userpage(event);
 		}
 		cpuc->n_added = 0;
@@ -858,15 +858,16 @@ void hw_perf_enable(void)
 	x86_pmu.enable_all();
 }
 
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
 {
-	(void)checking_wrmsrl(hwc->config_base + idx,
+	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
 			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
-static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+static inline void x86_pmu_disable_event(struct perf_event *event)
 {
-	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
+	struct hw_perf_event *hwc = &event->hw;
+	(void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
 }
 
 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -927,11 +928,11 @@ x86_perf_event_set_period(struct perf_event *event)
 	return ret;
 }
 
-static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void x86_pmu_enable_event(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	if (cpuc->enabled)
-		__x86_pmu_enable_event(hwc, idx);
+		__x86_pmu_enable_event(&event->hw);
 }
 
 /*
@@ -974,13 +975,11 @@ static int x86_pmu_enable(struct perf_event *event)
 
 static int x86_pmu_start(struct perf_event *event)
 {
-	struct hw_perf_event *hwc = &event->hw;
-
-	if (hwc->idx == -1)
+	if (event->hw.idx == -1)
 		return -EAGAIN;
 
 	x86_perf_event_set_period(event);
-	x86_pmu.enable(hwc, hwc->idx);
+	x86_pmu.enable(event);
 
 	return 0;
 }
@@ -994,7 +993,7 @@ static void x86_pmu_unthrottle(struct perf_event *event)
 			cpuc->events[hwc->idx] != event))
 		return;
 
-	x86_pmu.enable(hwc, hwc->idx);
+	x86_pmu.enable(event);
 }
 
 void perf_event_print_debug(void)
@@ -1059,7 +1058,7 @@ static void x86_pmu_stop(struct perf_event *event)
 	 * could reenable again:
 	 */
 	clear_bit(idx, cpuc->active_mask);
-	x86_pmu.disable(hwc, idx);
+	x86_pmu.disable(event);
 
 	/*
 	 * Drain the remaining delta count out of a event
@@ -1127,7 +1126,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 			continue;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			x86_pmu.disable(hwc, idx);
+			x86_pmu.disable(event);
 	}
 
 	if (handled)
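
Note: the hunks above change the x86_pmu.enable/disable callbacks from taking a (struct hw_perf_event *, int idx) pair to taking the struct perf_event itself, with each implementation reading the counter index from event->hw. Below is a minimal user-space sketch of the two calling conventions; the types are simplified stand-ins rather than the kernel structures, and the MSR base value is only illustrative.

/* Sketch of the old vs. new enable calling convention (stand-in types). */
#include <stdio.h>

struct hw_perf_event {
	unsigned long	config_base;	/* example value, stands in for an event-select MSR base */
	unsigned long	config;
	int		idx;		/* counter index, formerly passed as a separate argument */
};

struct perf_event {
	struct hw_perf_event	hw;
};

/* Old convention: the caller must keep hwc and idx consistent by hand. */
static void enable_old(struct hw_perf_event *hwc, int idx)
{
	printf("old: write MSR %#lx (idx %d)\n", hwc->config_base + idx, idx);
}

/* New convention: the callback derives everything from the event itself. */
static void enable_new(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	printf("new: write MSR %#lx (idx %d)\n", hwc->config_base + hwc->idx, hwc->idx);
}

int main(void)
{
	struct perf_event event = { .hw = { .config_base = 0x186, .idx = 1 } };

	enable_old(&event.hw, event.hw.idx);	/* two arguments that can drift apart */
	enable_new(&event);			/* one argument, idx read from event->hw */
	return 0;
}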
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index a4c9f160448e..a84094897799 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -548,9 +548,9 @@ static inline void intel_pmu_ack_status(u64 ack)
 }
 
 static inline void
-intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
+intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 {
-	int idx = __idx - X86_PMC_IDX_FIXED;
+	int idx = hwc->idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
 
 	mask = 0xfULL << (idx * 4);
@@ -621,26 +621,28 @@ static void intel_pmu_drain_bts_buffer(void)
 }
 
 static inline void
-intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+intel_pmu_disable_event(struct perf_event *event)
 {
-	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
 		intel_pmu_disable_bts();
 		intel_pmu_drain_bts_buffer();
 		return;
 	}
 
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-		intel_pmu_disable_fixed(hwc, idx);
+		intel_pmu_disable_fixed(hwc);
 		return;
 	}
 
-	x86_pmu_disable_event(hwc, idx);
+	x86_pmu_disable_event(event);
 }
 
 static inline void
-intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
+intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
-	int idx = __idx - X86_PMC_IDX_FIXED;
+	int idx = hwc->idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, bits, mask;
 	int err;
 
@@ -670,9 +672,11 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
 	err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
-static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void intel_pmu_enable_event(struct perf_event *event)
 {
-	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
 		if (!__get_cpu_var(cpu_hw_events).enabled)
 			return;
 
@@ -681,11 +685,11 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	}
 
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-		intel_pmu_enable_fixed(hwc, idx);
+		intel_pmu_enable_fixed(hwc);
 		return;
 	}
 
-	__x86_pmu_enable_event(hwc, idx);
+	__x86_pmu_enable_event(hwc);
 }
 
 /*
@@ -771,7 +775,7 @@ again:
 		data.period = event->hw.last_period;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			intel_pmu_disable_event(&event->hw, bit);
+			intel_pmu_disable_event(event);
 	}
 
 	intel_pmu_ack_status(ack);
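
Note: intel_pmu_enable_fixed()/intel_pmu_disable_fixed() now compute the fixed-counter slot from hwc->idx. Each fixed counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL, selected by shifting a 0xf mask by (hwc->idx - X86_PMC_IDX_FIXED) * 4, which is the arithmetic visible in the hunks above. Below is a small stand-alone sketch of that bit manipulation; the X86_PMC_IDX_FIXED value and the sample control word are assumptions for illustration.

/* Sketch of the fixed-counter control-bit arithmetic. */
#include <stdio.h>
#include <stdint.h>

#define X86_PMC_IDX_FIXED	32	/* assumed global index of the first fixed counter */

static uint64_t fixed_ctrl_clear(uint64_t ctrl_val, int hw_idx)
{
	int idx = hw_idx - X86_PMC_IDX_FIXED;	/* 0, 1, 2, ... within the fixed bank */
	uint64_t mask = 0xfULL << (idx * 4);	/* that counter's 4-bit control field */

	return ctrl_val & ~mask;		/* disable: zero the field */
}

int main(void)
{
	/* pretend fixed counters 0 and 1 are currently enabled (illustrative value) */
	uint64_t ctrl = 0x33;

	printf("before:                          %#llx\n", (unsigned long long)ctrl);
	printf("after disabling fixed counter 1: %#llx\n",
	       (unsigned long long)fixed_ctrl_clear(ctrl, X86_PMC_IDX_FIXED + 1));
	return 0;
}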
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index a4e67b99d91c..a330485d14da 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -77,27 +77,29 @@ static void p6_pmu_enable_all(void)
 }
 
 static inline void
-p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+p6_pmu_disable_event(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	u64 val = P6_NOP_EVENT;
 
 	if (cpuc->enabled)
 		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-	(void)checking_wrmsrl(hwc->config_base + idx, val);
+	(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
 }
 
-static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void p6_pmu_enable_event(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	u64 val;
 
 	val = hwc->config;
 	if (cpuc->enabled)
 		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-	(void)checking_wrmsrl(hwc->config_base + idx, val);
+	(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
 }
 
 static __initconst struct x86_pmu p6_pmu = {
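
Note: p6_pmu_disable_event() does not simply clear the enable bit; it reprograms the counter's event-select MSR with P6_NOP_EVENT, keeping ARCH_PERFMON_EVENTSEL_ENABLE set whenever the PMU as a whole is enabled, because on these PMUs the enable bit is shared rather than per counter. Below is a simplified sketch of that logic with stand-in types, a printing stub in place of checking_wrmsrl(), and constant values chosen purely for illustration.

/* Sketch of the p6 per-event "disable via NOP event" logic (stand-in types). */
#include <stdio.h>
#include <stdint.h>

#define P6_NOP_EVENT			0x2EULL		/* illustrative no-op event encoding */
#define ARCH_PERFMON_EVENTSEL_ENABLE	(1ULL << 22)

struct hw_perf_event {
	uint64_t	config_base;	/* e.g. an event-select MSR base; example value below */
	uint64_t	config;
	int		idx;
};

/* Stand-in for checking_wrmsrl(): just print what would be written. */
static void wrmsr_stub(uint64_t msr, uint64_t val)
{
	printf("wrmsr %#llx <- %#llx\n",
	       (unsigned long long)msr, (unsigned long long)val);
}

static void p6_disable_sketch(struct hw_perf_event *hwc, int pmu_enabled)
{
	uint64_t val = P6_NOP_EVENT;

	if (pmu_enabled)
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;	/* keep the shared enable bit set */

	wrmsr_stub(hwc->config_base + hwc->idx, val);
}

int main(void)
{
	struct hw_perf_event hwc = { .config_base = 0x186, .idx = 1 };

	p6_disable_sketch(&hwc, 1);	/* PMU enabled: NOP event, enable bit kept */
	p6_disable_sketch(&hwc, 0);	/* PMU disabled: plain NOP event */
	return 0;
}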