diff options
| author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2010-06-14 02:49:00 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2010-09-09 14:46:29 -0400 |
| commit | 33696fc0d141bbbcb12f75b69608ea83282e3117 (patch) | |
| tree | 72e08dba377d57eb7dd8c08a937a6de10e8af9c4 /arch/sparc/kernel | |
| parent | 24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7 (diff) | |
perf: Per PMU disable
Changes perf_disable() into perf_pmu_disable().
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/sparc/kernel')
| -rw-r--r-- | arch/sparc/kernel/perf_event.c | 20 |
1 file changed, 11 insertions, 9 deletions
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index d0131deeeaf6..37cae676536c 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c | |||
| @@ -664,7 +664,7 @@ out: | |||
| 664 | return pcr; | 664 | return pcr; |
| 665 | } | 665 | } |
| 666 | 666 | ||
| 667 | void hw_perf_enable(void) | 667 | static void sparc_pmu_pmu_enable(struct pmu *pmu) |
| 668 | { | 668 | { |
| 669 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 669 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
| 670 | u64 pcr; | 670 | u64 pcr; |
| @@ -691,7 +691,7 @@ void hw_perf_enable(void) | |||
| 691 | pcr_ops->write(cpuc->pcr); | 691 | pcr_ops->write(cpuc->pcr); |
| 692 | } | 692 | } |
| 693 | 693 | ||
| 694 | void hw_perf_disable(void) | 694 | static void sparc_pmu_pmu_disable(struct pmu *pmu) |
| 695 | { | 695 | { |
| 696 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 696 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
| 697 | u64 val; | 697 | u64 val; |
| @@ -718,7 +718,7 @@ static void sparc_pmu_disable(struct perf_event *event) | |||
| 718 | int i; | 718 | int i; |
| 719 | 719 | ||
| 720 | local_irq_save(flags); | 720 | local_irq_save(flags); |
| 721 | perf_disable(); | 721 | perf_pmu_disable(event->pmu); |
| 722 | 722 | ||
| 723 | for (i = 0; i < cpuc->n_events; i++) { | 723 | for (i = 0; i < cpuc->n_events; i++) { |
| 724 | if (event == cpuc->event[i]) { | 724 | if (event == cpuc->event[i]) { |
| @@ -748,7 +748,7 @@ static void sparc_pmu_disable(struct perf_event *event) | |||
| 748 | } | 748 | } |
| 749 | } | 749 | } |
| 750 | 750 | ||
| 751 | perf_enable(); | 751 | perf_pmu_enable(event->pmu); |
| 752 | local_irq_restore(flags); | 752 | local_irq_restore(flags); |
| 753 | } | 753 | } |
| 754 | 754 | ||
| @@ -991,7 +991,7 @@ static int sparc_pmu_enable(struct perf_event *event) | |||
| 991 | unsigned long flags; | 991 | unsigned long flags; |
| 992 | 992 | ||
| 993 | local_irq_save(flags); | 993 | local_irq_save(flags); |
| 994 | perf_disable(); | 994 | perf_pmu_disable(event->pmu); |
| 995 | 995 | ||
| 996 | n0 = cpuc->n_events; | 996 | n0 = cpuc->n_events; |
| 997 | if (n0 >= perf_max_events) | 997 | if (n0 >= perf_max_events) |
| @@ -1020,7 +1020,7 @@ nocheck: | |||
| 1020 | 1020 | ||
| 1021 | ret = 0; | 1021 | ret = 0; |
| 1022 | out: | 1022 | out: |
| 1023 | perf_enable(); | 1023 | perf_pmu_enable(event->pmu); |
| 1024 | local_irq_restore(flags); | 1024 | local_irq_restore(flags); |
| 1025 | return ret; | 1025 | return ret; |
| 1026 | } | 1026 | } |
| @@ -1113,7 +1113,7 @@ static void sparc_pmu_start_txn(struct pmu *pmu) | |||
| 1113 | { | 1113 | { |
| 1114 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 1114 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
| 1115 | 1115 | ||
| 1116 | perf_disable(); | 1116 | perf_pmu_disable(pmu); |
| 1117 | cpuhw->group_flag |= PERF_EVENT_TXN; | 1117 | cpuhw->group_flag |= PERF_EVENT_TXN; |
| 1118 | } | 1118 | } |
| 1119 | 1119 | ||
| @@ -1127,7 +1127,7 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu) | |||
| 1127 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 1127 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
| 1128 | 1128 | ||
| 1129 | cpuhw->group_flag &= ~PERF_EVENT_TXN; | 1129 | cpuhw->group_flag &= ~PERF_EVENT_TXN; |
| 1130 | perf_enable(); | 1130 | perf_pmu_enable(pmu); |
| 1131 | } | 1131 | } |
| 1132 | 1132 | ||
| 1133 | /* | 1133 | /* |
| @@ -1151,11 +1151,13 @@ static int sparc_pmu_commit_txn(struct pmu *pmu) | |||
| 1151 | return -EAGAIN; | 1151 | return -EAGAIN; |
| 1152 | 1152 | ||
| 1153 | cpuc->group_flag &= ~PERF_EVENT_TXN; | 1153 | cpuc->group_flag &= ~PERF_EVENT_TXN; |
| 1154 | perf_enable(); | 1154 | perf_pmu_enable(pmu); |
| 1155 | return 0; | 1155 | return 0; |
| 1156 | } | 1156 | } |
| 1157 | 1157 | ||
| 1158 | static struct pmu pmu = { | 1158 | static struct pmu pmu = { |
| 1159 | .pmu_enable = sparc_pmu_pmu_enable, | ||
| 1160 | .pmu_disable = sparc_pmu_pmu_disable, | ||
| 1159 | .event_init = sparc_pmu_event_init, | 1161 | .event_init = sparc_pmu_event_init, |
| 1160 | .enable = sparc_pmu_enable, | 1162 | .enable = sparc_pmu_enable, |
| 1161 | .disable = sparc_pmu_disable, | 1163 | .disable = sparc_pmu_disable, |
