author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2010-06-14 02:49:00 -0400
committer  Ingo Molnar <mingo@elte.hu>               2010-09-09 14:46:29 -0400
commit     33696fc0d141bbbcb12f75b69608ea83282e3117
tree       72e08dba377d57eb7dd8c08a937a6de10e8af9c4 /arch/powerpc/kernel/perf_event.c
parent     24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7
perf: Per PMU disable
Changes perf_disable() into perf_pmu_disable() and perf_enable() into perf_pmu_enable(), so that callers disable only the PMU they are actually operating on instead of every PMU in the system.
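The new calls take the struct pmu being operated on and nest, so the hardware is only touched on the outermost disable/enable pair. The core side of the change lives outside this diffstat (which is limited to the powerpc file); as a rough sketch, assuming the per-cpu pmu_disable_count that the generic code hangs off struct pmu in this series:

	/*
	 * Sketch of the matching core helpers (not part of the diff
	 * shown below): nesting via a per-cpu counter means only the
	 * 0 <-> 1 transitions reach the driver callbacks.
	 */
	void perf_pmu_disable(struct pmu *pmu)
	{
		int *count = this_cpu_ptr(pmu->pmu_disable_count);

		if (!(*count)++)
			pmu->pmu_disable(pmu);
	}

	void perf_pmu_enable(struct pmu *pmu)
	{
		int *count = this_cpu_ptr(pmu->pmu_disable_count);

		if (!--(*count))
			pmu->pmu_enable(pmu);
	}

For this file that means power_pmu_enable()/power_pmu_disable() and the transaction helpers now stop only the PMU they were handed, via event->pmu or the pmu argument.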
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc/kernel/perf_event.c')
 arch/powerpc/kernel/perf_event.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index c1408821dbc2..deb84bbcb0e6 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -517,7 +517,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void power_pmu_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -565,7 +565,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void power_pmu_pmu_enable(struct pmu *pmu)
 {
 	struct perf_event *event;
 	struct cpu_hw_events *cpuhw;
@@ -735,7 +735,7 @@ static int power_pmu_enable(struct perf_event *event)
 	int ret = -EAGAIN;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	/*
 	 * Add the event to the list (if there is room)
@@ -769,7 +769,7 @@ nocheck:
 
 	ret = 0;
  out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 	return ret;
 }
@@ -784,7 +784,7 @@ static void power_pmu_disable(struct perf_event *event)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	power_pmu_read(event);
 
@@ -821,7 +821,7 @@ static void power_pmu_disable(struct perf_event *event)
 		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
 	}
 
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -837,7 +837,7 @@ static void power_pmu_unthrottle(struct perf_event *event)
 	if (!event->hw.idx || !event->hw.sample_period)
 		return;
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	power_pmu_read(event);
 	left = event->hw.sample_period;
 	event->hw.last_period = left;
@@ -848,7 +848,7 @@ static void power_pmu_unthrottle(struct perf_event *event)
 	local64_set(&event->hw.prev_count, val);
 	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -861,7 +861,7 @@ void power_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	perf_disable();
+	perf_pmu_disable(pmu);
 	cpuhw->group_flag |= PERF_EVENT_TXN;
 	cpuhw->n_txn_start = cpuhw->n_events;
 }
@@ -876,7 +876,7 @@ void power_pmu_cancel_txn(struct pmu *pmu)
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
-	perf_enable();
+	perf_pmu_enable(pmu);
 }
 
 /*
@@ -903,7 +903,7 @@ int power_pmu_commit_txn(struct pmu *pmu)
 		cpuhw->event[i]->hw.config = cpuhw->events[i];
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
-	perf_enable();
+	perf_pmu_enable(pmu);
 	return 0;
 }
 
@@ -1131,6 +1131,8 @@ static int power_pmu_event_init(struct perf_event *event)
 }
 
 struct pmu power_pmu = {
+	.pmu_enable	= power_pmu_pmu_enable,
+	.pmu_disable	= power_pmu_pmu_disable,
 	.event_init	= power_pmu_event_init,
 	.enable		= power_pmu_enable,
 	.disable	= power_pmu_disable,