author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-06-14 02:49:00 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-09-09 14:46:29 -0400
commit		33696fc0d141bbbcb12f75b69608ea83282e3117 (patch)
tree		72e08dba377d57eb7dd8c08a937a6de10e8af9c4 /arch/powerpc/kernel
parent		24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7 (diff)
perf: Per PMU disable
Changes perf_disable() into perf_pmu_disable().

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc/kernel')
 arch/powerpc/kernel/perf_event.c         | 24
 arch/powerpc/kernel/perf_event_fsl_emb.c | 18
 2 files changed, 23 insertions(+), 19 deletions(-)
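The core half of this patch lives in kernel/perf_event.c and is therefore outside the arch/powerpc diffstat above: it replaces the global hw_perf_disable()/hw_perf_enable() pair with per-PMU helpers that dispatch through the new struct pmu callbacks, guarded by a per-CPU nesting counter so calls may nest. A rough sketch of those helpers follows; the field name pmu_disable_count is an assumption here, since the core diff is not shown on this page:

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);

	if (!(*count)++)		/* only the outermost call stops the hardware */
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);

	if (!--(*count))		/* the matching outermost call restarts it */
		pmu->pmu_enable(pmu);
}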
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index c1408821dbc2..deb84bbcb0e6 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -517,7 +517,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void power_pmu_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -565,7 +565,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void power_pmu_pmu_enable(struct pmu *pmu)
 {
 	struct perf_event *event;
 	struct cpu_hw_events *cpuhw;
@@ -735,7 +735,7 @@ static int power_pmu_enable(struct perf_event *event)
 	int ret = -EAGAIN;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	/*
 	 * Add the event to the list (if there is room)
@@ -769,7 +769,7 @@ nocheck:
 
 	ret = 0;
  out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 	return ret;
 }
@@ -784,7 +784,7 @@ static void power_pmu_disable(struct perf_event *event)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	power_pmu_read(event);
 
@@ -821,7 +821,7 @@ static void power_pmu_disable(struct perf_event *event)
 		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
 	}
 
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -837,7 +837,7 @@ static void power_pmu_unthrottle(struct perf_event *event)
 	if (!event->hw.idx || !event->hw.sample_period)
 		return;
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	power_pmu_read(event);
 	left = event->hw.sample_period;
 	event->hw.last_period = left;
@@ -848,7 +848,7 @@ static void power_pmu_unthrottle(struct perf_event *event)
 	local64_set(&event->hw.prev_count, val);
 	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -861,7 +861,7 @@ void power_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	perf_disable();
+	perf_pmu_disable(pmu);
 	cpuhw->group_flag |= PERF_EVENT_TXN;
 	cpuhw->n_txn_start = cpuhw->n_events;
 }
@@ -876,7 +876,7 @@ void power_pmu_cancel_txn(struct pmu *pmu)
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
-	perf_enable();
+	perf_pmu_enable(pmu);
 }
 
 /*
@@ -903,7 +903,7 @@ int power_pmu_commit_txn(struct pmu *pmu)
 		cpuhw->event[i]->hw.config = cpuhw->events[i];
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
-	perf_enable();
+	perf_pmu_enable(pmu);
 	return 0;
 }
 
@@ -1131,6 +1131,8 @@ static int power_pmu_event_init(struct perf_event *event)
 }
 
 struct pmu power_pmu = {
+	.pmu_enable	= power_pmu_pmu_enable,
+	.pmu_disable	= power_pmu_pmu_disable,
 	.event_init	= power_pmu_event_init,
 	.enable		= power_pmu_enable,
 	.disable	= power_pmu_disable,
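Note how the converted call sites in perf_event.c nest: power_pmu_start_txn() disables the PMU once for a whole group, while each power_pmu_enable() (the ->enable add-event path) brackets itself with perf_pmu_disable()/perf_pmu_enable() on the same pmu. With the nesting counter sketched above, only the outermost pair touches the hardware. A hypothetical trace of a group add under a transaction, with assumed counter values:

	power_pmu_start_txn(pmu)	/* perf_pmu_disable(pmu): 0 -> 1, PMU stopped  */
	power_pmu_enable(event)		/* perf_pmu_disable():    1 -> 2, no hw access */
					/* perf_pmu_enable():     2 -> 1, still off   */
	power_pmu_commit_txn(pmu)	/* perf_pmu_enable(pmu):  1 -> 0, PMU restarts */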
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index 9bc84a7fd901..84b1974c628f 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -177,7 +177,7 @@ static void fsl_emb_pmu_read(struct perf_event *event)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void fsl_emb_pmu_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -216,7 +216,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void fsl_emb_pmu_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -271,7 +271,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 	u64 val;
 	int i;
 
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	cpuhw = &get_cpu_var(cpu_hw_events);
 
 	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -311,7 +311,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 	ret = 0;
  out:
 	put_cpu_var(cpu_hw_events);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	return ret;
 }
 
@@ -321,7 +321,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
 	struct cpu_hw_events *cpuhw;
 	int i = event->hw.idx;
 
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	if (i < 0)
 		goto out;
 
@@ -349,7 +349,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
 	cpuhw->n_events--;
 
  out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	put_cpu_var(cpu_hw_events);
 }
 
@@ -367,7 +367,7 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
 	if (event->hw.idx < 0 || !event->hw.sample_period)
 		return;
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	fsl_emb_pmu_read(event);
 	left = event->hw.sample_period;
 	event->hw.last_period = left;
@@ -378,7 +378,7 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
 	local64_set(&event->hw.prev_count, val);
 	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -524,6 +524,8 @@ static int fsl_emb_pmu_event_init(struct perf_event *event)
 }
 
 static struct pmu fsl_emb_pmu = {
+	.pmu_enable	= fsl_emb_pmu_pmu_enable,
+	.pmu_disable	= fsl_emb_pmu_pmu_disable,
 	.event_init	= fsl_emb_pmu_event_init,
 	.enable		= fsl_emb_pmu_enable,
 	.disable	= fsl_emb_pmu_disable,
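For readers without a kernel tree at hand, here is a minimal, self-contained userspace mock of the refcounted per-PMU disable pattern this patch introduces. The names mirror the kernel's, but the single plain counter (instead of per-CPU data) and the printouts are simplifications of this sketch, not kernel code:

#include <stdio.h>

struct pmu {
	int disable_count;			/* kernel: per-CPU pmu_disable_count */
	void (*pmu_enable)(struct pmu *pmu);
	void (*pmu_disable)(struct pmu *pmu);
};

static void perf_pmu_disable(struct pmu *pmu)
{
	if (!pmu->disable_count++)		/* first disabler stops the hardware */
		pmu->pmu_disable(pmu);
}

static void perf_pmu_enable(struct pmu *pmu)
{
	if (!--pmu->disable_count)		/* last enabler restarts it */
		pmu->pmu_enable(pmu);
}

static void mock_enable(struct pmu *pmu)  { (void)pmu; puts("hw enabled");  }
static void mock_disable(struct pmu *pmu) { (void)pmu; puts("hw disabled"); }

int main(void)
{
	struct pmu pmu = { 0, mock_enable, mock_disable };

	perf_pmu_disable(&pmu);		/* prints "hw disabled" */
	perf_pmu_disable(&pmu);		/* nested: no output */
	perf_pmu_enable(&pmu);		/* still nested: no output */
	perf_pmu_enable(&pmu);		/* prints "hw enabled" */
	return 0;
}

Despite two nested disable/enable pairs, each hardware callback fires exactly once, which is the property the converted powerpc call sites above rely on.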