about · summary · refs · log · tree · commit · diff · stats
path: root/arch/powerpc/kernel/perf_event_fsl_emb.c
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2010-06-14 02:49:00 -0400
committerIngo Molnar <mingo@elte.hu>2010-09-09 14:46:29 -0400
commit33696fc0d141bbbcb12f75b69608ea83282e3117 (patch)
tree72e08dba377d57eb7dd8c08a937a6de10e8af9c4 /arch/powerpc/kernel/perf_event_fsl_emb.c
parent24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7 (diff)
perf: Per PMU disable
Changes perf_disable() into perf_pmu_disable().

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc/kernel/perf_event_fsl_emb.c')
-rw-r--r--  arch/powerpc/kernel/perf_event_fsl_emb.c | 18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index 9bc84a7fd90..84b1974c628 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -177,7 +177,7 @@ static void fsl_emb_pmu_read(struct perf_event *event)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void fsl_emb_pmu_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -216,7 +216,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void fsl_emb_pmu_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -271,7 +271,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 	u64 val;
 	int i;

-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	cpuhw = &get_cpu_var(cpu_hw_events);

 	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -311,7 +311,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 	ret = 0;
  out:
 	put_cpu_var(cpu_hw_events);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	return ret;
 }

@@ -321,7 +321,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
 	struct cpu_hw_events *cpuhw;
 	int i = event->hw.idx;

-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	if (i < 0)
 		goto out;

@@ -349,7 +349,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
 	cpuhw->n_events--;

  out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	put_cpu_var(cpu_hw_events);
 }

@@ -367,7 +367,7 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
 	if (event->hw.idx < 0 || !event->hw.sample_period)
 		return;
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	fsl_emb_pmu_read(event);
 	left = event->hw.sample_period;
 	event->hw.last_period = left;
@@ -378,7 +378,7 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
 	local64_set(&event->hw.prev_count, val);
 	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }

@@ -524,6 +524,8 @@ static int fsl_emb_pmu_event_init(struct perf_event *event)
 }

 static struct pmu fsl_emb_pmu = {
+	.pmu_enable	= fsl_emb_pmu_pmu_enable,
+	.pmu_disable	= fsl_emb_pmu_pmu_disable,
 	.event_init	= fsl_emb_pmu_event_init,
 	.enable		= fsl_emb_pmu_enable,
 	.disable	= fsl_emb_pmu_disable,