author     Paul Mackerras <paulus@samba.org>    2009-06-03 05:40:36 -0400
committer  Ingo Molnar <mingo@elte.hu>          2009-06-03 05:49:53 -0400
commit     dcd945e0d8a6d654e3e1de51faea9f98f1504aa5
tree       c8bd2ab5a75744f9e210deab880e252b3d129555
parent     6984efb692e97ce5f75f26e595685c04c2061bac
perf_counter: powerpc: Fix race causing "oops trying to read PMC0" errors
When using interrupting counters and limited (non-interrupting)
counters at the same time, it's possible that we get an interrupt in
write_mmcr0() after writing MMCR0 but before we have set up the
counters using limited PMCs.  What happens then is that we get into
perf_counter_interrupt() with counter->hw.idx = 0 for the limited
counters, leading to the "oops trying to read PMC0" error message
being printed.

This fixes the problem by making perf_counter_interrupt() robust
against counter->hw.idx being zero (the counter is just ignored in
that case) and also by changing write_mmcr0() to write MMCR0
initially with the counter overflow interrupt enable bits masked
(set to 0).  If the MMCR0 value requested by the caller has either
of those bits set, we write MMCR0 again with the requested value of
those bits after setting up the limited counters properly.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Kacur <jkacur@redhat.com>
Cc: Stephane Eranian <eranian@googlemail.com>
LKML-Reference: <18982.17684.138182.954599@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
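In outline, the ordering the patch establishes is a two-phase MMCR0
write, sketched below.  This is a minimal standalone C model of the
idea, not the kernel code itself: MMCR0_FC, MMCR0_PMC1CE and
MMCR0_PMCjCE are the real bit names used by the patch (the values
here are illustrative only), while write_spr() and
setup_limited_counters() are hypothetical stand-ins for
mtspr(SPRN_MMCR0, ...) and freeze_limited_counters() /
thaw_limited_counters().

    #include <stdint.h>
    #include <stdio.h>

    /* Real MMCR0 bit names from the patch; values illustrative only. */
    #define MMCR0_FC      0x80000000u  /* freeze counters */
    #define MMCR0_PMC1CE  0x00008000u  /* PMC1 overflow interrupt enable */
    #define MMCR0_PMCjCE  0x00004000u  /* PMC2-n overflow interrupt enable */

    static uint32_t mmcr0_reg;  /* stand-in for the real SPR */

    /* Stand-in for mtspr(SPRN_MMCR0, val). */
    static void write_spr(uint32_t val)
    {
        mmcr0_reg = val;
        printf("MMCR0 <- %#x\n", val);
    }

    /* Stand-in for freeze_limited_counters()/thaw_limited_counters(). */
    static void setup_limited_counters(int freeze)
    {
        printf("%s limited counters\n", freeze ? "freeze" : "thaw");
    }

    static void write_mmcr0(uint32_t mmcr0)
    {
        /*
         * Phase 1: write MMCR0 with the counter overflow interrupt
         * enable bits masked off, so no performance monitor interrupt
         * can arrive while the limited counters are still being set up
         * (i.e. while their hw.idx is still 0).
         */
        write_spr(mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE));

        /* Now it is safe to freeze or thaw the limited counters. */
        setup_limited_counters(mmcr0 & MMCR0_FC);

        /*
         * Phase 2: once the limited counters are consistent, write the
         * full MMCR0 value, enabling any overflow interrupts the
         * caller asked for.
         */
        if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
            write_spr(mmcr0);
    }

    int main(void)
    {
        write_mmcr0(MMCR0_PMC1CE | MMCR0_PMCjCE);  /* unfrozen, PMIs on */
        return 0;
    }

The point of the sketch is purely the ordering: no value with the
overflow interrupt enables set ever reaches MMCR0 before the limited
counters are consistent, so perf_counter_interrupt() can no longer
observe a half-initialized state.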
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/perf_counter.c | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index ea54686cb787..4cc4ac5c791c 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -372,16 +372,28 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
 
 	/*
 	 * Write MMCR0, then read PMC5 and PMC6 immediately.
+	 * To ensure we don't get a performance monitor interrupt
+	 * between writing MMCR0 and freezing/thawing the limited
+	 * counters, we first write MMCR0 with the counter overflow
+	 * interrupt enable bits turned off.
 	 */
 	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
 		     : "=&r" (pmc5), "=&r" (pmc6)
-		     : "r" (mmcr0), "i" (SPRN_MMCR0),
+		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
+		       "i" (SPRN_MMCR0),
 		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));
 
 	if (mmcr0 & MMCR0_FC)
 		freeze_limited_counters(cpuhw, pmc5, pmc6);
 	else
 		thaw_limited_counters(cpuhw, pmc5, pmc6);
+
+	/*
+	 * Write the full MMCR0 including the counter overflow interrupt
+	 * enable bits, if necessary.
+	 */
+	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
+		mtspr(SPRN_MMCR0, mmcr0);
 }
 
 /*
@@ -1108,7 +1120,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 
 	for (i = 0; i < cpuhw->n_counters; ++i) {
 		counter = cpuhw->counter[i];
-		if (is_limited_pmc(counter->hw.idx))
+		if (!counter->hw.idx || is_limited_pmc(counter->hw.idx))
 			continue;
 		val = read_pmc(counter->hw.idx);
 		if ((int)val < 0) {