path: root/kernel
author     Peter Zijlstra <peterz@infradead.org>    2009-08-13 03:51:55 -0400
committer  Ingo Molnar <mingo@elte.hu>              2009-08-13 06:18:43 -0400
commit     bcfc2602e8541ac13b1def38e2591dca072cff7a (patch)
tree       0e5601dac2fbc4ca1ff452cebd1adb345b710599 /kernel
parent     8fd101f20bdf771949a8f3a5a779877d09b2fb56 (diff)
perf_counter: Fix swcounter context invariance
perf_swcounter_is_counting() uses a lock, which means we cannot use
swcounters from NMI context or while holding that particular lock;
this is unintended.

The patch below removes the lock. This opens up a race window, but one
no worse than what the swcounters already experience due to the RCU
traversal of the context in perf_swcounter_ctx_event().

This also fixes the hard lockups seen while opening a lockdep
tracepoint counter.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Corey J Ashford <cjashfor@us.ibm.com>
LKML-Reference: <1250149915.10001.66.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
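Why the lock was fatal: spin_lock_irqsave() disables local interrupts,
but NMIs are by definition not maskable, so an NMI handler that contends
for a lock already held on the same CPU spins forever and the machine
hard-locks. The sketch below is a userspace analogue of that
interleaving, not kernel code: a pthread mutex stands in for ctx->lock
and a signal handler stands in for the NMI, with trylock used so the
demonstration reports the hazard instead of hanging.

    #include <pthread.h>
    #include <signal.h>
    #include <unistd.h>

    /*
     * Userspace analogue of the NMI deadlock this patch removes
     * (illustrative only): the "NMI" fires while the interrupted
     * code already holds the lock; a kernel spinlock taken at this
     * point would spin forever, since its owner can never run again.
     */
    static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

    static void nmi_like_handler(int sig)
    {
            static const char msg[] =
                    "lock already held: a spinlock here would hang\n";
            (void)sig;
            if (pthread_mutex_trylock(&ctx_lock) != 0)
                    write(1, msg, sizeof(msg) - 1); /* would deadlock if we spun */
            else
                    pthread_mutex_unlock(&ctx_lock);
    }

    int main(void)
    {
            signal(SIGALRM, nmi_like_handler);
            pthread_mutex_lock(&ctx_lock); /* like spin_lock_irqsave(&ctx->lock) */
            raise(SIGALRM);                /* "NMI" interrupts the lock holder */
            pthread_mutex_unlock(&ctx_lock);
            return 0;
    }

Compile with -pthread; the printed message marks exactly the point at
which the pre-patch kernel code would have locked up.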
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_counter.c | 44
1 file changed, 18 insertions(+), 26 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index e26d2fcfa320..3dd4339589a0 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -3444,40 +3444,32 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 
 static int perf_swcounter_is_counting(struct perf_counter *counter)
 {
-	struct perf_counter_context *ctx;
-	unsigned long flags;
-	int count;
-
+	/*
+	 * The counter is active, we're good!
+	 */
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
 		return 1;
 
+	/*
+	 * The counter is off/error, not counting.
+	 */
 	if (counter->state != PERF_COUNTER_STATE_INACTIVE)
 		return 0;
 
 	/*
-	 * If the counter is inactive, it could be just because
-	 * its task is scheduled out, or because it's in a group
-	 * which could not go on the PMU. We want to count in
-	 * the first case but not the second. If the context is
-	 * currently active then an inactive software counter must
-	 * be the second case. If it's not currently active then
-	 * we need to know whether the counter was active when the
-	 * context was last active, which we can determine by
-	 * comparing counter->tstamp_stopped with ctx->time.
-	 *
-	 * We are within an RCU read-side critical section,
-	 * which protects the existence of *ctx.
+	 * The counter is inactive, if the context is active
+	 * we're part of a group that didn't make it on the 'pmu',
+	 * not counting.
 	 */
-	ctx = counter->ctx;
-	spin_lock_irqsave(&ctx->lock, flags);
-	count = 1;
-	/* Re-check state now we have the lock */
-	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
-	    counter->ctx->is_active ||
-	    counter->tstamp_stopped < ctx->time)
-		count = 0;
-	spin_unlock_irqrestore(&ctx->lock, flags);
-	return count;
+	if (counter->ctx->is_active)
+		return 0;
+
+	/*
+	 * We're inactive and the context is too, this means the
+	 * task is scheduled out, we're counting events that happen
+	 * to us, like migration events.
+	 */
+	return 1;
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,
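For reference, the decision table the patched function implements can be
modeled as a small standalone program. Everything here is a hypothetical
simplification: the swc_* names are stand-ins for struct perf_counter
and the PERF_COUNTER_STATE_* constants in the kernel headers; only the
branch structure mirrors the patch.

    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel's counter states. */
    enum swc_state {
            SWC_STATE_ERROR    = -2,
            SWC_STATE_OFF      = -1,
            SWC_STATE_INACTIVE =  0,
            SWC_STATE_ACTIVE   =  1,
    };

    struct swc {
            enum swc_state state;
            int ctx_is_active;      /* stands in for counter->ctx->is_active */
    };

    /* Mirrors the patched function: plain loads and compares, no lock,
     * which is what makes the check safe to run from NMI context. */
    static int swc_is_counting(const struct swc *c)
    {
            if (c->state == SWC_STATE_ACTIVE)
                    return 1;       /* scheduled on the PMU, counting */
            if (c->state != SWC_STATE_INACTIVE)
                    return 0;       /* off or in error, not counting */
            if (c->ctx_is_active)
                    return 0;       /* context runs, our group lost the PMU */
            return 1;               /* task switched out; still count events
                                     * aimed at us, e.g. migration events */
    }

    int main(void)
    {
            struct swc sleeping = { SWC_STATE_INACTIVE, 0 };
            struct swc squeezed = { SWC_STATE_INACTIVE, 1 };
            printf("%d %d\n", swc_is_counting(&sleeping),
                              swc_is_counting(&squeezed)); /* prints: 1 0 */
            return 0;
    }

The trade the patch makes is visible here: with no lock, state and
ctx_is_active can change between the reads, but that race is no worse
than the one the RCU-based context traversal already tolerates.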