 kernel/perf_counter.c | 44 ++++++++++++++++++--------------------------
 1 file changed, 18 insertions(+), 26 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index e26d2fcfa320..3dd4339589a0 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -3444,40 +3444,32 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 
 static int perf_swcounter_is_counting(struct perf_counter *counter)
 {
-        struct perf_counter_context *ctx;
-        unsigned long flags;
-        int count;
-
+        /*
+         * The counter is active, we're good!
+         */
         if (counter->state == PERF_COUNTER_STATE_ACTIVE)
                 return 1;
 
+        /*
+         * The counter is off/error, not counting.
+         */
         if (counter->state != PERF_COUNTER_STATE_INACTIVE)
                 return 0;
 
         /*
-         * If the counter is inactive, it could be just because
-         * its task is scheduled out, or because it's in a group
-         * which could not go on the PMU. We want to count in
-         * the first case but not the second. If the context is
-         * currently active then an inactive software counter must
-         * be the second case. If it's not currently active then
-         * we need to know whether the counter was active when the
-         * context was last active, which we can determine by
-         * comparing counter->tstamp_stopped with ctx->time.
-         *
-         * We are within an RCU read-side critical section,
-         * which protects the existence of *ctx.
+         * The counter is inactive, if the context is active
+         * we're part of a group that didn't make it on the 'pmu',
+         * not counting.
          */
-        ctx = counter->ctx;
-        spin_lock_irqsave(&ctx->lock, flags);
-        count = 1;
-        /* Re-check state now we have the lock */
-        if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
-            counter->ctx->is_active ||
-            counter->tstamp_stopped < ctx->time)
-                count = 0;
-        spin_unlock_irqrestore(&ctx->lock, flags);
-        return count;
+        if (counter->ctx->is_active)
+                return 0;
+
+        /*
+         * We're inactive and the context is too, this means the
+         * task is scheduled out, we're counting events that happen
+         * to us, like migration events.
+         */
+        return 1;
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,
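
For readability, this is perf_swcounter_is_counting() as it reads after the patch, assembled from the '+' and context lines of the hunk above; the PERF_COUNTER_STATE_* values and struct perf_counter are defined elsewhere in kernel/perf_counter.c and are not shown here. The rewrite drops the ctx->lock acquisition and the tstamp_stopped/ctx->time comparison of the old version in favour of a plain check of counter->ctx->is_active.

static int perf_swcounter_is_counting(struct perf_counter *counter)
{
        /*
         * The counter is active, we're good!
         */
        if (counter->state == PERF_COUNTER_STATE_ACTIVE)
                return 1;

        /*
         * The counter is off/error, not counting.
         */
        if (counter->state != PERF_COUNTER_STATE_INACTIVE)
                return 0;

        /*
         * The counter is inactive, if the context is active
         * we're part of a group that didn't make it on the 'pmu',
         * not counting.
         */
        if (counter->ctx->is_active)
                return 0;

        /*
         * We're inactive and the context is too, this means the
         * task is scheduled out, we're counting events that happen
         * to us, like migration events.
         */
        return 1;
}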
