author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-04-08 09:01:25 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-08 12:53:27 -0400
commit		e30e08f65c7ef6c230424264f09c3d53f117f58b
tree		bc30c6a31dc53932a5f7030402e87b36d7bddc07	/kernel/perf_counter.c
parent		7333a8003cdc0470e8c0ae8b949cbc44f3165ff3
perf_counter: fix NMI race in task clock
We should not be updating ctx->time from NMI context; work around that.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090408130408.681326666@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
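
The race, in short: update_context_time() performs a non-atomic read-modify-write of shared context state (ctx->time and ctx->timestamp). If an NMI lands in the middle of that sequence and its handler reruns the same update, the interrupted invocation later resumes and overwrites the fields with stale values. Below is a minimal userspace C sketch of the unsafe path versus the NMI-safe estimate the fix adopts; the struct fields and perf_clock() mirror the names in the diff, but the CLOCK_MONOTONIC-based clock and standalone types are illustrative assumptions, not the kernel implementation.

#include <stdint.h>
#include <time.h>

/* Simplified stand-in for the kernel's counter context (assumed shape). */
struct perf_counter_context {
	uint64_t time;       /* accumulated context time */
	uint64_t timestamp;  /* moment 'time' was last folded forward */
};

/* Illustrative clock; the kernel has its own perf_clock(). */
static uint64_t perf_clock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/*
 * Unsafe from NMI: a read-modify-write of shared state. An NMI that
 * arrives between the two stores and reruns this function leaves
 * ctx->time/ctx->timestamp inconsistent once the interrupted copy
 * resumes and writes back stale values.
 */
static void update_context_time(struct perf_counter_context *ctx)
{
	uint64_t now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

/*
 * NMI-safe alternative used by the fix: derive a current-time
 * estimate locally and leave the shared fields untouched.
 */
static uint64_t nmi_safe_context_time(struct perf_counter_context *ctx)
{
	uint64_t delta = perf_clock() - ctx->timestamp;

	return ctx->time + delta;
}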
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 863703b3158f..84a39081344c 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -319,8 +319,6 @@ static void __perf_counter_disable(void *info)
 
 	spin_lock_irqsave(&ctx->lock, flags);
 
-	update_context_time(ctx);
-
 	/*
 	 * If the counter is on, turn it off.
 	 * If it is in error state, leave it in error state.
@@ -2335,13 +2333,11 @@ static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
  * Software counter: task time clock
  */
 
-static void task_clock_perf_counter_update(struct perf_counter *counter)
+static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
 {
-	u64 prev, now;
+	u64 prev;
 	s64 delta;
 
-	now = counter->ctx->time;
-
 	prev = atomic64_xchg(&counter->hw.prev_count, now);
 	delta = now - prev;
 	atomic64_add(delta, &counter->count);
@@ -2369,13 +2365,24 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
 static void task_clock_perf_counter_disable(struct perf_counter *counter)
 {
 	hrtimer_cancel(&counter->hw.hrtimer);
-	task_clock_perf_counter_update(counter);
+	task_clock_perf_counter_update(counter, counter->ctx->time);
+
 }
 
 static void task_clock_perf_counter_read(struct perf_counter *counter)
 {
-	update_context_time(counter->ctx);
-	task_clock_perf_counter_update(counter);
+	u64 time;
+
+	if (!in_nmi()) {
+		update_context_time(counter->ctx);
+		time = counter->ctx->time;
+	} else {
+		u64 now = perf_clock();
+		u64 delta = now - counter->ctx->timestamp;
+		time = counter->ctx->time + delta;
+	}
+
+	task_clock_perf_counter_update(counter, time);
 }
 
 static const struct hw_perf_counter_ops perf_ops_task_clock = {
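
The update side stays callable from NMI context because, after this change, task_clock_perf_counter_update() takes the timestamp from its caller and then only touches per-counter atomics. A minimal sketch of that pattern using C11 atomics in place of the kernel's atomic64_t (the struct and function names here are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdint.h>

struct hw_counter {
	_Atomic uint64_t prev_count; /* last timestamp accounted for */
	_Atomic uint64_t count;      /* accumulated clock value      */
};

/*
 * Mirrors the fixed task_clock_perf_counter_update(): 'now' is
 * supplied by the caller, and everything that follows is an atomic
 * per-counter operation, so nothing shared can be corrupted when
 * this runs from NMI context.
 */
static void counter_update(struct hw_counter *hwc, uint64_t now)
{
	uint64_t prev = atomic_exchange(&hwc->prev_count, now);

	atomic_fetch_add(&hwc->count, now - prev);
}

Passing 'now' in as a parameter is what lets the read path choose between the full update (process context) and the local estimate (NMI context) without duplicating the delta accounting.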