author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-11-20 16:19:54 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-11-21 08:11:39 -0500
commit		2b8988c9f7defe319cffe0cd362a7cd356c86f62 (patch)
tree		0b9dde7d4c4304eaa1b701897609b1918d72e0f3
parent		58e5ad1de3d6ad931c84f0cc8ef0655c922f30ad (diff)
perf: Fix time locking
Most sites that update ctx->time and the event times already do so under
ctx->lock; make sure they all do.

This was made possible by removing the __perf_event_read() call from
__perf_event_sync_stat(), which already had this lock taken.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <20091120212509.102316434@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	kernel/perf_event.c	9
1 file changed, 9 insertions(+), 0 deletions(-)
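The first hunk below brackets the time updates in __perf_event_read() with ctx->lock. Since __perf_event_read() only runs from the smp_call_function_single() IPI, interrupts are already disabled there and the plain spin_lock() variant is enough. As a sketch, this is roughly how the function reads with the hunk applied; the lines outside the hunk are reconstructed from the surrounding kernel source of that era and abridged, so they may differ slightly:

static void __perf_event_read(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;

	/*
	 * For a task context, bail out if it is no longer the context
	 * scheduled on this CPU; its times were already brought up to
	 * date when it was scheduled out.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	/*
	 * Called from the smp_call_function_single() IPI, so interrupts
	 * are off and a plain spin_lock() on ctx->lock suffices.
	 */
	spin_lock(&ctx->lock);
	update_context_time(ctx);
	update_event_times(event);
	spin_unlock(&ctx->lock);

	event->pmu->read(event);
}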
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 028619dd6d0e..fdfae888a67c 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1526,8 +1526,11 @@ static void __perf_event_read(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
+	spin_lock(&ctx->lock);
 	update_context_time(ctx);
 	update_event_times(event);
+	spin_unlock(&ctx->lock);
+
 	event->pmu->read(event);
 }
 
@@ -1541,7 +1544,13 @@ static u64 perf_event_read(struct perf_event *event)
 		smp_call_function_single(event->oncpu,
 					 __perf_event_read, event, 1);
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
+		struct perf_event_context *ctx = event->ctx;
+		unsigned long flags;
+
+		spin_lock_irqsave(&ctx->lock, flags);
+		update_context_time(ctx);
 		update_event_times(event);
+		spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
 	return atomic64_read(&event->count);
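The second hunk covers the other reader: for an inactive event, perf_event_read() now updates the context and event times locally, and because it runs in process context it must use the IRQ-saving lock variant (ctx->lock is also taken with interrupts disabled, as in the IPI path above). This is also why dropping the __perf_event_read() call from __perf_event_sync_stat() was a prerequisite: that path already holds ctx->lock, so calling into a function that now takes the same lock would try to acquire it twice. A sketch of perf_event_read() with the hunk applied, with the lines outside the hunk reconstructed and abridged:

static u64 perf_event_read(struct perf_event *event)
{
	/*
	 * Active event: read it on the CPU it is running on;
	 * __perf_event_read() takes ctx->lock itself from the IPI
	 * (see the first hunk above).
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		smp_call_function_single(event->oncpu,
					 __perf_event_read, event, 1);
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
		struct perf_event_context *ctx = event->ctx;
		unsigned long flags;

		/*
		 * Inactive event: update the times locally. This is
		 * process context, so disable interrupts while holding
		 * ctx->lock.
		 */
		spin_lock_irqsave(&ctx->lock, flags);
		update_context_time(ctx);
		update_event_times(event);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return atomic64_read(&event->count);
}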