author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-11-23 05:37:27 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-11-23 05:49:57 -0500
commit	f67218c3e93abaf0f480bb94b53d234853ffe4de (patch)
tree	8b82587c79c53376b9295e543932d746fd725260 /kernel
parent	5e942bb33371254a474653123cd9e13a4c89ee44 (diff)
perf_events: Fix __perf_event_exit_task() vs. update_event_times() locking
Move the update_event_times() call in __perf_event_exit_task() into list_del_event() because that holds the proper lock (ctx->lock) and seems a more natural place to do the last time update.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <20091123103819.842455480@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
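For readers skimming the patch, here is a minimal standalone sketch of the locking pattern it enforces: the final time bookkeeping runs inside the list-removal helper, whose callers already hold the context lock, rather than in the exit path where the lock is not held. A pthread mutex stands in for ctx->lock; all types and helpers below are simplified illustrations, not the kernel's real structures.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for perf_event_context / perf_event. */
struct ctx {
	pthread_mutex_t lock;   /* stands in for ctx->lock */
	uint64_t time;          /* stands in for ctx->time */
};

struct event {
	struct ctx *ctx;
	uint64_t tstamp_enabled;
	uint64_t total_time_enabled;
};

/* Bookkeeping that must only run while ctx->lock is held. */
static void update_event_times(struct event *e)
{
	e->total_time_enabled = e->ctx->time - e->tstamp_enabled;
}

/*
 * After the patch: the last time update happens inside the
 * list-removal helper, which is always called with the lock held,
 * instead of in the exit path without it.
 */
static void list_del_event(struct event *e)   /* called with ctx->lock held */
{
	update_event_times(e);
	/* ... unlink the event from its context's lists ... */
}

static void remove_from_context(struct event *e)
{
	pthread_mutex_lock(&e->ctx->lock);
	list_del_event(e);
	pthread_mutex_unlock(&e->ctx->lock);
}

int main(void)
{
	static struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER, .time = 100 };
	struct event e = { .ctx = &c, .tstamp_enabled = 40 };

	remove_from_context(&e);
	printf("total_time_enabled = %llu\n",
	       (unsigned long long)e.total_time_enabled);
	return 0;
}

Build with cc -pthread; the only point of the sketch is that update_event_times() already holds the lock by the time it reads ctx->time.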
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/perf_event.c | 78
1 file changed, 39 insertions(+), 39 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 8be2574b89b6..50f11b5f8c3d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -246,6 +246,44 @@ static void perf_unpin_context(struct perf_event_context *ctx)
 	put_ctx(ctx);
 }
 
+static inline u64 perf_clock(void)
+{
+	return cpu_clock(smp_processor_id());
+}
+
+/*
+ * Update the record of the current time in a context.
+ */
+static void update_context_time(struct perf_event_context *ctx)
+{
+	u64 now = perf_clock();
+
+	ctx->time += now - ctx->timestamp;
+	ctx->timestamp = now;
+}
+
+/*
+ * Update the total_time_enabled and total_time_running fields for a event.
+ */
+static void update_event_times(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+	u64 run_end;
+
+	if (event->state < PERF_EVENT_STATE_INACTIVE ||
+	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
+		return;
+
+	event->total_time_enabled = ctx->time - event->tstamp_enabled;
+
+	if (event->state == PERF_EVENT_STATE_INACTIVE)
+		run_end = event->tstamp_stopped;
+	else
+		run_end = ctx->time;
+
+	event->total_time_running = run_end - event->tstamp_running;
+}
+
 /*
  * Add a event from the lists for its context.
  * Must be called with ctx->mutex and ctx->lock held.
@@ -294,6 +332,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 	if (event->group_leader != event)
 		event->group_leader->nr_siblings--;
 
+	update_event_times(event);
 	event->state = PERF_EVENT_STATE_OFF;
 
 	/*
@@ -454,44 +493,6 @@ retry:
 	spin_unlock_irq(&ctx->lock);
 }
 
-static inline u64 perf_clock(void)
-{
-	return cpu_clock(smp_processor_id());
-}
-
-/*
- * Update the record of the current time in a context.
- */
-static void update_context_time(struct perf_event_context *ctx)
-{
-	u64 now = perf_clock();
-
-	ctx->time += now - ctx->timestamp;
-	ctx->timestamp = now;
-}
-
-/*
- * Update the total_time_enabled and total_time_running fields for a event.
- */
-static void update_event_times(struct perf_event *event)
-{
-	struct perf_event_context *ctx = event->ctx;
-	u64 run_end;
-
-	if (event->state < PERF_EVENT_STATE_INACTIVE ||
-	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
-		return;
-
-	event->total_time_enabled = ctx->time - event->tstamp_enabled;
-
-	if (event->state == PERF_EVENT_STATE_INACTIVE)
-		run_end = event->tstamp_stopped;
-	else
-		run_end = ctx->time;
-
-	event->total_time_running = run_end - event->tstamp_running;
-}
-
 /*
  * Update total_time_enabled and total_time_running for all events in a group.
  */
@@ -4931,7 +4932,6 @@ __perf_event_exit_task(struct perf_event *child_event,
 {
 	struct perf_event *parent_event;
 
-	update_event_times(child_event);
 	perf_event_remove_from_context(child_event);
 
 	parent_event = child_event->parent;