about summary refs log tree commit diff stats
path: root/kernel/events
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2011-06-06 18:23:28 -0400
committerIngo Molnar <mingo@elte.hu>2011-06-07 07:02:41 -0400
commitb58f6b0dd3d677338b9065388cc2cc942b86338e (patch)
tree1c10cd87480b3c05b100fb4d85afaecfe2dd5b1b /kernel/events
parent3ce2a0bc9dfb6423491afe0afc9f099e24b8cba4 (diff)
perf, core: Fix initial task_ctx/event installation
A lost Quilt refresh of 2c29ef0fef8 (perf: Simplify and fix
__perf_install_in_context()) is causing grief and lockups,
reported by Jiri Olsa.

When installing an event in a task context, there's a number of
issues:

 - there might not be an existing task context, in which case
   we should install the now current context;

 - there might already be a context, not the current one, in
   which case we should de-schedule the old and install the new;

these cases were dealt with in the lost refresh, however there is
one further case that was found in testing:

 - there might already be a context, the current one, in which
   case we should still de-schedule, and should take care
   to re-install it (note that task_ctx_sched_out() clears
   cpuctx->task_ctx).

Reported-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1307399008.2497.971.camel@laptop
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/core.c | 28
1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ba89f40abe6..5e8c7b1389b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1505,25 +1505,31 @@ static int __perf_install_in_context(void *info)
 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
 	struct task_struct *task = current;
 
-	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+	perf_ctx_lock(cpuctx, task_ctx);
 	perf_pmu_disable(cpuctx->ctx.pmu);
 
 	/*
 	 * If there was an active task_ctx schedule it out.
 	 */
-	if (task_ctx) {
+	if (task_ctx)
 		task_ctx_sched_out(task_ctx);
-		/*
-		 * If the context we're installing events in is not the
-		 * active task_ctx, flip them.
-		 */
-		if (ctx->task && task_ctx != ctx) {
-			raw_spin_unlock(&cpuctx->ctx.lock);
-			raw_spin_lock(&ctx->lock);
-			cpuctx->task_ctx = task_ctx = ctx;
-		}
+
+	/*
+	 * If the context we're installing events in is not the
+	 * active task_ctx, flip them.
+	 */
+	if (ctx->task && task_ctx != ctx) {
+		if (task_ctx)
+			raw_spin_unlock(&task_ctx->lock);
+		raw_spin_lock(&ctx->lock);
+		task_ctx = ctx;
+	}
+
+	if (task_ctx) {
+		cpuctx->task_ctx = task_ctx;
 		task = task_ctx->task;
 	}
+
 	cpu_ctx_sched_out(cpuctx, EVENT_ALL);
 
 	update_context_time(ctx);