-rw-r--r--  kernel/perf_counter.c  36
1 file changed, 17 insertions(+), 19 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index d55a50da2347..8bf997d86bf4 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -146,6 +146,14 @@ static void put_ctx(struct perf_counter_context *ctx)
 	}
 }
 
+static void unclone_ctx(struct perf_counter_context *ctx)
+{
+	if (ctx->parent_ctx) {
+		put_ctx(ctx->parent_ctx);
+		ctx->parent_ctx = NULL;
+	}
+}
+
 /*
  * Get the perf_counter_context for a task and lock it.
  * This has to cope with with the fact that until it is locked,
@@ -1463,10 +1471,8 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
 	/*
 	 * Unclone this context if we enabled any counter.
 	 */
-	if (enabled && ctx->parent_ctx) {
-		put_ctx(ctx->parent_ctx);
-		ctx->parent_ctx = NULL;
-	}
+	if (enabled)
+		unclone_ctx(ctx);
 
 	spin_unlock(&ctx->lock);
 
@@ -1526,7 +1532,6 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
 
 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 {
-	struct perf_counter_context *parent_ctx;
 	struct perf_counter_context *ctx;
 	struct perf_cpu_context *cpuctx;
 	struct task_struct *task;
@@ -1586,11 +1591,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
  retry:
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
-		parent_ctx = ctx->parent_ctx;
-		if (parent_ctx) {
-			put_ctx(parent_ctx);
-			ctx->parent_ctx = NULL; /* no longer a clone */
-		}
+		unclone_ctx(ctx);
 		spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
@@ -4255,15 +4256,12 @@ void perf_counter_exit_task(struct task_struct *child)
 	 */
 	spin_lock(&child_ctx->lock);
 	child->perf_counter_ctxp = NULL;
-	if (child_ctx->parent_ctx) {
-		/*
-		 * This context is a clone; unclone it so it can't get
-		 * swapped to another process while we're removing all
-		 * the counters from it.
-		 */
-		put_ctx(child_ctx->parent_ctx);
-		child_ctx->parent_ctx = NULL;
-	}
+	/*
+	 * If this context is a clone; unclone it so it can't get
+	 * swapped to another process while we're removing all
+	 * the counters from it.
+	 */
+	unclone_ctx(child_ctx);
 	spin_unlock(&child_ctx->lock);
 	local_irq_restore(flags);
 
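
For reference, a minimal standalone sketch of the refcounted unclone pattern this patch factors into unclone_ctx(): while a context is a clone it holds a reference on its parent, and "uncloning" simply drops that reference and clears the pointer. The struct ctx type and the main() harness below are simplified stand-ins for illustration, not the kernel's perf_counter_context or its locking.

/* Simplified stand-in for the kernel's refcounted context; illustration only. */
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	int refcount;
	struct ctx *parent_ctx;		/* non-NULL while this context is a clone */
};

/* Drop one reference; free the object when the last reference goes away. */
static void put_ctx(struct ctx *c)
{
	if (--c->refcount == 0) {
		if (c->parent_ctx)
			put_ctx(c->parent_ctx);
		free(c);
	}
}

/*
 * The factored-out helper: if the context is a clone, drop the parent
 * reference and clear the pointer so the context is no longer a clone.
 */
static void unclone_ctx(struct ctx *c)
{
	if (c->parent_ctx) {
		put_ctx(c->parent_ctx);
		c->parent_ctx = NULL;
	}
}

int main(void)
{
	struct ctx *parent = calloc(1, sizeof(*parent));
	struct ctx *child = calloc(1, sizeof(*child));

	parent->refcount = 2;		/* one for us, one held by the clone */
	child->refcount = 1;
	child->parent_ctx = parent;

	unclone_ctx(child);		/* child no longer references parent */
	printf("parent refcount now %d\n", parent->refcount);

	put_ctx(parent);
	put_ctx(child);
	return 0;
}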