Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c | 12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 27960f114efd..8e81a9860a0d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -364,6 +364,7 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 	}
 
 	if (mode & PERF_CGROUP_SWIN) {
+		WARN_ON_ONCE(cpuctx->cgrp);
 		/* set cgrp before ctxsw in to
 		 * allow event_filter_match() to not
 		 * have to pass task around
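
The new WARN_ON_ONCE() asserts that cpuctx->cgrp is still NULL when the SWIN leg runs, i.e. that the matching SWOUT leg already ran and cleared it. Below is a minimal sketch of that pairing, condensed from the two branches of perf_cgroup_switch() as understood from the surrounding tree; cpu_ctx_sched_out(), cpu_ctx_sched_in() and perf_cgroup_from_task() refer to code outside these hunks and are assumptions, not part of this patch:

	if (mode & PERF_CGROUP_SWOUT) {
		cpu_ctx_sched_out(cpuctx, EVENT_ALL);
		/* cleared here, so the next SWIN must see NULL */
		cpuctx->cgrp = NULL;
	}

	if (mode & PERF_CGROUP_SWIN) {
		/* a non-NULL cgrp here means a SWOUT was missed */
		WARN_ON_ONCE(cpuctx->cgrp);
		/* set cgrp before ctxsw in so event_filter_match()
		 * can read it from cpuctx without a task argument */
		cpuctx->cgrp = perf_cgroup_from_task(task);
		cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
	}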
@@ -2423,6 +2424,14 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	if (!ctx || !ctx->nr_events)
 		goto out;
 
+	/*
+	 * We must ctxsw out cgroup events to avoid conflict
+	 * when invoking perf_task_event_sched_in() later on
+	 * in this function. Otherwise we end up trying to
+	 * ctxswin cgroup events which are already scheduled
+	 * in.
+	 */
+	perf_cgroup_sched_out(current);
 	task_ctx_sched_out(ctx, EVENT_ALL);
 
 	raw_spin_lock(&ctx->lock);
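
The perf_cgroup_sched_out(current) call added above pairs the enable-on-exec path with the SWOUT leg shown earlier. Assuming the tree at this commit, it is a thin wrapper (a sketch for context, not part of this patch):

	static inline void perf_cgroup_sched_out(struct task_struct *task)
	{
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
	}

That switch-out clears cpuctx->cgrp, so the sched-in at the end of the function starts from a clean slate.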
@@ -2447,6 +2456,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 
 	raw_spin_unlock(&ctx->lock);
 
+	/*
+	 * Also calls ctxswin for cgroup events, if any:
+	 */
 	perf_event_context_sched_in(ctx, ctx->task);
 out:
 	local_irq_restore(flags);
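
Taken together, the patched perf_event_enable_on_exec() runs in the order sketched below (condensed; the elided middle is the existing loop that enables events with attr.enable_on_exec set, which these hunks do not touch):

	local_irq_save(flags);
	...
	perf_cgroup_sched_out(current);		/* 1. cgroup events off the PMU */
	task_ctx_sched_out(ctx, EVENT_ALL);	/* 2. then the task context */

	raw_spin_lock(&ctx->lock);
	/* ... enable the attr.enable_on_exec events ... */
	raw_spin_unlock(&ctx->lock);

	/* 3. one path schedules both task and cgroup events back
	 * in; step 1 is what prevents switching in cgroup events
	 * that never left the PMU */
	perf_event_context_sched_in(ctx, ctx->task);
out:
	local_irq_restore(flags);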