author    Peter Zijlstra <peterz@infradead.org>  2016-01-08 03:29:16 -0500
committer Ingo Molnar <mingo@kernel.org>         2016-01-21 12:54:19 -0500
commit    70a0165752944e0be0b1de4a9020473079962c18
tree      8ff05d0e0829991eebbdcf56a7856f539e91c8c2
parent    7e41d17753e6e0da55d343997454dd4fbe8d28a8
perf: Fix cgroup scheduling in perf_enable_on_exec()
There is a comment that states that perf_event_context_sched_in() will
also switch in the cgroup events, but I cannot find that it does so.
Therefore all the resulting logic goes out the window too. Clean that up.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
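
For orientation, here is a condensed before/after sketch of the affected path in perf_event_enable_on_exec(), paraphrased from the hunks below (not complete kernel code):

	/* Before: a forced cgroup switch-out with no matching switch-in. */
	perf_cgroup_sched_out(current, NULL);
	raw_spin_lock(&ctx->lock);
	task_ctx_sched_out(ctx);
	/* ... enable events marked attr.enable_on_exec ... */
	raw_spin_unlock(&ctx->lock);
	perf_event_context_sched_in(ctx, ctx->task); /* does not touch cgroup events */

	/* After: the unbalanced cgroup call is simply gone. */
	raw_spin_lock(&ctx->lock);
	task_ctx_sched_out(ctx);
	/* ... enable events marked attr.enable_on_exec ... */
	raw_spin_unlock(&ctx->lock);
	perf_event_context_sched_in(ctx, ctx->task);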
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c | 31
1 file changed, 7 insertions(+), 24 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9d1195af819c..e7bda0ed8d40 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -579,13 +579,7 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
 	 * we are holding the rcu lock
 	 */
 	cgrp1 = perf_cgroup_from_task(task, NULL);
-
-	/*
-	 * next is NULL when called from perf_event_enable_on_exec()
-	 * that will systematically cause a cgroup_switch()
-	 */
-	if (next)
-		cgrp2 = perf_cgroup_from_task(next, NULL);
+	cgrp2 = perf_cgroup_from_task(next, NULL);
 
 	/*
 	 * only schedule out current cgroup events if we know
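
For context, these assignments feed a comparison just below this hunk that the patch leaves untouched; paraphrased from kernel sources of that era (not part of this diff):

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);

In the pre-patch code cgrp2 started out NULL, so a NULL next left cgrp1 != cgrp2 true and, as the deleted comment says, "systematically" forced a cgroup switch. With the NULL-next caller removed (see the last two hunks), next is always a valid task and the unconditional perf_cgroup_from_task(next, NULL) is safe.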
@@ -611,8 +605,6 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
 	 * we are holding the rcu lock
 	 */
 	cgrp1 = perf_cgroup_from_task(task, NULL);
-
-	/* prev can never be NULL */
 	cgrp2 = perf_cgroup_from_task(prev, NULL);
 
 	/*
@@ -1450,11 +1442,14 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 
 	if (is_cgroup_event(event)) {
 		ctx->nr_cgroups--;
+		/*
+		 * Because cgroup events are always per-cpu events, this will
+		 * always be called from the right CPU.
+		 */
 		cpuctx = __get_cpu_context(ctx);
 		/*
-		 * if there are no more cgroup events
-		 * then cler cgrp to avoid stale pointer
-		 * in update_cgrp_time_from_cpuctx()
+		 * If there are no more cgroup events then clear cgrp to avoid
+		 * stale pointer in update_cgrp_time_from_cpuctx().
 		 */
 		if (!ctx->nr_cgroups)
 			cpuctx->cgrp = NULL;
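
The new comment relies on cgroup events being per-cpu. For readers unfamiliar with them, a minimal userspace sketch of creating one (illustrative only, not from this patch; the cgroup path is an assumed example and error handling is abbreviated):

	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	int main(void)
	{
		struct perf_event_attr attr;
		int cgroup_fd, event_fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;

		/* With PERF_FLAG_PID_CGROUP, the pid argument is an fd of a
		 * cgroup directory (cgroup v1 perf_event controller here). */
		cgroup_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);
		if (cgroup_fd < 0) {
			perror("open cgroup");
			return 1;
		}

		/* cpu must be >= 0: the kernel rejects cpu == -1 for cgroup
		 * events, which is why they are always per-cpu and why
		 * list_del_event() above runs on the right CPU. */
		event_fd = syscall(__NR_perf_event_open, &attr, cgroup_fd,
				   0 /* cpu */, -1 /* group_fd */,
				   PERF_FLAG_PID_CGROUP);
		if (event_fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		close(event_fd);
		close(cgroup_fd);
		return 0;
	}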
@@ -3118,15 +3113,6 @@ static void perf_event_enable_on_exec(int ctxn)
 	if (!ctx || !ctx->nr_events)
 		goto out;
 
-	/*
-	 * We must ctxsw out cgroup events to avoid conflict
-	 * when invoking perf_task_event_sched_in() later on
-	 * in this function. Otherwise we end up trying to
-	 * ctxswin cgroup events which are already scheduled
-	 * in.
-	 */
-	perf_cgroup_sched_out(current, NULL);
-
 	raw_spin_lock(&ctx->lock);
 	task_ctx_sched_out(ctx);
 
@@ -3144,9 +3130,6 @@ static void perf_event_enable_on_exec(int ctxn)
 
 	raw_spin_unlock(&ctx->lock);
 
-	/*
-	 * Also calls ctxswin for cgroup events, if any:
-	 */
 	perf_event_context_sched_in(ctx, ctx->task);
 out:
 	local_irq_restore(flags);
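
With the NULL-next caller gone, the only remaining users of perf_cgroup_sched_out()/perf_cgroup_sched_in() are the scheduler's context-switch hooks, where both tasks are always valid. Roughly, paraphrased from kernel sources of that era (not part of this patch):

	/* in __perf_event_task_sched_out(): prev/next are never NULL here */
	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
		perf_cgroup_sched_out(task, next);

The per-cpu counter check keeps the cgroup logic off the context-switch fast path unless cgroup events actually exist on this CPU.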