author    Peter Zijlstra <peterz@infradead.org>  2016-01-08 03:21:40 -0500
committer Ingo Molnar <mingo@kernel.org>         2016-01-21 12:54:19 -0500
commit    7e41d17753e6e0da55d343997454dd4fbe8d28a8 (patch)
tree      1fb4b3d63f54aff3f54dd008f111ba0631e52a1e /kernel/events
parent    c994d6136738fd8b24a79f5ad8df40a6a79e2cf7 (diff)
perf: Fix cgroup event scheduling
There appears to be a problem in __perf_event_task_sched_in() wrt
cgroup event scheduling.

The normal event scheduling order is:

	CPU pinned
	Task pinned
	CPU flexible
	Task flexible

And since perf_cgroup_sched*() only schedules the cpu context, we must
call this _before_ adding the task events.

Note: double check what happens on the ctx switch optimization where the
task ctx isn't scheduled.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
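To make the ordering constraint concrete, here is a minimal userspace C
sketch of the schedule-in sequence the message describes; the enum and
function names are illustrative stand-ins, not the kernel's own:

	#include <stdio.h>

	enum ctx_kind { CPU_CTX, TASK_CTX };

	static void schedule_in(enum ctx_kind kind, const char *group)
	{
		printf("sched in: %-4s %s events\n",
		       kind == CPU_CTX ? "CPU" : "task", group);
	}

	int main(void)
	{
		/*
		 * The required order: CPU context before task context,
		 * pinned before flexible.  Cgroup events live in the CPU
		 * context, so scheduling the CPU context first is what
		 * lets them claim PMU space ahead of task events.
		 */
		schedule_in(CPU_CTX,  "pinned");
		schedule_in(TASK_CTX, "pinned");
		schedule_in(CPU_CTX,  "flexible");
		schedule_in(TASK_CTX, "flexible");
		return 0;
	}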
Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/core.c  17
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c77b05d9a37d..9d1195af819c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2806,6 +2806,16 @@ void __perf_event_task_sched_in(struct task_struct *prev,
 	struct perf_event_context *ctx;
 	int ctxn;
 
+	/*
+	 * If cgroup events exist on this CPU, then we need to check if we have
+	 * to switch in PMU state; cgroup event are system-wide mode only.
+	 *
+	 * Since cgroup events are CPU events, we must schedule these in before
+	 * we schedule in the task events.
+	 */
+	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
+		perf_cgroup_sched_in(prev, task);
+
 	for_each_task_context_nr(ctxn) {
 		ctx = task->perf_event_ctxp[ctxn];
 		if (likely(!ctx))
@@ -2813,13 +2823,6 @@ void __perf_event_task_sched_in(struct task_struct *prev,
 
 		perf_event_context_sched_in(ctx, task);
 	}
-	/*
-	 * if cgroup events exist on this CPU, then we need
-	 * to check if we have to switch in PMU state.
-	 * cgroup event are system-wide mode only
-	 */
-	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-		perf_cgroup_sched_in(prev, task);
 
 	if (atomic_read(&nr_switch_events))
 		perf_event_switch(task, prev, true);
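For illustration, a small userspace analogue of the gating pattern the
patch moves to the top of the function; the counter and helper names
below are assumptions standing in for the per-CPU perf_cgroup_events
count and the real sched-in calls:

	#include <stdatomic.h>
	#include <stdio.h>

	/* Stands in for the per-CPU perf_cgroup_events counter (assumed). */
	static atomic_int cgroup_events;

	static void cgroup_sched_in(void) { puts("cgroup (CPU context) events in"); }
	static void task_sched_in(void)   { puts("task context events in"); }

	static void sched_in(void)
	{
		/*
		 * Cgroup events first -- and only when any exist, so the
		 * common no-cgroup-events path pays just one atomic load
		 * and skips the cgroup switch entirely.
		 */
		if (atomic_load(&cgroup_events))
			cgroup_sched_in();
		task_sched_in();
	}

	int main(void)
	{
		sched_in();				/* fast path: no cgroup events */
		atomic_fetch_add(&cgroup_events, 1);	/* a cgroup event appears */
		sched_in();				/* cgroup sched-in now runs first */
		return 0;
	}

The design point the hunk encodes is the same: checking the counter
before the task-context loop keeps the cheap fast path while restoring
the CPU-before-task ordering.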