about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2016-01-08 04:45:11 -0500
committerIngo Molnar <mingo@kernel.org>2016-01-21 12:54:22 -0500
commitaee7dbc45f8aa976913de9b352fa6da816f1f3cd (patch)
tree0d7ebb18fa8e90c77bb860b9320f1b83c2108d52 /kernel
parent8833d0e286c12fd4456089a7a553faf4921e4b08 (diff)
perf: Simplify/fix perf_event_enable() event scheduling
Like perf_enable_on_exec(), perf_event_enable() event scheduling has problems respecting the context hierarchy when trying to schedule events (for example, it will try and add a pinned event without first removing existing flexible events). So simplify it by using the new ctx_resched() call which will DTRT.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/events/core.c31
1 file changed, 5 insertions(+), 26 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 12f1d4a52da9..079eb9fcaaa8 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2188,7 +2188,7 @@ static int __perf_event_enable(void *info)
 	struct perf_event_context *ctx = event->ctx;
 	struct perf_event *leader = event->group_leader;
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
-	int err;
+	struct perf_event_context *task_ctx = cpuctx->task_ctx;
 
 	/*
 	 * There's a time window between 'ctx->is_active' check
@@ -2202,7 +2202,8 @@ static int __perf_event_enable(void *info)
 	if (!ctx->is_active)
 		return -EINVAL;
 
-	raw_spin_lock(&ctx->lock);
+	perf_ctx_lock(cpuctx, task_ctx);
+	WARN_ON_ONCE(&cpuctx->ctx != ctx && task_ctx != ctx);
 	update_context_time(ctx);
 
 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
@@ -2228,32 +2229,10 @@ static int __perf_event_enable(void *info)
 	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
 		goto unlock;
 
-	if (!group_can_go_on(event, cpuctx, 1)) {
-		err = -EEXIST;
-	} else {
-		if (event == leader)
-			err = group_sched_in(event, cpuctx, ctx);
-		else
-			err = event_sched_in(event, cpuctx, ctx);
-	}
-
-	if (err) {
-		/*
-		 * If this event can't go on and it's part of a
-		 * group, then the whole group has to come off.
-		 */
-		if (leader != event) {
-			group_sched_out(leader, cpuctx, ctx);
-			perf_mux_hrtimer_restart(cpuctx);
-		}
-		if (leader->attr.pinned) {
-			update_group_times(leader);
-			leader->state = PERF_EVENT_STATE_ERROR;
-		}
-	}
-
+	ctx_resched(cpuctx, task_ctx);
 
 unlock:
-	raw_spin_unlock(&ctx->lock);
+	perf_ctx_unlock(cpuctx, task_ctx);
 
 	return 0;
 }