author     Oleg Nesterov <oleg@redhat.com>    2011-01-19 13:22:07 -0500
committer  Ingo Molnar <mingo@elte.hu>        2011-01-19 14:04:27 -0500
commit     dbe08d82ce3967ccdf459f7951d02589cf967300
tree       2f8fc95749e47ff0de4b369f298898e4db5ad391 /kernel
parent     c56eb8fb6dccb83d9fe62fd4dc00c834de9bc470
perf: Fix find_get_context() vs perf_event_exit_task() race
find_get_context() must not install the new perf_event_context
if the task has already passed perf_event_exit_task().
If nothing else, this causes a memory leak: the context starts
with ctx->refcount == 2, on the assumption that
perf_event_exit_task_context() will participate and do the
necessary put_ctx().
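A rough userspace-style sketch of that intended pairing (illustration
only, with hypothetical names; not the actual kernel helpers):

#include <stdlib.h>

/* stand-in for a freshly allocated perf_event_context */
struct demo_ctx {
	int refcount;
};

static struct demo_ctx *demo_alloc_ctx(void)
{
	struct demo_ctx *ctx = calloc(1, sizeof(*ctx));

	if (ctx)
		ctx->refcount = 2;	/* one for the caller, one for the
					 * task's perf_event_ctxp[] slot */
	return ctx;
}

static void demo_put_ctx(struct demo_ctx *ctx)
{
	if (ctx && --ctx->refcount == 0)
		free(ctx);		/* last reference frees the context */
}

The event side eventually drops one of those references;
perf_event_exit_task_context() is supposed to drop the other. If the
context gets installed after the task has already run its exit path,
that second put never happens and the context leaks.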
find_lively_task_by_vpid() checks PF_EXITING, but this buys us
nothing: by the time we call find_get_context() the task may
already be dead. In particular, cmpxchg() can succeed after the
task has already done its last schedule().
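Concretely, the old ordering allowed this interleaving:

 1. find_lively_task_by_vpid() checks PF_EXITING; it is not set yet, so we proceed.
 2. The task exits: perf_event_exit_task() runs, sees no installed context for this task, and has nothing to put.
 3. The task does its last schedule() and is gone.
 4. find_get_context() now runs cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx), which succeeds.

The context ends up installed, but the exit-side put_ctx() that was
supposed to drop the slot's reference will never run, so the context
can never be freed.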
Change find_get_context() to populate task->perf_event_ctxp[]
under task->perf_event_mutex; this way we can trust PF_EXITING,
because perf_event_exit_task() takes the same mutex.
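A minimal userspace sketch of that publish-under-mutex pattern (pthread
primitives stand in for the kernel's, and all names are hypothetical;
the second hunk of the diff below shows the actual change):

#include <errno.h>
#include <pthread.h>

struct demo_task {
	pthread_mutex_t lock;	/* stands in for task->perf_event_mutex */
	int exiting;		/* stands in for PF_EXITING */
	void *ctx;		/* stands in for perf_event_ctxp[ctxn] */
};

/* Exit path: sets the flag and tears down any context under the lock. */
static void demo_exit_task(struct demo_task *t)
{
	pthread_mutex_lock(&t->lock);
	t->exiting = 1;
	/* ... detach and drop the reference on t->ctx here ... */
	pthread_mutex_unlock(&t->lock);
}

/*
 * Install path: because the flag and the pointer are only written under
 * the same lock, it either sees "exiting" and bails out, or installs the
 * context before the exit path runs, so the exit path will find it.
 */
static int demo_install_ctx(struct demo_task *t, void *new_ctx)
{
	int err = 0;

	pthread_mutex_lock(&t->lock);
	if (t->exiting)
		err = -ESRCH;	/* task already past its exit path */
	else if (t->ctx)
		err = -EAGAIN;	/* lost a race with another installer: retry */
	else
		t->ctx = new_ctx;
	pthread_mutex_unlock(&t->lock);

	return err;
}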
Also, change perf_event_exit_task_context() to use
rcu_dereference(). This is probably not strictly needed, but since
find_get_context() can race with
setup_new_exec()->perf_event_exit_task() with or without this
change, rcu_dereference() is the clearer choice.
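After this patch the two sides of that pairing read as follows (taken
from the hunks below):

	/* find_get_context(), under task->perf_event_mutex */
	rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);

	/* perf_event_exit_task_context() */
	child_ctx = rcu_dereference(child->perf_event_ctxp[ctxn]);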
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Prasad <prasad@linux.vnet.ibm.com>
Cc: Roland McGrath <roland@redhat.com>
LKML-Reference: <20110119182207.GB12183@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_event.c  34
1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 84522c796987..4ec55ef5810c 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2201,13 +2201,6 @@ find_lively_task_by_vpid(pid_t vpid)
 	if (!task)
 		return ERR_PTR(-ESRCH);
 
-	/*
-	 * Can't attach events to a dying task.
-	 */
-	err = -ESRCH;
-	if (task->flags & PF_EXITING)
-		goto errout;
-
 	/* Reuse ptrace permission checks for now. */
 	err = -EACCES;
 	if (!ptrace_may_access(task, PTRACE_MODE_READ))
@@ -2268,14 +2261,27 @@ retry:
 
 		get_ctx(ctx);
 
-		if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
-			/*
-			 * We raced with some other task; use
-			 * the context they set.
-			 */
+		err = 0;
+		mutex_lock(&task->perf_event_mutex);
+		/*
+		 * If it has already passed perf_event_exit_task().
+		 * we must see PF_EXITING, it takes this mutex too.
+		 */
+		if (task->flags & PF_EXITING)
+			err = -ESRCH;
+		else if (task->perf_event_ctxp[ctxn])
+			err = -EAGAIN;
+		else
+			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
+		mutex_unlock(&task->perf_event_mutex);
+
+		if (unlikely(err)) {
 			put_task_struct(task);
 			kfree(ctx);
-			goto retry;
+
+			if (err == -EAGAIN)
+				goto retry;
+			goto errout;
 		}
 	}
 
@@ -6127,7 +6133,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 * scheduled, so we are now safe from rescheduling changing
 	 * our context.
 	 */
-	child_ctx = child->perf_event_ctxp[ctxn];
+	child_ctx = rcu_dereference(child->perf_event_ctxp[ctxn]);
 	task_ctx_sched_out(child_ctx, EVENT_ALL);
 
 	/*