author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-06-19 11:39:33 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-06-19 11:57:36 -0400
commit		b49a9e7e72103ea91946453c19703a4dfa1994fe
tree		7e9e74881384c581afca56cc397901f71e904c8b /kernel
parent		0c87197142427063e096f11603543ca874045952
perf_counter: Close race in perf_lock_task_context()
perf_lock_task_context() is buggy because it can return a dead
context.
The RCU read lock in perf_lock_task_context() only guarantees
that the memory won't get freed; it doesn't guarantee that the
object is valid (in our case, refcount > 0).

Therefore we can return a locked object that can get freed the
moment we release the RCU read lock.
perf_pin_task_context() then increases the refcount and does an
unlock on freed memory.
If the refcount had already dropped to zero, that increment
causes a double free: the context is freed once by the owner
that dropped the last real reference, and a second time when
our bogus reference is put.
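In outline, the pre-patch sequence looks like this (a simplified
sketch that elides the retry loop, not the literal code):

    /* perf_lock_task_context(), pre-patch: */
    rcu_read_lock();
    ctx = rcu_dereference(task->perf_counter_ctxp);
    spin_lock_irqsave(&ctx->lock, *flags);
    /* nothing checks ctx->refcount here */
    rcu_read_unlock();      /* a pending RCU callback may now free ctx */

    /* perf_pin_task_context(), pre-patch: */
    ++ctx->pin_count;
    get_ctx(ctx);           /* refcount increment on freed memory */
    spin_unlock_irqrestore(&ctx->lock, flags);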
Amend this by including the get_ctx() functionality in
perf_lock_task_context() (all users already did this later
anyway), and return a NULL context when the found one is
already dead.
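After this change the lookup has roughly the following shape (a
simplified sketch of the patched function; the real code also
carries a comment about cloned contexts):

    static struct perf_counter_context *
    perf_lock_task_context(struct task_struct *task, unsigned long *flags)
    {
        struct perf_counter_context *ctx;

        rcu_read_lock();
    retry:
        ctx = rcu_dereference(task->perf_counter_ctxp);
        if (ctx) {
            spin_lock_irqsave(&ctx->lock, *flags);
            if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
                spin_unlock_irqrestore(&ctx->lock, *flags);
                goto retry;
            }

            /* still under rcu_read_lock(): safe to test the refcount */
            if (!atomic_inc_not_zero(&ctx->refcount)) {
                spin_unlock_irqrestore(&ctx->lock, *flags);
                ctx = NULL;     /* found a dead context */
            }
        }
        rcu_read_unlock();
        return ctx;
    }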
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
 kernel/perf_counter.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 8d4f0dd41c22..adb6ae506d5b 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -175,6 +175,11 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
 			spin_unlock_irqrestore(&ctx->lock, *flags);
 			goto retry;
 		}
+
+		if (!atomic_inc_not_zero(&ctx->refcount)) {
+			spin_unlock_irqrestore(&ctx->lock, *flags);
+			ctx = NULL;
+		}
 	}
 	rcu_read_unlock();
 	return ctx;
@@ -193,7 +198,6 @@ static struct perf_counter_context *perf_pin_task_context(struct task_struct *ta
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
 		++ctx->pin_count;
-		get_ctx(ctx);
 		spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 	return ctx;
@@ -1459,11 +1463,6 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 			put_ctx(parent_ctx);
 			ctx->parent_ctx = NULL;		/* no longer a clone */
 		}
-		/*
-		 * Get an extra reference before dropping the lock so that
-		 * this context won't get freed if the task exits.
-		 */
-		get_ctx(ctx);
 		spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
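With this in place, any non-NULL context returned by
perf_lock_task_context() already holds a reference, so the caller
balances it with put_ctx() instead of taking its own; roughly (a
hypothetical caller following the post-patch convention):

    struct perf_counter_context *ctx;
    unsigned long flags;

    ctx = perf_lock_task_context(task, &flags);
    if (ctx) {
        /* ... operate on ctx with ctx->lock held ... */
        spin_unlock_irqrestore(&ctx->lock, flags);
        put_ctx(ctx);   /* drop the reference the lookup took */
    }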