author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-01-17 07:45:37 -0500
committer	Ingo Molnar <mingo@elte.hu>	2011-01-18 09:10:35 -0500
commit	c5ed5145591774bd9a2960ba4ca45a02fc70aad1
tree	f7e53164a33e1a0a41a4b3a7b508835361e39689
parent	ad7f4e3f7b966ac09c8f98dbc5024813a1685775
perf: Fix contexted inheritance
Linus reported that the RCU lockdep annotation bits triggered for this
rcu_dereference() because we're not holding rcu_read_lock().
Going over the code, I cannot convince myself it's correct:
- holding a ref on the parent_ctx doesn't prevent it from being uncloned
  concurrently (as the comment says), so we can race with a free.
- holding parent_ctx->mutex doesn't prevent that free from taking place
  either; at best it keeps parent_ctx itself from being freed (see the
  unclone_ctx() sketch below).
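To make the second point concrete: the unclone runs with ctx->lock held on
the other side (e.g. on the find_get_context() path), so neither a reference
on parent_ctx nor parent_ctx->mutex gets in its way. A simplified sketch of
that era's unclone_ctx(), paraphrased rather than quoted:

static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		/*
		 * Dropping this reference can free parent_ctx->parent_ctx,
		 * which is exactly what perf_event_init_context() is
		 * dereferencing on the inheritance side.
		 */
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}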
I.e. the warning is correct. To fix the bug, serialize against the
unclone_ctx() call by extending the reach of the parent_ctx->lock.
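Schematically (just a rereading of the patch below, with unrelated lines
elided as "..."), the unlock moves down so that the clone check runs entirely
inside the parent_ctx->lock critical section:

	/* before: the lock only covered the rotate_disable update */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	...
	cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);	/* can race with unclone_ctx() */

	/* after: the same critical section now also covers the clone check */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;
	...
	cloned_ctx = parent_ctx->parent_ctx;	/* stable while the lock is held */
	...
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);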
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 kernel/perf_event.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index b782b7a79f00..76be4c7bf08e 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -6494,7 +6494,6 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 
 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
 	parent_ctx->rotate_disable = 0;
-	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
 
 	child_ctx = child->perf_event_ctxp[ctxn];
 
@@ -6502,12 +6501,11 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 		/*
 		 * Mark the child context as a clone of the parent
 		 * context, or of whatever the parent is a clone of.
-		 * Note that if the parent is a clone, it could get
-		 * uncloned at any point, but that doesn't matter
-		 * because the list of events and the generation
-		 * count can't have changed since we took the mutex.
+		 *
+		 * Note that if the parent is a clone, the holding of
+		 * parent_ctx->lock avoids it from being uncloned.
 		 */
-		cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
+		cloned_ctx = parent_ctx->parent_ctx;
 		if (cloned_ctx) {
 			child_ctx->parent_ctx = cloned_ctx;
 			child_ctx->parent_gen = parent_ctx->parent_gen;
@@ -6518,6 +6516,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 		get_ctx(child_ctx->parent_ctx);
 	}
 
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
 	mutex_unlock(&parent_ctx->mutex);
 
 	perf_unpin_context(parent_ctx);