about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
author	Oleg Nesterov <oleg@redhat.com>	2015-05-30 16:04:25 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2015-06-29 15:35:28 -0400
commit	12709f95fd8ca75ec1c4dbed945751b4d0d1d05f (patch)
tree	2e43fbe7bf62d7f8bccb93efa17fdab98b809fde /kernel
parent	51fbd77c171936aed07cf5081741d8e3437d683b (diff)
perf: Fix ring_buffer_attach() RCU sync, again
commit 2f993cf093643b98477c421fa2b9a98dcc940323 upstream.

While looking for other users of get_state/cond_sync. I Found
ring_buffer_attach() and it looks obviously buggy?

Don't we need to ensure that we have "synchronize" _between_
list_del() and list_add() ?

IOW. Suppose that ring_buffer_attach() preempts right after
get_state_synchronize_rcu() and gp completes before spin_lock().

In this case cond_synchronize_rcu() does nothing and we reuse
->rb_entry without waiting for gp in between?

It also moves the ->rcu_pending check under "if (rb)", to make it
more readable imo.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dave@stgolabs.net
Cc: der.herr@hofr.at
Cc: josh@joshtriplett.org
Cc: tj@kernel.org
Fixes: b69cf53640da ("perf: Fix a race between ring_buffer_detach() and ring_buffer_attach()")
Link: http://lkml.kernel.org/r/20150530200425.GA15748@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/core.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index eddf1ed4155e..0ceb386777ae 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4331,20 +4331,20 @@ static void ring_buffer_attach(struct perf_event *event,
4331 WARN_ON_ONCE(event->rcu_pending); 4331 WARN_ON_ONCE(event->rcu_pending);
4332 4332
4333 old_rb = event->rb; 4333 old_rb = event->rb;
4334 event->rcu_batches = get_state_synchronize_rcu();
4335 event->rcu_pending = 1;
4336
4337 spin_lock_irqsave(&old_rb->event_lock, flags); 4334 spin_lock_irqsave(&old_rb->event_lock, flags);
4338 list_del_rcu(&event->rb_entry); 4335 list_del_rcu(&event->rb_entry);
4339 spin_unlock_irqrestore(&old_rb->event_lock, flags); 4336 spin_unlock_irqrestore(&old_rb->event_lock, flags);
4340 }
4341 4337
4342 if (event->rcu_pending && rb) { 4338 event->rcu_batches = get_state_synchronize_rcu();
4343 cond_synchronize_rcu(event->rcu_batches); 4339 event->rcu_pending = 1;
4344 event->rcu_pending = 0;
4345 } 4340 }
4346 4341
4347 if (rb) { 4342 if (rb) {
4343 if (event->rcu_pending) {
4344 cond_synchronize_rcu(event->rcu_batches);
4345 event->rcu_pending = 0;
4346 }
4347
4348 spin_lock_irqsave(&rb->event_lock, flags); 4348 spin_lock_irqsave(&rb->event_lock, flags);
4349 list_add_rcu(&event->rb_entry, &rb->event_list); 4349 list_add_rcu(&event->rb_entry, &rb->event_list);
4350 spin_unlock_irqrestore(&rb->event_lock, flags); 4350 spin_unlock_irqrestore(&rb->event_lock, flags);