| author | Oleg Nesterov <oleg@redhat.com> | 2015-02-12 14:59:13 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2015-02-18 08:27:40 -0500 |
| commit | bc9560155f4063bbc9be71bd69d6726d41b47653 | |
| tree | 9d4be1d4a9526b0b0d9e74cfb335ad696195685f /kernel | |
| parent | 06b1f8083d6ed379ec1207a96339f23e8f7abfcf | |
sched/completion: Serialize completion_done() with complete()
Commit de30ec47302c ("sched/completion: Remove unnecessary ->wait.lock
serialization when reading completion state") was not correct: without the
lock/unlock pair, a polling loop such as the one in
stop_machine_from_inactive_cpu()

        while (!completion_done())
                cpu_relax();

can return before complete() has finished the spin_unlock() that still
writes to the completion's memory, so the caller may free or reuse that
memory while complete() is touching it. Restore the serialization by
waiting for ->wait.lock to be released, using spin_unlock_wait().
While at it, change try_wait_for_completion() to use READ_ONCE().
Reported-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reported-by: Davidlohr Bueso <dave@stgolabs.net>
Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
[ Added a comment with the barrier. ]
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nicholas Mc Guire <der.herr@hofr.at>
Cc: raghavendra.kt@linux.vnet.ibm.com
Cc: waiman.long@hp.com
Fixes: de30ec47302c ("sched/completion: Remove unnecessary ->wait.lock serialization when reading completion state")
Link: http://lkml.kernel.org/r/20150212195913.GA30430@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
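To make the failure mode described in the changelog above concrete, here is a minimal sketch of the polling pattern this patch makes safe again. It is illustrative only: struct work_ctx, producer(), consumer() and queue_producer() are hypothetical names rather than kernel code; only complete(), init_completion(), completion_done() and cpu_relax() are real kernel APIs.

    /* Illustrative sketch only; not taken from the kernel tree. */
    #include <linux/completion.h>
    #include <linux/sched.h>            /* cpu_relax() */

    struct work_ctx {
            struct completion done;
    };

    /* Hypothetical helper: arranges for producer(ctx) to run on another CPU. */
    void queue_producer(struct work_ctx *ctx);

    /* The complete() side, running concurrently with consumer(). */
    void producer(struct work_ctx *ctx)
    {
            /*
             * complete() takes ctx->done.wait.lock, marks the completion as
             * done, wakes any waiters, then releases the lock; that final
             * unlock still writes into ctx->done.
             */
            complete(&ctx->done);
    }

    /* The polling side, like stop_machine_from_inactive_cpu(). */
    void consumer(void)
    {
            struct work_ctx ctx;        /* completion lives on this stack frame */

            init_completion(&ctx.done);
            queue_producer(&ctx);

            while (!completion_done(&ctx.done))
                    cpu_relax();

            /*
             * Before this patch, completion_done() could return true while
             * producer() was still inside the final unlock, so returning here
             * (and reusing this stack frame) raced with a write to ctx.done.
             * With the spin_unlock_wait() added in the diff below, reaching
             * this point guarantees complete() has fully released the lock.
             */
    }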
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/completion.c | 19 |
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index 7052d3fd4e7b..8d0f35debf35 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -274,7 +274,7 @@ bool try_wait_for_completion(struct completion *x)
          * first without taking the lock so we can
          * return early in the blocking case.
          */
-        if (!ACCESS_ONCE(x->done))
+        if (!READ_ONCE(x->done))
                 return 0;
 
         spin_lock_irqsave(&x->wait.lock, flags);
@@ -297,6 +297,21 @@ EXPORT_SYMBOL(try_wait_for_completion);
  */
 bool completion_done(struct completion *x)
 {
-        return !!ACCESS_ONCE(x->done);
+        if (!READ_ONCE(x->done))
+                return false;
+
+        /*
+         * If ->done, we need to wait for complete() to release ->wait.lock
+         * otherwise we can end up freeing the completion before complete()
+         * is done referencing it.
+         *
+         * The RMB pairs with complete()'s RELEASE of ->wait.lock and orders
+         * the loads of ->done and ->wait.lock such that we cannot observe
+         * the lock before complete() acquires it while observing the ->done
+         * after it's acquired the lock.
+         */
+        smp_rmb();
+        spin_unlock_wait(&x->wait.lock);
+        return true;
 }
 EXPORT_SYMBOL(completion_done);
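For context, the complete() side that completion_done() must now serialize against lives in the same file; at the time of this commit it looked roughly like the sketch below (paraphrased for illustration, not part of this diff). The release of ->wait.lock at the end of complete() is what the new smp_rmb() plus spin_unlock_wait() sequence pairs with:

    /* Paraphrased sketch of complete() in kernel/sched/completion.c; not part of this diff. */
    void complete(struct completion *x)
    {
            unsigned long flags;

            spin_lock_irqsave(&x->wait.lock, flags);        /* ACQUIRE ->wait.lock */
            x->done++;                                      /* the store READ_ONCE(x->done) observes */
            __wake_up_locked(&x->wait, TASK_NORMAL, 1);     /* wake any sleeping waiters */
            spin_unlock_irqrestore(&x->wait.lock, flags);   /* RELEASE: the last write into *x */
    }

Because ->done is only set with ->wait.lock held, a completion_done() caller that observes ->done as set and then, after the smp_rmb(), spins until ->wait.lock is free cannot be seeing a stale, pre-acquisition view of the lock; once spin_unlock_wait() returns, complete() has released the lock and the completion's memory may be freed or reused.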
