diff options
author	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-09 18:24:03 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-09 18:24:03 -0500
commit	8308756f45a12e2ff4f7749c2694fc83cdef0be9 (patch)
tree	618c8f955d22927ebe73d94305d19b37c6ab65c9 /kernel/sched
parent	23e8fe2e16587494268510c1bc9f6952f50f0311 (diff)
parent	afffc6c1805d98e08e778cddb644a666e78cfcfd (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking updates from Ingo Molnar:
"The main changes are:
- mutex, completions and rtmutex micro-optimizations
- lock debugging fix
- various cleanups in the MCS and the futex code"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
locking/rtmutex: Optimize setting task running after being blocked
locking/rwsem: Use task->state helpers
sched/completion: Add lock-free checking of the blocking case
sched/completion: Remove unnecessary ->wait.lock serialization when reading completion state
locking/mutex: Explicitly mark task as running after wakeup
futex: Fix argument handling in futex_lock_pi() calls
doc: Fix misnamed FUTEX_CMP_REQUEUE_PI op constants
locking/Documentation: Update code path
softirq/preempt: Add missing current->preempt_disable_ip update
locking/osq: No need for load/acquire when acquire-polling
locking/mcs: Better differentiate between MCS variants
locking/mutex: Introduce ww_mutex_set_context_slowpath()
locking/mutex: Move MCS related comments to proper location
locking/mutex: Checking the stamp is WW only
Diffstat (limited to 'kernel/sched')
 kernel/sched/completion.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index 607f852b4d04..7052d3fd4e7b 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -268,6 +268,15 @@ bool try_wait_for_completion(struct completion *x)
 	unsigned long flags;
 	int ret = 1;
 
+	/*
+	 * Since x->done will need to be locked only
+	 * in the non-blocking case, we check x->done
+	 * first without taking the lock so we can
+	 * return early in the blocking case.
+	 */
+	if (!ACCESS_ONCE(x->done))
+		return 0;
+
 	spin_lock_irqsave(&x->wait.lock, flags);
 	if (!x->done)
 		ret = 0;
@@ -288,13 +297,6 @@ EXPORT_SYMBOL(try_wait_for_completion);
  */
 bool completion_done(struct completion *x)
 {
-	unsigned long flags;
-	int ret = 1;
-
-	spin_lock_irqsave(&x->wait.lock, flags);
-	if (!x->done)
-		ret = 0;
-	spin_unlock_irqrestore(&x->wait.lock, flags);
-	return ret;
+	return !!ACCESS_ONCE(x->done);
 }
 EXPORT_SYMBOL(completion_done);