author		Jason Low <jason.low2@hp.com>		2014-09-16 20:16:57 -0400
committer	Ingo Molnar <mingo@kernel.org>		2014-10-03 00:09:29 -0400
commit		debfab74e453f079cd8b12b0604387a8c510ef3a
tree		bab0d5cc8f56dde5ad91d409683aac5d7fec8b47 /kernel/locking/rwsem-xadd.c
parent		db0e716a1512179e8374a74c1f3184e9ce15d138
locking/rwsem: Avoid double checking before try acquiring write lock
Commit 9b0fc9c09f1b ("rwsem: skip initial trylock in rwsem_down_write_failed")
checks whether there are known active lockers in order to avoid attempting
the write trylock with an expensive cmpxchg() when it would likely fail.

However, a later patch added a direct check for
sem->count == RWSEM_WAITING_BIAS right before that cmpxchg(), so the
earlier check from commit 9b0fc9c09f1b now only adds overhead.

This patch drops the redundant check so that we only test whether
count == RWSEM_WAITING_BIAS.

Also, add a comment explaining why we do this "extra check" of count
before the cmpxchg().
Signed-off-by: Jason Low <jason.low2@hp.com>
Acked-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Cc: Chegu Vinod <chegu_vinod@hp.com>
Cc: Peter Hurley <peter@hurleysoftware.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1410913017.2447.22.camel@j-VirtualBox
Signed-off-by: Ingo Molnar <mingo@kernel.org>
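
[Editor's illustration] The pattern this commit settles on -- read the counter
cheaply first, and only then attempt the expensive atomic -- can be sketched in
userspace C11. This is an illustrative analogue, not kernel code: the
struct sketch_rwsem type and try_write_lock() helper are invented for the
example, and the constants merely mirror the 64-bit rwsem-xadd bias layout.

/*
 * Userspace sketch (C11) of the check-before-cmpxchg pattern.
 * Illustrative only; names and types are not from the kernel source.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define WAITING_BIAS		(-(1L << 32))		/* waiters queued, no active lockers */
#define ACTIVE_WRITE_BIAS	(WAITING_BIAS + 1L)	/* one active writer, waiters still queued */

struct sketch_rwsem {
	_Atomic long count;
};

static bool try_write_lock(struct sketch_rwsem *sem)
{
	long expected = WAITING_BIAS;

	/*
	 * Cheap plain load first: if the count shows active lockers,
	 * skip the compare-and-swap entirely rather than issuing a
	 * CAS that is guaranteed to fail and would only bounce the
	 * cache line between CPUs in exclusive mode.
	 */
	if (atomic_load_explicit(&sem->count, memory_order_relaxed) != WAITING_BIAS)
		return false;

	/* One atomic attempt; on failure the caller re-queues and retries. */
	return atomic_compare_exchange_strong(&sem->count, &expected,
					      ACTIVE_WRITE_BIAS);
}

The guard load pays off because a cmpxchg must take the cache line exclusively
even when it fails, whereas a plain read of a shared line is comparatively cheap
under contention.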
Diffstat (limited to 'kernel/locking/rwsem-xadd.c')
-rw-r--r--	kernel/locking/rwsem-xadd.c | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 12166ec9b7e7..7628c3fc37ca 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -250,16 +250,18 @@ EXPORT_SYMBOL(rwsem_down_read_failed);
 
 static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
 {
-	if (!(count & RWSEM_ACTIVE_MASK)) {
-		/* try acquiring the write lock */
-		if (sem->count == RWSEM_WAITING_BIAS &&
-		    cmpxchg(&sem->count, RWSEM_WAITING_BIAS,
-			    RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
-			if (!list_is_singular(&sem->wait_list))
-				rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
-			return true;
-		}
+	/*
+	 * Try acquiring the write lock. Check count first in order
+	 * to reduce unnecessary expensive cmpxchg() operations.
+	 */
+	if (count == RWSEM_WAITING_BIAS &&
+	    cmpxchg(&sem->count, RWSEM_WAITING_BIAS,
+		    RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
+		if (!list_is_singular(&sem->wait_list))
+			rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
+		return true;
 	}
+
 	return false;
 }
 
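
[Editor's illustration] Why a single count == RWSEM_WAITING_BIAS test subsumes
the old !(count & RWSEM_ACTIVE_MASK) guard falls out of the bias arithmetic.
Below is a minimal standalone check, assuming the 64-bit constants as defined
in the generic rwsem header of this era.

/* Standalone check of the bias arithmetic (assumes the 64-bit values). */
#include <assert.h>

#define RWSEM_ACTIVE_BIAS	0x00000001L
#define RWSEM_ACTIVE_MASK	0xffffffffL
#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK - 1)
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	/*
	 * RWSEM_WAITING_BIAS has no active bits set, so testing
	 * count == RWSEM_WAITING_BIAS already implies the old
	 * !(count & RWSEM_ACTIVE_MASK) condition.
	 */
	assert((RWSEM_WAITING_BIAS & RWSEM_ACTIVE_MASK) == 0);

	/*
	 * A successful trylock accounts for one active writer while
	 * keeping the waiting bias for the still-queued tasks.
	 */
	assert((RWSEM_ACTIVE_WRITE_BIAS & RWSEM_ACTIVE_MASK) == RWSEM_ACTIVE_BIAS);

	return 0;
}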