author     Michel Lespinasse <walken@google.com>            2013-05-07 09:45:57 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-05-07 10:20:16 -0400
commit     9b0fc9c09f1b262b7fe697eba6b05095d78850e5
tree       5fb25ae32c9aed77f604858a77a27cd4b37bbfc6
parent     a7d2c573ae7fad1b2c877d1a1342fa5bb0d6478c
rwsem: skip initial trylock in rwsem_down_write_failed
We can skip the initial trylock in rwsem_down_write_failed() if there
are known active lockers already, thus saving one likely-to-fail
cmpxchg.
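As a minimal sketch of the idea (userspace C11 atomics, with a hypothetical lock_count counter and an invented ACTIVE_MASK/WRITE_BIAS layout rather than the real rwsem encoding), the point is simply to read the counter first and only issue the compare-and-swap when it can possibly succeed:

/* Illustration only: "check before cmpxchg" with an invented counter layout. */
#include <stdatomic.h>
#include <stdbool.h>

#define ACTIVE_MASK	0x0000ffffL	/* hypothetical: low bits count active lockers */
#define WRITE_BIAS	0x00010000L	/* hypothetical: set when a writer owns the lock */

static atomic_long lock_count;		/* stand-in for sem->count */

/* Attempt the write lock, but skip the atomic when it is bound to fail. */
static bool try_write_lock(long seen)
{
	/* Active lockers present: the compare-exchange below cannot
	 * succeed, so do not pay for it at all. */
	if (seen & ACTIVE_MASK)
		return false;

	long expected = seen;
	return atomic_compare_exchange_strong(&lock_count, &expected,
					      seen | WRITE_BIAS);
}

In the patch this test is the new "if (!(count & RWSEM_ACTIVE_MASK))" guard wrapped around the existing trylock block.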
Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Peter Hurley <peter@hurleysoftware.com>
Acked-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  lib/rwsem.c | 17
1 files changed, 9 insertions, 8 deletions
diff --git a/lib/rwsem.c b/lib/rwsem.c
index edf3d9ca670e..0d50e46d5b0c 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -216,14 +216,15 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 	/* wait until we successfully acquire the lock */
 	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 	while (true) {
-
-		/* Try acquiring the write lock. */
-		count = RWSEM_ACTIVE_WRITE_BIAS;
-		if (!list_is_singular(&sem->wait_list))
-			count += RWSEM_WAITING_BIAS;
-		if (cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
-						RWSEM_WAITING_BIAS)
-			break;
+		if (!(count & RWSEM_ACTIVE_MASK)) {
+			/* Try acquiring the write lock. */
+			count = RWSEM_ACTIVE_WRITE_BIAS;
+			if (!list_is_singular(&sem->wait_list))
+				count += RWSEM_WAITING_BIAS;
+			if (cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
+							RWSEM_WAITING_BIAS)
+				break;
+		}
 
 		raw_spin_unlock_irq(&sem->wait_lock);
 
@@ -231,7 +232,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 		do {
 			schedule();
 			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-		} while (sem->count & RWSEM_ACTIVE_MASK);
+		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);
 
 		raw_spin_lock_irq(&sem->wait_lock);
 	}
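The second hunk is what keeps that guard accurate: the sleep loop now reloads sem->count into the local count each time the waiter wakes, so the test at the top of the loop always inspects a fresh value. A hedged userspace sketch of that loop shape, reusing the invented names from the example above (sched_yield() merely stands in for schedule()):

/* Illustration only: simplified waiter loop with the same invented layout. */
#include <stdatomic.h>
#include <sched.h>

#define ACTIVE_MASK	0x0000ffffL
#define WRITE_BIAS	0x00010000L

static atomic_long lock_count;

static void write_lock_slowpath(void)
{
	long seen = atomic_load(&lock_count);

	for (;;) {
		if (!(seen & ACTIVE_MASK)) {
			/* Trylock is only attempted when it can succeed. */
			long expected = seen;
			if (atomic_compare_exchange_strong(&lock_count, &expected,
							   seen | WRITE_BIAS))
				return;
		}

		/* Wait for the active lockers to drain, refreshing the
		 * snapshot so the check above never acts on a stale value. */
		do {
			sched_yield();
		} while ((seen = atomic_load(&lock_count)) & ACTIVE_MASK);
	}
}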