author		Davidlohr Bueso <dave@stgolabs.net>	2015-01-30 04:14:26 -0500
committer	Ingo Molnar <mingo@kernel.org>	2015-02-18 10:57:16 -0500
commit		b3fd4f03ca0b9952221f39ae6790e698bf4b39e7 (patch)
tree		80c7076b97961309b8d3fb82fe5cae9889e07139 /kernel/locking/rwsem-xadd.c
parent		7a215f89a0335582292ec6f3edaa3abd570da75a (diff)
locking/rwsem: Avoid deceiving lock spinners
When readers hold the semaphore, the ->owner is nil. As such,
and unlike mutexes, '!owner' does not necessarily imply that
the lock is free. This can cause writers to spin excessively,
as they are misled into thinking they have a chance of
acquiring the lock, instead of blocking.
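For reference, a minimal sketch of the 64-bit rwsem-xadd count
layout (constant names as in the rwsem.h headers of this era; the
two-reader state below is a hypothetical example) shows why a nil
owner can coexist with a held lock:

/* Sketch: 64-bit rwsem-xadd count encoding */
#define RWSEM_UNLOCKED_VALUE	0x00000000L
#define RWSEM_ACTIVE_BIAS	0x00000001L
#define RWSEM_ACTIVE_MASK	0xffffffffL
#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK - 1)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/*
 * Hypothetical state: two readers hold the semaphore.
 *   sem->count == 2 * RWSEM_ACTIVE_READ_BIAS   (nonzero: lock is held)
 *   sem->owner == NULL                         (readers never set it)
 * A writer treating '!owner' as 'lock is free' spins for nothing.
 */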
This patch therefore adds a counter check for the case where the
owner is not set by the time we have broken out of the loop.
Otherwise, we can return true, as a new owner holds the lock and
we thus want to continue spinning. While at it, make
rwsem_spin_on_owner() less ambiguous and return right away under
need_resched() conditions.
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Jason Low <jason.low2@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michel Lespinasse <walken@google.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Link: http://lkml.kernel.org/r/1422609267-15102-5-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking/rwsem-xadd.c')
-rw-r--r--	kernel/locking/rwsem-xadd.c	21
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 07713e5d9713..1c0d11e8ce34 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -337,21 +337,30 @@ static inline bool owner_running(struct rw_semaphore *sem,
 static noinline
 bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
 {
+	long count;
+
 	rcu_read_lock();
 	while (owner_running(sem, owner)) {
-		if (need_resched())
-			break;
+		/* abort spinning when need_resched */
+		if (need_resched()) {
+			rcu_read_unlock();
+			return false;
+		}
 
 		cpu_relax_lowlatency();
 	}
 	rcu_read_unlock();
 
+	if (READ_ONCE(sem->owner))
+		return true; /* new owner, continue spinning */
+
 	/*
-	 * We break out the loop above on need_resched() or when the
-	 * owner changed, which is a sign for heavy contention. Return
-	 * success only when sem->owner is NULL.
+	 * When the owner is not set, the lock could be free or
+	 * held by readers. Check the counter to verify the
+	 * state.
 	 */
-	return sem->owner == NULL;
+	count = READ_ONCE(sem->count);
+	return (count == 0 || count == RWSEM_WAITING_BIAS);
 }
 
 static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
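As a standalone illustration (not kernel code; worth_spinning() is
a hypothetical helper wrapping the test the patch adds), the new
counter check only keeps a writer spinning when no task actively
holds the lock, with or without sleepers queued:

#include <stdio.h>

#define RWSEM_ACTIVE_MASK	0xffffffffL
#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK - 1)
#define RWSEM_ACTIVE_READ_BIAS	0x00000001L

/* Same test as the patch: true when no task actively holds the lock */
static int worth_spinning(long count)
{
	return count == 0 || count == RWSEM_WAITING_BIAS;
}

int main(void)
{
	/* free lock, no waiters: keep spinning, we may grab it */
	printf("free:         %d\n", worth_spinning(0));
	/* no active lockers, but sleepers on the wait list: still grabbable */
	printf("free+waiters: %d\n", worth_spinning(RWSEM_WAITING_BIAS));
	/* two active readers: owner is NULL yet the lock is held */
	printf("2 readers:    %d\n", worth_spinning(2 * RWSEM_ACTIVE_READ_BIAS));
	return 0;
}

Returning false in the reader-held case sends the writer to the
slow path to block, instead of burning cycles behind readers it
cannot see through ->owner.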