-rw-r--r-- | kernel/locking/rwsem-xadd.c | 27 | +++++++++++++++++----------
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 1c0d11e8ce34..e4ad019e23f5 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -298,23 +298,30 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 {
 	struct task_struct *owner;
-	bool on_cpu = false;
+	bool ret = true;
 
 	if (need_resched())
 		return false;
 
 	rcu_read_lock();
 	owner = ACCESS_ONCE(sem->owner);
-	if (owner)
-		on_cpu = owner->on_cpu;
-	rcu_read_unlock();
+	if (!owner) {
+		long count = ACCESS_ONCE(sem->count);
+		/*
+		 * If sem->owner is not set, yet we have just recently entered the
+		 * slowpath with the lock being active, then there is a possibility
+		 * reader(s) may have the lock. To be safe, bail spinning in these
+		 * situations.
+		 */
+		if (count & RWSEM_ACTIVE_MASK)
+			ret = false;
+		goto done;
+	}
 
-	/*
-	 * If sem->owner is not set, yet we have just recently entered the
-	 * slowpath, then there is a possibility reader(s) may have the lock.
-	 * To be safe, avoid spinning in these situations.
-	 */
-	return on_cpu;
+	ret = owner->on_cpu;
+done:
+	rcu_read_unlock();
+	return ret;
 }
 
 static inline bool owner_running(struct rw_semaphore *sem,
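
For illustration, a stand-alone sketch (not kernel code) of the spin/no-spin decision the patched rwsem_can_spin_on_owner() makes: spin while a writer owner is running on a CPU, and, when no owner is recorded, spin only if the count word shows no active lockers (otherwise readers may hold the lock and spinning is bailed). The struct fake_rwsem and struct fake_task types, FAKE_ACTIVE_MASK constant, and can_spin_on_owner() helper are hypothetical stand-ins for the kernel's rw_semaphore, task_struct, RWSEM_ACTIVE_MASK, and the patched function; the need_resched() and RCU handling are omitted.

/*
 * Simplified user-space model of the decision logic above.
 * All names here are hypothetical stand-ins, not kernel symbols.
 */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_ACTIVE_MASK 0x0000ffffL	/* stands in for RWSEM_ACTIVE_MASK */

struct fake_task {
	bool on_cpu;			/* is the owning writer currently running? */
};

struct fake_rwsem {
	long count;			/* reader/writer count word */
	struct fake_task *owner;	/* writer that holds the lock, or NULL */
};

/* Decide whether an optimistic spinner should spin on this lock. */
static bool can_spin_on_owner(const struct fake_rwsem *sem)
{
	const struct fake_task *owner = sem->owner;

	if (!owner) {
		/*
		 * No writer owner recorded: if the count still shows active
		 * lockers, readers may hold the lock, so be conservative and
		 * do not spin. If the lock looks free, spinning is allowed.
		 */
		return !(sem->count & FAKE_ACTIVE_MASK);
	}

	/* A writer owns the lock: spin only while it is on a CPU. */
	return owner->on_cpu;
}

int main(void)
{
	struct fake_task writer = { .on_cpu = true };
	struct fake_rwsem reader_held = { .count = 1, .owner = NULL };
	struct fake_rwsem writer_held = { .count = 1, .owner = &writer };
	struct fake_rwsem idle        = { .count = 0, .owner = NULL };

	printf("readers hold it : spin=%d\n", can_spin_on_owner(&reader_held));
	printf("writer holds it : spin=%d\n", can_spin_on_owner(&writer_held));
	printf("lock looks free : spin=%d\n", can_spin_on_owner(&idle));
	return 0;
}

Run as written, the sketch prints spin=0 for the reader-held case and spin=1 for the running-writer and free-looking cases, matching the conservative behaviour the patch introduces when sem->owner is NULL but the lock is active.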