| field | value | date |
|---|---|---|
| author | Oleg Nesterov <oleg@redhat.com> | 2018-05-18 12:55:35 -0400 |
| committer | Ingo Molnar <mingo@kernel.org> | 2018-05-25 02:11:47 -0400 |
| commit | 1b22fc609cecd1b16c4a015e1a6b3c9717484e3a (patch) | |
| tree | 261b3bf3f6b89a19ab367b58105acbe1cd1e190d | |
| parent | 675c00c3325723751864cc5867d736c58633cf3d (diff) | |
locking/rwsem: Simplify the is-owner-spinnable checks
Add the trivial owner_on_cpu() helper for rwsem_can_spin_on_owner() and
rwsem_spin_on_owner(); it also makes rwsem_can_spin_on_owner() a bit
clearer.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Cc: Amir Goldstein <amir73il@gmail.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Jan Kara <jack@suse.cz>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Theodore Y. Ts'o <tytso@mit.edu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20180518165534.GA22348@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 kernel/locking/rwsem-xadd.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)
```diff
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index a90336779375..3064c50e181e 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -347,6 +347,15 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 	}
 }
 
+static inline bool owner_on_cpu(struct task_struct *owner)
+{
+	/*
+	 * As lock holder preemption issue, we both skip spinning if
+	 * task is not on cpu or its cpu is preempted
+	 */
+	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
+}
+
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 {
 	struct task_struct *owner;
@@ -359,17 +368,10 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 
 	rcu_read_lock();
 	owner = READ_ONCE(sem->owner);
-	if (!owner || !is_rwsem_owner_spinnable(owner)) {
-		ret = !owner; /* !owner is spinnable */
-		goto done;
+	if (owner) {
+		ret = is_rwsem_owner_spinnable(owner) &&
+		      owner_on_cpu(owner);
 	}
-
-	/*
-	 * As lock holder preemption issue, we both skip spinning if task is not
-	 * on cpu or its cpu is preempted
-	 */
-	ret = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
-done:
 	rcu_read_unlock();
 	return ret;
 }
@@ -398,8 +400,7 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
 	 * abort spinning when need_resched or owner is not running or
 	 * owner's cpu is preempted.
 	 */
-		if (!owner->on_cpu || need_resched() ||
-		    vcpu_is_preempted(task_cpu(owner))) {
+		if (need_resched() || !owner_on_cpu(owner)) {
 			rcu_read_unlock();
 			return false;
 		}
```
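Read without the diff noise, the refactoring reduces to the pattern below: pull the compound "is the lock holder actually running on a CPU?" test into a helper, which lets the caller drop the goto/done flow for a plain structured check. This is a minimal, compilable sketch, not the kernel code itself: struct task, can_spin_on_owner(), the spinnable flag, and the stubbed vcpu_is_preempted() are simplified stand-ins for struct task_struct, rwsem_can_spin_on_owner(), is_rwsem_owner_spinnable(), and the real paravirt hook.

```c
/*
 * Sketch of the owner_on_cpu() refactoring, with simplified types.
 * Assumed stand-ins: struct task for struct task_struct, a bool
 * "spinnable" flag for is_rwsem_owner_spinnable(), a stub for
 * vcpu_is_preempted(task_cpu(owner)).
 */
#include <stdbool.h>
#include <stdio.h>

struct task {
	bool on_cpu;	/* is the task currently running? */
	int cpu;	/* which CPU it runs on */
};

/* Stub: the kernel asks the hypervisor whether the vCPU is preempted. */
static bool vcpu_is_preempted(int cpu)
{
	(void)cpu;
	return false;
}

/* The helper added by the patch, in sketch form. */
static bool owner_on_cpu(const struct task *owner)
{
	/* Skip spinning if the holder is off-CPU or its vCPU is preempted. */
	return owner->on_cpu && !vcpu_is_preempted(owner->cpu);
}

/* After the patch: no goto, the predicate reads top to bottom. */
static bool can_spin_on_owner(const struct task *owner, bool spinnable)
{
	bool ret = true;	/* no owner at all counts as spinnable */

	if (owner)
		ret = spinnable && owner_on_cpu(owner);
	return ret;
}

int main(void)
{
	struct task t = { .on_cpu = true, .cpu = 0 };

	printf("running owner:  %d\n", can_spin_on_owner(&t, true));
	t.on_cpu = false;
	printf("off-cpu owner:  %d\n", can_spin_on_owner(&t, true));
	printf("no owner:       %d\n", can_spin_on_owner(NULL, false));
	return 0;
}
```

The payoff of the helper is that the pre-spin check and the spin loop now share one definition of "the holder is actively running", so the two call sites cannot drift apart.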
