diff options
| author | Waiman Long <Waiman.Long@hpe.com> | 2016-05-17 21:26:23 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2016-06-08 09:17:00 -0400 |
| commit | ddd0fa73c2b71c35de4fe7ae60a5f1a6cddc2cf0 (patch) | |
| tree | 6ff6f3e9c5f3b7866ceb0d1608e49ce6f4c90043 /kernel/locking | |
| parent | bf7b4c472db44413251bcef79ca1f6bf1ec81475 (diff) | |
locking/rwsem: Streamline the rwsem_optimistic_spin() code
This patch moves the owner loading and checking code entirely inside of
rwsem_spin_on_owner() to simplify the logic of the
rwsem_optimistic_spin() loop.
Suggested-by: Peter Hurley <peter@hurleysoftware.com>
Signed-off-by: Waiman Long <Waiman.Long@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Peter Hurley <peter@hurleysoftware.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Douglas Hatch <doug.hatch@hpe.com>
Cc: Jason Low <jason.low2@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Scott J Norton <scott.norton@hpe.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1463534783-38814-6-git-send-email-Waiman.Long@hpe.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking')
| -rw-r--r-- | kernel/locking/rwsem-xadd.c | 38 |
1 files changed, 20 insertions, 18 deletions
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index 4f1daf5a472d..2031281bb940 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c | |||
| @@ -350,9 +350,16 @@ done: | |||
| 350 | return ret; | 350 | return ret; |
| 351 | } | 351 | } |
| 352 | 352 | ||
| 353 | static noinline | 353 | /* |
| 354 | bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner) | 354 | * Return true only if we can still spin on the owner field of the rwsem. |
| 355 | */ | ||
| 356 | static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem) | ||
| 355 | { | 357 | { |
| 358 | struct task_struct *owner = READ_ONCE(sem->owner); | ||
| 359 | |||
| 360 | if (!rwsem_owner_is_writer(owner)) | ||
| 361 | goto out; | ||
| 362 | |||
| 356 | rcu_read_lock(); | 363 | rcu_read_lock(); |
| 357 | while (sem->owner == owner) { | 364 | while (sem->owner == owner) { |
| 358 | /* | 365 | /* |
| @@ -372,7 +379,7 @@ bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner) | |||
| 372 | cpu_relax_lowlatency(); | 379 | cpu_relax_lowlatency(); |
| 373 | } | 380 | } |
| 374 | rcu_read_unlock(); | 381 | rcu_read_unlock(); |
| 375 | 382 | out: | |
| 376 | /* | 383 | /* |
| 377 | * If there is a new owner or the owner is not set, we continue | 384 | * If there is a new owner or the owner is not set, we continue |
| 378 | * spinning. | 385 | * spinning. |
| @@ -382,7 +389,6 @@ bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner) | |||
| 382 | 389 | ||
| 383 | static bool rwsem_optimistic_spin(struct rw_semaphore *sem) | 390 | static bool rwsem_optimistic_spin(struct rw_semaphore *sem) |
| 384 | { | 391 | { |
| 385 | struct task_struct *owner; | ||
| 386 | bool taken = false; | 392 | bool taken = false; |
| 387 | 393 | ||
| 388 | preempt_disable(); | 394 | preempt_disable(); |
| @@ -394,21 +400,17 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem) | |||
| 394 | if (!osq_lock(&sem->osq)) | 400 | if (!osq_lock(&sem->osq)) |
| 395 | goto done; | 401 | goto done; |
| 396 | 402 | ||
| 397 | while (true) { | 403 | /* |
| 398 | owner = READ_ONCE(sem->owner); | 404 | * Optimistically spin on the owner field and attempt to acquire the |
| 405 | * lock whenever the owner changes. Spinning will be stopped when: | ||
| 406 | * 1) the owning writer isn't running; or | ||
| 407 | * 2) readers own the lock as we can't determine if they are | ||
| 408 | * actively running or not. | ||
| 409 | */ | ||
| 410 | while (rwsem_spin_on_owner(sem)) { | ||
| 399 | /* | 411 | /* |
| 400 | * Don't spin if | 412 | * Try to acquire the lock |
| 401 | * 1) the owner is a reader as we we can't determine if the | ||
| 402 | * reader is actively running or not. | ||
| 403 | * 2) The rwsem_spin_on_owner() returns false which means | ||
| 404 | * the owner isn't running. | ||
| 405 | */ | 413 | */ |
| 406 | if (rwsem_owner_is_reader(owner) || | ||
| 407 | (rwsem_owner_is_writer(owner) && | ||
| 408 | !rwsem_spin_on_owner(sem, owner))) | ||
| 409 | break; | ||
| 410 | |||
| 411 | /* wait_lock will be acquired if write_lock is obtained */ | ||
| 412 | if (rwsem_try_write_lock_unqueued(sem)) { | 414 | if (rwsem_try_write_lock_unqueued(sem)) { |
| 413 | taken = true; | 415 | taken = true; |
| 414 | break; | 416 | break; |
| @@ -420,7 +422,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem) | |||
| 420 | * we're an RT task that will live-lock because we won't let | 422 | * we're an RT task that will live-lock because we won't let |
| 421 | * the owner complete. | 423 | * the owner complete. |
| 422 | */ | 424 | */ |
| 423 | if (!owner && (need_resched() || rt_task(current))) | 425 | if (!sem->owner && (need_resched() || rt_task(current))) |
| 424 | break; | 426 | break; |
| 425 | 427 | ||
| 426 | /* | 428 | /* |
