author    Linus Torvalds <torvalds@linux-foundation.org>  2015-06-22 17:54:22 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-06-22 17:54:22 -0400
commit    1bf7067c6e173dc10411704db48338ed69c05565 (patch)
tree      06d731d9647c525fa598d03d7ec957ff9772ff40 /kernel/locking/rwsem-xadd.c
parent    fc934d40178ad4e551a17e2733241d9f29fddd70 (diff)
parent    68722101ec3a0e179408a13708dd020e04f54aab (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The main changes are:

   - 'qspinlock' support, enabled on x86: queued spinlocks - these are
     now the spinlock variant used by x86 as they outperform ticket
     spinlocks in every category.  (Waiman Long)

   - 'pvqspinlock' support on x86: paravirtualized variant of queued
     spinlocks.  (Waiman Long, Peter Zijlstra)

   - 'qrwlock' support, enabled on x86: queued rwlocks.  Similar to
     queued spinlocks, they are now the variant used by x86:

       CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
       CONFIG_QUEUED_SPINLOCKS=y
       CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
       CONFIG_QUEUED_RWLOCKS=y

   - various lockdep fixlets

   - various locking primitives cleanups, further WRITE_ONCE()
     propagation"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  locking/lockdep: Remove hard coded array size dependency
  locking/qrwlock: Don't contend with readers when setting _QW_WAITING
  lockdep: Do not break user-visible string
  locking/arch: Rename set_mb() to smp_store_mb()
  locking/arch: Add WRITE_ONCE() to set_mb()
  rtmutex: Warn if trylock is called from hard/softirq context
  arch: Remove __ARCH_HAVE_CMPXCHG
  locking/rtmutex: Drop usage of __HAVE_ARCH_CMPXCHG
  locking/qrwlock: Rename QUEUE_RWLOCK to QUEUED_RWLOCKS
  locking/pvqspinlock: Rename QUEUED_SPINLOCK to QUEUED_SPINLOCKS
  locking/pvqspinlock: Replace xchg() by the more descriptive set_mb()
  locking/pvqspinlock, x86: Enable PV qspinlock for Xen
  locking/pvqspinlock, x86: Enable PV qspinlock for KVM
  locking/pvqspinlock, x86: Implement the paravirt qspinlock call patching
  locking/pvqspinlock: Implement simple paravirt support for the qspinlock
  locking/qspinlock: Revert to test-and-set on hypervisors
  locking/qspinlock: Use a simple write to grab the lock
  locking/qspinlock: Optimize for smaller NR_CPUS
  locking/qspinlock: Extract out code snippets for the next patch
  locking/qspinlock: Add pending bit
  ...
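[Editorial note on one item above: the set_mb() -> smp_store_mb() rename also folds
WRITE_ONCE() into the store half of the primitive. Quoted from memory rather than
verbatim, the generic fallback after this series looks roughly like the sketch below;
architectures such as x86 provide their own (e.g. xchg()-based) versions.]

        /* Sketch of the generic fallback (asm-generic/barrier.h), not verbatim:
         * store the value once, then order it against all later accesses. */
        #ifndef smp_store_mb
        #define smp_store_mb(var, value) \
                do { WRITE_ONCE(var, value); smp_mb(); } while (0)
        #endif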
Diffstat (limited to 'kernel/locking/rwsem-xadd.c')
-rw-r--r--	kernel/locking/rwsem-xadd.c	44
1 file changed, 44 insertions(+), 0 deletions(-)
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 3417d0172a5d..0f189714e457 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -409,11 +409,24 @@ done:
 	return taken;
 }
 
+/*
+ * Return true if the rwsem has active spinner
+ */
+static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
+{
+	return osq_is_locked(&sem->osq);
+}
+
 #else
 static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 {
 	return false;
 }
+
+static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
+{
+	return false;
+}
 #endif
 
 /*
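[Editorial note: the new rwsem_has_spinner() piggybacks on the OSQ (optimistic
spin queue) that rwsem_optimistic_spin() already maintains; the queue tail is
set exactly while some task is spinning on the lock. For reference,
osq_is_locked() in include/linux/osq_lock.h is just an atomic read of that
tail, roughly as sketched below (quoted from memory, not verbatim):]

        /* Sketch of osq_is_locked(): a spinner is present iff the OSQ tail
         * points at a queued node rather than OSQ_UNLOCKED_VAL. */
        static inline bool osq_is_locked(struct optimistic_spin_queue *lock)
        {
                return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL;
        }

[Because this is a plain read it can race with spinners arriving and leaving;
the second hunk below explains why skipping the wakeup is nevertheless safe
when a spinner is, or was just, present.]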
@@ -496,7 +509,38 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
+	/*
+	 * If a spinner is present, it is not necessary to do the wakeup.
+	 * Try to do wakeup only if the trylock succeeds to minimize
+	 * spinlock contention which may introduce too much delay in the
+	 * unlock operation.
+	 *
+	 *    spinning writer		up_write/up_read caller
+	 *    ---------------		-----------------------
+	 * [S]   osq_unlock()		[L]   osq
+	 *	 MB			      RMB
+	 * [RmW] rwsem_try_write_lock()	[RmW] spin_trylock(wait_lock)
+	 *
+	 * Here, it is important to make sure that there won't be a missed
+	 * wakeup while the rwsem is free and the only spinning writer goes
+	 * to sleep without taking the rwsem. Even when the spinning writer
+	 * is just going to break out of the waiting loop, it will still do
+	 * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
+	 * rwsem_has_spinner() is true, it will guarantee at least one
+	 * trylock attempt on the rwsem later on.
+	 */
+	if (rwsem_has_spinner(sem)) {
+		/*
+		 * The smp_rmb() here is to make sure that the spinner
+		 * state is consulted before reading the wait_lock.
+		 */
+		smp_rmb();
+		if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
+			return sem;
+		goto locked;
+	}
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
+locked:
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
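[Editorial note: the control flow and barrier pairing added in this hunk can be
modeled outside the kernel. Below is a minimal, hypothetical C11/pthreads sketch;
the names mirror the kernel's but none of this is kernel API, and the acquire
fence only approximates smp_rmb():]

        /* build: cc -std=c11 -pthread sketch.c */
        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdio.h>

        /* Stand-ins for sem->osq and sem->wait_lock (hypothetical names). */
        static atomic_bool osq_locked = false;
        static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;

        static bool rwsem_has_spinner(void)
        {
                return atomic_load_explicit(&osq_locked, memory_order_relaxed);
        }

        /*
         * Mirrors the patched rwsem_wake() prologue: returns true when the
         * caller holds wait_lock and should scan the wait list, false when a
         * spinner exists and the trylock failed, so the wakeup can be skipped
         * (the spinner is guaranteed to retry the lock before sleeping).
         */
        static bool wake_prologue(void)
        {
                if (rwsem_has_spinner()) {
                        /* Consult spinner state before touching wait_lock;
                         * approximates the kernel's smp_rmb(). */
                        atomic_thread_fence(memory_order_acquire);
                        if (pthread_mutex_trylock(&wait_lock) != 0)
                                return false;   /* "return sem" in the patch */
                        return true;            /* "goto locked" in the patch */
                }
                pthread_mutex_lock(&wait_lock); /* no spinner: lock as before */
                return true;
        }

        int main(void)
        {
                atomic_store(&osq_locked, true);
                if (wake_prologue()) {
                        puts("got wait_lock, would wake waiters");
                        pthread_mutex_unlock(&wait_lock);
                } else {
                        puts("spinner present and trylock failed; skip wakeup");
                }
                return 0;
        }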