author    Linus Torvalds <torvalds@linux-foundation.org>  2015-06-22 17:54:22 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-06-22 17:54:22 -0400
commit    1bf7067c6e173dc10411704db48338ed69c05565 (patch)
tree      06d731d9647c525fa598d03d7ec957ff9772ff40 /kernel/locking/qrwlock.c
parent    fc934d40178ad4e551a17e2733241d9f29fddd70 (diff)
parent    68722101ec3a0e179408a13708dd020e04f54aab (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The main changes are:

   - 'qspinlock' support, enabled on x86: queued spinlocks - these are
     now the spinlock variant used by x86 as they outperform ticket
     spinlocks in every category. (Waiman Long)

   - 'pvqspinlock' support on x86: paravirtualized variant of queued
     spinlocks. (Waiman Long, Peter Zijlstra)

   - 'qrwlock' support, enabled on x86: queued rwlocks. Similar to
     queued spinlocks, they are now the variant used by x86:

       CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
       CONFIG_QUEUED_SPINLOCKS=y
       CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
       CONFIG_QUEUED_RWLOCKS=y

   - various lockdep fixlets

   - various locking primitives cleanups, further WRITE_ONCE()
     propagation"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  locking/lockdep: Remove hard coded array size dependency
  locking/qrwlock: Don't contend with readers when setting _QW_WAITING
  lockdep: Do not break user-visible string
  locking/arch: Rename set_mb() to smp_store_mb()
  locking/arch: Add WRITE_ONCE() to set_mb()
  rtmutex: Warn if trylock is called from hard/softirq context
  arch: Remove __ARCH_HAVE_CMPXCHG
  locking/rtmutex: Drop usage of __HAVE_ARCH_CMPXCHG
  locking/qrwlock: Rename QUEUE_RWLOCK to QUEUED_RWLOCKS
  locking/pvqspinlock: Rename QUEUED_SPINLOCK to QUEUED_SPINLOCKS
  locking/pvqspinlock: Replace xchg() by the more descriptive set_mb()
  locking/pvqspinlock, x86: Enable PV qspinlock for Xen
  locking/pvqspinlock, x86: Enable PV qspinlock for KVM
  locking/pvqspinlock, x86: Implement the paravirt qspinlock call patching
  locking/pvqspinlock: Implement simple paravirt support for the qspinlock
  locking/qspinlock: Revert to test-and-set on hypervisors
  locking/qspinlock: Use a simple write to grab the lock
  locking/qspinlock: Optimize for smaller NR_CPUS
  locking/qspinlock: Extract out code snippets for the next patch
  locking/qspinlock: Add pending bit
  ...
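[Annotation] Two of the commits above ("Rename set_mb() to smp_store_mb()" and
"Add WRITE_ONCE() to set_mb()") concern the store-with-full-barrier primitive.
A minimal sketch of the generic, non-arch-optimized form after this series;
architectures may override it with a cheaper single instruction (e.g. XCHG on
x86):

	/*
	 * Generic smp_store_mb(): a WRITE_ONCE() store followed by a
	 * full memory barrier.
	 */
	#define smp_store_mb(var, value)				\
		do { WRITE_ONCE(var, value); smp_mb(); } while (0)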
Diffstat (limited to 'kernel/locking/qrwlock.c')
-rw-r--r--  kernel/locking/qrwlock.c | 30 +++++++++++++++++++++++++-----
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index f956ede7f90d..6c5da483966b 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -1,5 +1,5 @@
 /*
- * Queue read/write lock
+ * Queued read/write locks
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -22,6 +22,26 @@
 #include <linux/hardirq.h>
 #include <asm/qrwlock.h>
 
+/*
+ * This internal data structure is used for optimizing access to some of
+ * the subfields within the atomic_t cnts.
+ */
+struct __qrwlock {
+	union {
+		atomic_t cnts;
+		struct {
+#ifdef __LITTLE_ENDIAN
+			u8 wmode;	/* Writer mode   */
+			u8 rcnts[3];	/* Reader counts */
+#else
+			u8 rcnts[3];	/* Reader counts */
+			u8 wmode;	/* Writer mode   */
+#endif
+		};
+	};
+	arch_spinlock_t	lock;
+};
+
 /**
  * rspin_until_writer_unlock - inc reader count & spin until writer is gone
  * @lock : Pointer to queue rwlock structure
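[Annotation] The struct __qrwlock added above overlays a u8 on the low byte of
the atomic_t so the writer-mode byte can be accessed without touching the
reader counts in the other three bytes. A standalone userspace sketch of the
same union trick (hypothetical model, little-endian assumed as in the #ifdef
above, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* Userspace model of the __qrwlock layout on little-endian:
	 * the writer-mode byte aliases bits 0-7 of the counter word.
	 */
	union qrw_cnts {
		uint32_t cnts;			/* whole reader/writer word  */
		struct {
			uint8_t wmode;		/* writer mode (bits 0-7)    */
			uint8_t rcnts[3];	/* reader counts (bits 8-31) */
		};
	};

	int main(void)
	{
		union qrw_cnts c = { .cnts = 0 };

		c.cnts += 1 << 8;	/* a reader arrives: bump rcnts     */
		c.wmode = 0xff;		/* a writer sets its mode byte only */

		/* wmode and rcnts occupy disjoint bytes of the same word */
		printf("cnts=0x%08x wmode=0x%02x rcnts[0]=%u\n",
		       (unsigned)c.cnts, (unsigned)c.wmode,
		       (unsigned)c.rcnts[0]);
		return 0;
	}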
@@ -107,10 +127,10 @@ void queue_write_lock_slowpath(struct qrwlock *lock)
 	 * or wait for a previous writer to go away.
 	 */
 	for (;;) {
-		cnts = atomic_read(&lock->cnts);
-		if (!(cnts & _QW_WMASK) &&
-		    (atomic_cmpxchg(&lock->cnts, cnts,
-				    cnts | _QW_WAITING) == cnts))
+		struct __qrwlock *l = (struct __qrwlock *)lock;
+
+		if (!READ_ONCE(l->wmode) &&
+		    (cmpxchg(&l->wmode, 0, _QW_WAITING) == 0))
 			break;
 
 		cpu_relax_lowlatency();
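[Annotation] The point of the hunk above ("Don't contend with readers when
setting _QW_WAITING") is that the old word-sized atomic_cmpxchg() failed
whenever a reader changed the count between the atomic_read() and the cmpxchg,
forcing the waiting writer to retry, while the new byte-sized cmpxchg() on
l->wmode succeeds regardless of concurrent reader traffic in the upper bytes.
A hedged userspace model of the two schemes using C11 atomics (names and the
QW_WAITING value are illustrative, not the kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define QW_WAITING 0x02	/* illustrative stand-in for _QW_WAITING */

	/* Old scheme: CAS on the whole word; any reader-count change
	 * between the load and the CAS makes the CAS fail and retry.
	 */
	static bool set_waiting_word(_Atomic uint32_t *cnts)
	{
		uint32_t old = atomic_load(cnts);

		if (old & 0xff)		/* a writer is already present */
			return false;
		return atomic_compare_exchange_strong(cnts, &old,
						      old | QW_WAITING);
	}

	/* New scheme: CAS on the writer byte alone; reader counts live
	 * in the other three bytes and can change freely without
	 * failing this CAS.
	 */
	static bool set_waiting_byte(_Atomic uint8_t *wmode)
	{
		uint8_t zero = 0;

		return atomic_compare_exchange_strong(wmode, &zero,
						      QW_WAITING);
	}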