author     Linus Torvalds <torvalds@linux-foundation.org>   2015-06-22 17:54:22 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-06-22 17:54:22 -0400
commit     1bf7067c6e173dc10411704db48338ed69c05565 (patch)
tree       06d731d9647c525fa598d03d7ec957ff9772ff40 /kernel
parent     fc934d40178ad4e551a17e2733241d9f29fddd70 (diff)
parent     68722101ec3a0e179408a13708dd020e04f54aab (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"The main changes are:
- 'qspinlock' support, enabled on x86: queued spinlocks - these are
now the spinlock variant used by x86 as they outperform ticket
spinlocks in every category. (Waiman Long)
- 'pvqspinlock' support on x86: paravirtualized variant of queued
spinlocks. (Waiman Long, Peter Zijlstra)
- 'qrwlock' support, enabled on x86: queued rwlocks. Similar to
queued spinlocks, they are now the variant used by x86:
CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
CONFIG_QUEUED_SPINLOCKS=y
CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
CONFIG_QUEUED_RWLOCKS=y
- various lockdep fixlets
- various locking primitives cleanups, further WRITE_ONCE()
propagation"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
locking/lockdep: Remove hard coded array size dependency
locking/qrwlock: Don't contend with readers when setting _QW_WAITING
lockdep: Do not break user-visible string
locking/arch: Rename set_mb() to smp_store_mb()
locking/arch: Add WRITE_ONCE() to set_mb()
rtmutex: Warn if trylock is called from hard/softirq context
arch: Remove __ARCH_HAVE_CMPXCHG
locking/rtmutex: Drop usage of __HAVE_ARCH_CMPXCHG
locking/qrwlock: Rename QUEUE_RWLOCK to QUEUED_RWLOCKS
locking/pvqspinlock: Rename QUEUED_SPINLOCK to QUEUED_SPINLOCKS
locking/pvqspinlock: Replace xchg() by the more descriptive set_mb()
locking/pvqspinlock, x86: Enable PV qspinlock for Xen
locking/pvqspinlock, x86: Enable PV qspinlock for KVM
locking/pvqspinlock, x86: Implement the paravirt qspinlock call patching
locking/pvqspinlock: Implement simple paravirt support for the qspinlock
locking/qspinlock: Revert to test-and-set on hypervisors
locking/qspinlock: Use a simple write to grab the lock
locking/qspinlock: Optimize for smaller NR_CPUS
locking/qspinlock: Extract out code snippets for the next patch
locking/qspinlock: Add pending bit
...
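Both the qspinlocks and qrwlocks in this merge build on MCS-style queueing: each contending CPU spins on a flag inside its own per-CPU queue node instead of on the shared lock word, so a lock hand-over only dirties one waiter's cacheline. As a rough sketch of that idea only -- the names and the user-space C11 atomics below are illustrative, not the kernel implementation, which packs the queue tail into the 32-bit lock word and adds a pending bit as described in kernel/locking/qspinlock.c further down -- a minimal MCS lock looks roughly like this:

/*
 * Minimal user-space MCS lock sketch (illustrative only; not the kernel's
 * qspinlock, which compresses the tail pointer into the 32-bit lock word).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
	struct mcs_node *_Atomic next;	/* successor in the wait queue */
	atomic_bool locked;		/* set by our predecessor on hand-over */
};

static struct mcs_node *_Atomic mcs_tail;	/* NULL when the lock is free */

static void mcs_lock(struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store(&node->next, NULL);
	atomic_store(&node->locked, false);

	/* Swing the tail to ourselves; whoever we displaced is our predecessor. */
	prev = atomic_exchange(&mcs_tail, node);
	if (prev) {
		atomic_store(&prev->next, node);
		/* Spin on our own node, not on the shared lock word. */
		while (!atomic_load(&node->locked))
			;	/* cpu_relax() in kernel terms */
	}
}

static void mcs_unlock(struct mcs_node *node)
{
	struct mcs_node *next = atomic_load(&node->next);

	if (!next) {
		/* No visible successor: try to mark the lock free. */
		struct mcs_node *expected = node;

		if (atomic_compare_exchange_strong(&mcs_tail, &expected, NULL))
			return;
		/* A successor is mid-enqueue; wait for it to link itself in. */
		while (!(next = atomic_load(&node->next)))
			;
	}
	atomic_store(&next->locked, true);	/* hand the lock to the next waiter */
}

The paravirt flavour added here keeps the same queueing but swaps the busy-wait for the pv_wait()/pv_kick() hypercalls described in kernel/locking/qspinlock_paravirt.h below, so a waiting vCPU can be halted and later kicked rather than spinning while preempted.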
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Kconfig.locks                  13
-rw-r--r--  kernel/futex.c                         2
-rw-r--r--  kernel/locking/Makefile                3
-rw-r--r--  kernel/locking/lockdep.c               3
-rw-r--r--  kernel/locking/mcs_spinlock.h          1
-rw-r--r--  kernel/locking/qrwlock.c              30
-rw-r--r--  kernel/locking/qspinlock.c           473
-rw-r--r--  kernel/locking/qspinlock_paravirt.h  325
-rw-r--r--  kernel/locking/rtmutex.c              13
-rw-r--r--  kernel/locking/rwsem-xadd.c           44
-rw-r--r--  kernel/sched/wait.c                    4
11 files changed, 894 insertions, 17 deletions
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 08561f1acd13..ebdb0043203a 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -235,9 +235,16 @@ config LOCK_SPIN_ON_OWNER
 	def_bool y
 	depends on MUTEX_SPIN_ON_OWNER || RWSEM_SPIN_ON_OWNER
 
-config ARCH_USE_QUEUE_RWLOCK
+config ARCH_USE_QUEUED_SPINLOCKS
 	bool
 
-config QUEUE_RWLOCK
-	def_bool y if ARCH_USE_QUEUE_RWLOCK
+config QUEUED_SPINLOCKS
+	def_bool y if ARCH_USE_QUEUED_SPINLOCKS
+	depends on SMP
+
+config ARCH_USE_QUEUED_RWLOCKS
+	bool
+
+config QUEUED_RWLOCKS
+	def_bool y if ARCH_USE_QUEUED_RWLOCKS
 	depends on SMP
diff --git a/kernel/futex.c b/kernel/futex.c
index 2579e407ff67..55ca63ad9622 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2055,7 +2055,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
 {
 	/*
 	 * The task state is guaranteed to be set before another task can
-	 * wake it. set_current_state() is implemented using set_mb() and
+	 * wake it. set_current_state() is implemented using smp_store_mb() and
 	 * queue_me() calls spin_unlock() upon completion, both serializing
 	 * access to the hash list and forcing another memory barrier.
 	 */
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index de7a416cca2a..7dd5c9918e4c 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_SMP) += spinlock.o
 obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
 obj-$(CONFIG_SMP) += lglock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
+obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
 obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
@@ -25,5 +26,5 @@ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
 obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
-obj-$(CONFIG_QUEUE_RWLOCK) += qrwlock.o
+obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
 obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index aaeae885d9af..456614136f1a 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4067,8 +4067,7 @@ void __init lockdep_info(void)
 
 #ifdef CONFIG_DEBUG_LOCKDEP
 	if (lockdep_init_error) {
-		printk("WARNING: lockdep init error! lock-%s was acquired"
-			"before lockdep_init\n", lock_init_error);
+		printk("WARNING: lockdep init error: lock '%s' was acquired before lockdep_init().\n", lock_init_error);
 		printk("Call stack leading to lockdep invocation was:\n");
 		print_stack_trace(&lockdep_init_trace, 0);
 	}
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index 75e114bdf3f2..fd91aaa4554c 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -17,6 +17,7 @@
 struct mcs_spinlock {
 	struct mcs_spinlock *next;
 	int locked; /* 1 if lock acquired */
+	int count;  /* nesting count, see qspinlock.c */
 };
 
 #ifndef arch_mcs_spin_lock_contended
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index f956ede7f90d..6c5da483966b 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -1,5 +1,5 @@
 /*
- * Queue read/write lock
+ * Queued read/write locks
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -22,6 +22,26 @@
 #include <linux/hardirq.h>
 #include <asm/qrwlock.h>
 
+/*
+ * This internal data structure is used for optimizing access to some of
+ * the subfields within the atomic_t cnts.
+ */
+struct __qrwlock {
+	union {
+		atomic_t cnts;
+		struct {
+#ifdef __LITTLE_ENDIAN
+			u8 wmode;	/* Writer mode   */
+			u8 rcnts[3];	/* Reader counts */
+#else
+			u8 rcnts[3];	/* Reader counts */
+			u8 wmode;	/* Writer mode   */
+#endif
+		};
+	};
+	arch_spinlock_t	lock;
+};
+
 /**
  * rspin_until_writer_unlock - inc reader count & spin until writer is gone
  * @lock  : Pointer to queue rwlock structure
@@ -107,10 +127,10 @@ void queue_write_lock_slowpath(struct qrwlock *lock)
 	 * or wait for a previous writer to go away.
 	 */
 	for (;;) {
-		cnts = atomic_read(&lock->cnts);
-		if (!(cnts & _QW_WMASK) &&
-		    (atomic_cmpxchg(&lock->cnts, cnts,
-				    cnts | _QW_WAITING) == cnts))
+		struct __qrwlock *l = (struct __qrwlock *)lock;
+
+		if (!READ_ONCE(l->wmode) &&
+		    (cmpxchg(&l->wmode, 0, _QW_WAITING) == 0))
 			break;
 
 		cpu_relax_lowlatency();
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
new file mode 100644
index 000000000000..38c49202d532
--- /dev/null
+++ b/kernel/locking/qspinlock.c
@@ -0,0 +1,473 @@
+/*
+ * Queued spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ * (C) Copyright 2013-2014 Red Hat, Inc.
+ * (C) Copyright 2015 Intel Corp.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ *          Peter Zijlstra <peterz@infradead.org>
+ */
+
+#ifndef _GEN_PV_LOCK_SLOWPATH
+
+#include <linux/smp.h>
+#include <linux/bug.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <linux/mutex.h>
+#include <asm/byteorder.h>
+#include <asm/qspinlock.h>
+
+/*
+ * The basic principle of a queue-based spinlock can best be understood
+ * by studying a classic queue-based spinlock implementation called the
+ * MCS lock. The paper below provides a good description for this kind
+ * of lock.
+ *
+ * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
+ *
+ * This queued spinlock implementation is based on the MCS lock, however to make
+ * it fit the 4 bytes we assume spinlock_t to be, and preserve its existing
+ * API, we must modify it somehow.
+ *
+ * In particular; where the traditional MCS lock consists of a tail pointer
+ * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
+ * unlock the next pending (next->locked), we compress both these: {tail,
+ * next->locked} into a single u32 value.
+ *
+ * Since a spinlock disables recursion of its own context and there is a limit
+ * to the contexts that can nest; namely: task, softirq, hardirq, nmi. As there
+ * are at most 4 nesting levels, it can be encoded by a 2-bit number. Now
+ * we can encode the tail by combining the 2-bit nesting level with the cpu
+ * number. With one byte for the lock value and 3 bytes for the tail, only a
+ * 32-bit word is now needed. Even though we only need 1 bit for the lock,
+ * we extend it to a full byte to achieve better performance for architectures
+ * that support atomic byte write.
+ *
+ * We also change the first spinner to spin on the lock bit instead of its
+ * node; whereby avoiding the need to carry a node from lock to unlock, and
+ * preserving existing lock API. This also makes the unlock code simpler and
+ * faster.
+ *
+ * N.B. The current implementation only supports architectures that allow
+ *      atomic operations on smaller 8-bit and 16-bit data types.
+ *
+ */
+
+#include "mcs_spinlock.h"
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define MAX_NODES	8
+#else
+#define MAX_NODES	4
+#endif
+
+/*
+ * Per-CPU queue node structures; we can never have more than 4 nested
+ * contexts: task, softirq, hardirq, nmi.
+ *
+ * Exactly fits one 64-byte cacheline on a 64-bit architecture.
+ *
+ * PV doubles the storage and uses the second cacheline for PV state.
+ */
+static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
+
+/*
+ * We must be able to distinguish between no-tail and the tail at 0:0,
+ * therefore increment the cpu number by one.
+ */
+
+static inline u32 encode_tail(int cpu, int idx)
+{
+	u32 tail;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(idx > 3);
+#endif
+	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
+	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */
+
+	return tail;
+}
+
+static inline struct mcs_spinlock *decode_tail(u32 tail)
+{
+	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
+	int idx = (tail &  _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
+
+	return per_cpu_ptr(&mcs_nodes[idx], cpu);
+}
+
+#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
+
+/*
+ * By using the whole 2nd least significant byte for the pending bit, we
+ * can allow better optimization of the lock acquisition for the pending
+ * bit holder.
+ *
+ * This internal structure is also used by the set_locked function which
+ * is not restricted to _Q_PENDING_BITS == 8.
+ */
+struct __qspinlock {
+	union {
+		atomic_t val;
+#ifdef __LITTLE_ENDIAN
+		struct {
+			u8	locked;
+			u8	pending;
+		};
+		struct {
+			u16	locked_pending;
+			u16	tail;
+		};
+#else
+		struct {
+			u16	tail;
+			u16	locked_pending;
+		};
+		struct {
+			u8	reserved[2];
+			u8	pending;
+			u8	locked;
+		};
+#endif
+	};
+};
+
+#if _Q_PENDING_BITS == 8
+/**
+ * clear_pending_set_locked - take ownership and clear the pending bit.
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,1,0 -> *,0,1
+ *
+ * Lock stealing is not allowed if this function is used.
+ */
+static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
+{
+	struct __qspinlock *l = (void *)lock;
+
+	WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
+}
+
+/*
+ * xchg_tail - Put in the new queue tail code word & retrieve previous one
+ * @lock : Pointer to queued spinlock structure
+ * @tail : The new queue tail code word
+ * Return: The previous queue tail code word
+ *
+ * xchg(lock, tail)
+ *
+ * p,*,* -> n,*,* ; prev = xchg(lock, node)
+ */
+static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
+{
+	struct __qspinlock *l = (void *)lock;
+
+	return (u32)xchg(&l->tail, tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
+}
+
+#else /* _Q_PENDING_BITS == 8 */
+
+/**
+ * clear_pending_set_locked - take ownership and clear the pending bit.
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,1,0 -> *,0,1
+ */
+static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
+{
+	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
+}
+
+/**
+ * xchg_tail - Put in the new queue tail code word & retrieve previous one
+ * @lock : Pointer to queued spinlock structure
+ * @tail : The new queue tail code word
+ * Return: The previous queue tail code word
+ *
+ * xchg(lock, tail)
+ *
+ * p,*,* -> n,*,* ; prev = xchg(lock, node)
+ */
+static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
+{
+	u32 old, new, val = atomic_read(&lock->val);
+
+	for (;;) {
+		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
+		old = atomic_cmpxchg(&lock->val, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+	return old;
+}
+#endif /* _Q_PENDING_BITS == 8 */
+
+/**
+ * set_locked - Set the lock bit and own the lock
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,*,0 -> *,0,1
+ */
+static __always_inline void set_locked(struct qspinlock *lock)
+{
+	struct __qspinlock *l = (void *)lock;
+
+	WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
+}
+
+
+/*
+ * Generate the native code for queued_spin_unlock_slowpath(); provide NOPs for
+ * all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
+
+static __always_inline void __pv_wait_head(struct qspinlock *lock,
+					   struct mcs_spinlock *node) { }
+
+#define pv_enabled()		false
+
+#define pv_init_node		__pv_init_node
+#define pv_wait_node		__pv_wait_node
+#define pv_kick_node		__pv_kick_node
+#define pv_wait_head		__pv_wait_head
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
+#endif
+
+#endif /* _GEN_PV_LOCK_SLOWPATH */
+
+/**
+ * queued_spin_lock_slowpath - acquire the queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ * @val: Current value of the queued spinlock 32-bit word
+ *
+ * (queue tail, pending bit, lock value)
+ *
+ *              fast     :    slow                                  :    unlock
+ *                       :                                          :
+ * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
+ *                       :       | ^--------.------.             /  :
+ *                       :       v           \      \            |  :
+ * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
+ *                       :       | ^--'              |           |  :
+ *                       :       v                   |           |  :
+ * uncontended           :    (n,x,y) +--> (n,0,0) --'            |  :
+ *   queue               :       | ^--'                           |  :
+ *                       :       v                                |  :
+ * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
+ *   queue               :         ^--'                             :
+ */
+void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	struct mcs_spinlock *prev, *next, *node;
+	u32 new, old, tail;
+	int idx;
+
+	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
+
+	if (pv_enabled())
+		goto queue;
+
+	if (virt_queued_spin_lock(lock))
+		return;
+
+	/*
+	 * wait for in-progress pending->locked hand-overs
+	 *
+	 * 0,1,0 -> 0,0,1
+	 */
+	if (val == _Q_PENDING_VAL) {
+		while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
+			cpu_relax();
+	}
+
+	/*
+	 * trylock || pending
+	 *
+	 * 0,0,0 -> 0,0,1 ; trylock
+	 * 0,0,1 -> 0,1,1 ; pending
+	 */
+	for (;;) {
+		/*
+		 * If we observe any contention; queue.
+		 */
+		if (val & ~_Q_LOCKED_MASK)
+			goto queue;
+
+		new = _Q_LOCKED_VAL;
+		if (val == new)
+			new |= _Q_PENDING_VAL;
+
+		old = atomic_cmpxchg(&lock->val, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	/*
+	 * we won the trylock
+	 */
+	if (new == _Q_LOCKED_VAL)
+		return;
+
+	/*
+	 * we're pending, wait for the owner to go away.
+	 *
+	 * *,1,1 -> *,1,0
+	 *
+	 * this wait loop must be a load-acquire such that we match the
+	 * store-release that clears the locked bit and create lock
+	 * sequentiality; this is because not all clear_pending_set_locked()
+	 * implementations imply full barriers.
+	 */
+	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK)
+		cpu_relax();
+
+	/*
+	 * take ownership and clear the pending bit.
+	 *
+	 * *,1,0 -> *,0,1
+	 */
+	clear_pending_set_locked(lock);
+	return;
+
+	/*
+	 * End of pending bit optimistic spinning and beginning of MCS
+	 * queuing.
+	 */
+queue:
+	node = this_cpu_ptr(&mcs_nodes[0]);
+	idx = node->count++;
+	tail = encode_tail(smp_processor_id(), idx);
+
+	node += idx;
+	node->locked = 0;
+	node->next = NULL;
+	pv_init_node(node);
+
+	/*
+	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
+	 * attempt the trylock once more in the hope someone let go while we
+	 * weren't watching.
+	 */
+	if (queued_spin_trylock(lock))
+		goto release;
+
+	/*
+	 * We have already touched the queueing cacheline; don't bother with
+	 * pending stuff.
+	 *
+	 * p,*,* -> n,*,*
+	 */
+	old = xchg_tail(lock, tail);
+
+	/*
+	 * if there was a previous node; link it and wait until reaching the
+	 * head of the waitqueue.
+	 */
+	if (old & _Q_TAIL_MASK) {
+		prev = decode_tail(old);
+		WRITE_ONCE(prev->next, node);
+
+		pv_wait_node(node);
+		arch_mcs_spin_lock_contended(&node->locked);
+	}
+
+	/*
+	 * we're at the head of the waitqueue, wait for the owner & pending to
+	 * go away.
+	 *
+	 * *,x,y -> *,0,0
+	 *
+	 * this wait loop must use a load-acquire such that we match the
+	 * store-release that clears the locked bit and create lock
+	 * sequentiality; this is because the set_locked() function below
+	 * does not imply a full barrier.
+	 *
+	 */
+	pv_wait_head(lock, node);
+	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_PENDING_MASK)
+		cpu_relax();
+
+	/*
+	 * claim the lock:
+	 *
+	 * n,0,0 -> 0,0,1 : lock, uncontended
+	 * *,0,0 -> *,0,1 : lock, contended
+	 *
+	 * If the queue head is the only one in the queue (lock value == tail),
+	 * clear the tail code and grab the lock. Otherwise, we only need
+	 * to grab the lock.
+	 */
+	for (;;) {
+		if (val != tail) {
+			set_locked(lock);
+			break;
+		}
+		old = atomic_cmpxchg(&lock->val, val, _Q_LOCKED_VAL);
+		if (old == val)
+			goto release;	/* No contention */
+
+		val = old;
+	}
+
+	/*
+	 * contended path; wait for next, release.
+	 */
+	while (!(next = READ_ONCE(node->next)))
+		cpu_relax();
+
+	arch_mcs_spin_unlock_contended(&next->locked);
+	pv_kick_node(next);
+
+release:
+	/*
+	 * release the node
+	 */
+	this_cpu_dec(mcs_nodes[0].count);
+}
+EXPORT_SYMBOL(queued_spin_lock_slowpath);
+
+/*
+ * Generate the paravirt code for queued_spin_unlock_slowpath().
+ */
+#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+#define _GEN_PV_LOCK_SLOWPATH
+
+#undef  pv_enabled
+#define pv_enabled()	true
+
+#undef pv_init_node
+#undef pv_wait_node
+#undef pv_kick_node
+#undef pv_wait_head
+
+#undef  queued_spin_lock_slowpath
+#define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath
+
+#include "qspinlock_paravirt.h"
+#include "qspinlock.c"
+
+#endif
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
new file mode 100644
index 000000000000..04ab18151cc8
--- /dev/null
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -0,0 +1,325 @@
+#ifndef _GEN_PV_LOCK_SLOWPATH
+#error "do not include this file"
+#endif
+
+#include <linux/hash.h>
+#include <linux/bootmem.h>
+
+/*
+ * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
+ * of spinning them.
+ *
+ * This relies on the architecture to provide two paravirt hypercalls:
+ *
+ *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
+ *   pv_kick(cpu)             -- wakes a suspended vcpu
+ *
+ * Using these we implement __pv_queued_spin_lock_slowpath() and
+ * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
+ * native_queued_spin_unlock().
+ */
+
+#define _Q_SLOW_VAL	(3U << _Q_LOCKED_OFFSET)
+
+enum vcpu_state {
+	vcpu_running = 0,
+	vcpu_halted,
+};
+
+struct pv_node {
+	struct mcs_spinlock	mcs;
+	struct mcs_spinlock	__res[3];
+
+	int			cpu;
+	u8			state;
+};
+
+/*
+ * Lock and MCS node addresses hash table for fast lookup
+ *
+ * Hashing is done on a per-cacheline basis to minimize the need to access
+ * more than one cacheline.
+ *
+ * Dynamically allocate a hash table big enough to hold at least 4X the
+ * number of possible cpus in the system. Allocation is done on page
+ * granularity. So the minimum number of hash buckets should be at least
+ * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
+ *
+ * Since we should not be holding locks from NMI context (very rare indeed) the
+ * max load factor is 0.75, which is around the point where open addressing
+ * breaks down.
+ *
+ */
+struct pv_hash_entry {
+	struct qspinlock *lock;
+	struct pv_node   *node;
+};
+
+#define PV_HE_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
+#define PV_HE_MIN	(PAGE_SIZE / sizeof(struct pv_hash_entry))
+
+static struct pv_hash_entry *pv_lock_hash;
+static unsigned int pv_lock_hash_bits __read_mostly;
+
+/*
+ * Allocate memory for the PV qspinlock hash buckets
+ *
+ * This function should be called from the paravirt spinlock initialization
+ * routine.
+ */
+void __init __pv_init_lock_hash(void)
+{
+	int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);
+
+	if (pv_hash_size < PV_HE_MIN)
+		pv_hash_size = PV_HE_MIN;
+
+	/*
+	 * Allocate space from bootmem which should be page-size aligned
+	 * and hence cacheline aligned.
+	 */
+	pv_lock_hash = alloc_large_system_hash("PV qspinlock",
+					       sizeof(struct pv_hash_entry),
+					       pv_hash_size, 0, HASH_EARLY,
+					       &pv_lock_hash_bits, NULL,
+					       pv_hash_size, pv_hash_size);
+}
+
+#define for_each_hash_entry(he, offset, hash)						\
+	for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;	\
+	     offset < (1 << pv_lock_hash_bits);						\
+	     offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])
+
+static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
+{
+	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
+	struct pv_hash_entry *he;
+
+	for_each_hash_entry(he, offset, hash) {
+		if (!cmpxchg(&he->lock, NULL, lock)) {
+			WRITE_ONCE(he->node, node);
+			return &he->lock;
+		}
+	}
+	/*
+	 * Hard assume there is a free entry for us.
+	 *
+	 * This is guaranteed by ensuring every blocked lock only ever consumes
+	 * a single entry, and since we only have 4 nesting levels per CPU
+	 * and allocated 4*nr_possible_cpus(), this must be so.
+	 *
+	 * The single entry is guaranteed by having the lock owner unhash
+	 * before it releases.
+	 */
+	BUG();
+}
+
+static struct pv_node *pv_unhash(struct qspinlock *lock)
+{
+	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
+	struct pv_hash_entry *he;
+	struct pv_node *node;
+
+	for_each_hash_entry(he, offset, hash) {
+		if (READ_ONCE(he->lock) == lock) {
+			node = READ_ONCE(he->node);
+			WRITE_ONCE(he->lock, NULL);
+			return node;
+		}
+	}
+	/*
+	 * Hard assume we'll find an entry.
+	 *
+	 * This guarantees a limited lookup time and is itself guaranteed by
+	 * having the lock owner do the unhash -- IFF the unlock sees the
+	 * SLOW flag, there MUST be a hash entry.
+	 */
+	BUG();
+}
+
+/*
+ * Initialize the PV part of the mcs_spinlock node.
+ */
+static void pv_init_node(struct mcs_spinlock *node)
+{
+	struct pv_node *pn = (struct pv_node *)node;
+
+	BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(struct mcs_spinlock));
+
+	pn->cpu = smp_processor_id();
+	pn->state = vcpu_running;
+}
+
+/*
+ * Wait for node->locked to become true, halt the vcpu after a short spin.
+ * pv_kick_node() is used to wake the vcpu again.
+ */
+static void pv_wait_node(struct mcs_spinlock *node)
+{
+	struct pv_node *pn = (struct pv_node *)node;
+	int loop;
+
+	for (;;) {
+		for (loop = SPIN_THRESHOLD; loop; loop--) {
+			if (READ_ONCE(node->locked))
+				return;
+			cpu_relax();
+		}
+
+		/*
+		 * Order pn->state vs pn->locked thusly:
+		 *
+		 * [S] pn->state = vcpu_halted	  [S] next->locked = 1
+		 *     MB			      MB
+		 * [L] pn->locked		[RmW] pn->state = vcpu_running
+		 *
+		 * Matches the xchg() from pv_kick_node().
+		 */
+		smp_store_mb(pn->state, vcpu_halted);
+
+		if (!READ_ONCE(node->locked))
+			pv_wait(&pn->state, vcpu_halted);
+
+		/*
+		 * Reset the vCPU state to avoid unncessary CPU kicking
+		 */
+		WRITE_ONCE(pn->state, vcpu_running);
+
+		/*
+		 * If the locked flag is still not set after wakeup, it is a
+		 * spurious wakeup and the vCPU should wait again. However,
+		 * there is a pretty high overhead for CPU halting and kicking.
+		 * So it is better to spin for a while in the hope that the
+		 * MCS lock will be released soon.
+		 */
+	}
+	/*
+	 * By now our node->locked should be 1 and our caller will not actually
+	 * spin-wait for it. We do however rely on our caller to do a
+	 * load-acquire for us.
+	 */
+}
+
+/*
+ * Called after setting next->locked = 1, used to wake those stuck in
+ * pv_wait_node().
+ */
+static void pv_kick_node(struct mcs_spinlock *node)
+{
+	struct pv_node *pn = (struct pv_node *)node;
+
+	/*
+	 * Note that because node->locked is already set, this actual
+	 * mcs_spinlock entry could be re-used already.
+	 *
+	 * This should be fine however, kicking people for no reason is
+	 * harmless.
+	 *
+	 * See the comment in pv_wait_node().
+	 */
+	if (xchg(&pn->state, vcpu_running) == vcpu_halted)
+		pv_kick(pn->cpu);
+}
+
+/*
+ * Wait for l->locked to become clear; halt the vcpu after a short spin.
+ * __pv_queued_spin_unlock() will wake us.
+ */
+static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
+{
+	struct pv_node *pn = (struct pv_node *)node;
+	struct __qspinlock *l = (void *)lock;
+	struct qspinlock **lp = NULL;
+	int loop;
+
+	for (;;) {
+		for (loop = SPIN_THRESHOLD; loop; loop--) {
+			if (!READ_ONCE(l->locked))
+				return;
+			cpu_relax();
+		}
+
+		WRITE_ONCE(pn->state, vcpu_halted);
+		if (!lp) { /* ONCE */
+			lp = pv_hash(lock, pn);
+			/*
+			 * lp must be set before setting _Q_SLOW_VAL
+			 *
+			 * [S] lp = lock                [RmW] l = l->locked = 0
+			 *     MB                             MB
+			 * [S] l->locked = _Q_SLOW_VAL  [L]   lp
+			 *
+			 * Matches the cmpxchg() in __pv_queued_spin_unlock().
+			 */
+			if (!cmpxchg(&l->locked, _Q_LOCKED_VAL, _Q_SLOW_VAL)) {
+				/*
+				 * The lock is free and _Q_SLOW_VAL has never
+				 * been set. Therefore we need to unhash before
+				 * getting the lock.
+				 */
+				WRITE_ONCE(*lp, NULL);
+				return;
+			}
+		}
+		pv_wait(&l->locked, _Q_SLOW_VAL);
+
+		/*
+		 * The unlocker should have freed the lock before kicking the
+		 * CPU. So if the lock is still not free, it is a spurious
+		 * wakeup and so the vCPU should wait again after spinning for
+		 * a while.
+		 */
+	}
+
+	/*
+	 * Lock is unlocked now; the caller will acquire it without waiting.
+	 * As with pv_wait_node() we rely on the caller to do a load-acquire
+	 * for us.
+	 */
+}
+
+/*
+ * PV version of the unlock function to be used in stead of
+ * queued_spin_unlock().
+ */
+__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
+{
+	struct __qspinlock *l = (void *)lock;
+	struct pv_node *node;
+
+	/*
+	 * We must not unlock if SLOW, because in that case we must first
+	 * unhash. Otherwise it would be possible to have multiple @lock
+	 * entries, which would be BAD.
+	 */
+	if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
+		return;
+
+	/*
+	 * Since the above failed to release, this must be the SLOW path.
+	 * Therefore start by looking up the blocked node and unhashing it.
+	 */
+	node = pv_unhash(lock);
+
+	/*
+	 * Now that we have a reference to the (likely) blocked pv_node,
+	 * release the lock.
+	 */
+	smp_store_release(&l->locked, 0);
+
+	/*
+	 * At this point the memory pointed at by lock can be freed/reused,
+	 * however we can still use the pv_node to kick the CPU.
+	 */
+	if (READ_ONCE(node->state) == vcpu_halted)
+		pv_kick(node->cpu);
+}
+/*
+ * Include the architecture specific callee-save thunk of the
+ * __pv_queued_spin_unlock(). This thunk is put together with
+ * __pv_queued_spin_unlock() near the top of the file to make sure
+ * that the callee-save thunk and the real unlock function are close
+ * to each other sharing consecutive instruction cachelines.
+ */
+#include <asm/qspinlock_paravirt.h>
+
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b025295f4966..30ec5b46cd8c 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -70,10 +70,10 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
 }
 
 /*
- * We can speed up the acquire/release, if the architecture
- * supports cmpxchg and if there's no debugging state to be set up
+ * We can speed up the acquire/release, if there's no debugging state to be
+ * set up.
  */
-#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
+#ifndef CONFIG_DEBUG_RT_MUTEXES
 # define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c)
 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 {
@@ -1443,10 +1443,17 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
  *
  * @lock: the rt_mutex to be locked
  *
+ * This function can only be called in thread context. It's safe to
+ * call it from atomic regions, but not from hard interrupt or soft
+ * interrupt context.
+ *
  * Returns 1 on success and 0 on contention
  */
 int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
+	if (WARN_ON(in_irq() || in_nmi() || in_serving_softirq()))
+		return 0;
+
 	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 3417d0172a5d..0f189714e457 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -409,11 +409,24 @@ done:
 	return taken;
 }
 
+/*
+ * Return true if the rwsem has active spinner
+ */
+static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
+{
+	return osq_is_locked(&sem->osq);
+}
+
 #else
 static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 {
 	return false;
 }
+
+static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
+{
+	return false;
+}
 #endif
 
 /*
@@ -496,7 +509,38 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
+	/*
+	 * If a spinner is present, it is not necessary to do the wakeup.
+	 * Try to do wakeup only if the trylock succeeds to minimize
+	 * spinlock contention which may introduce too much delay in the
+	 * unlock operation.
+	 *
+	 *    spinning writer		up_write/up_read caller
+	 *    ---------------		-----------------------
+	 * [S]   osq_unlock()		[L]   osq
+	 *	 MB			      RMB
+	 * [RmW] rwsem_try_write_lock()	[RmW] spin_trylock(wait_lock)
+	 *
+	 * Here, it is important to make sure that there won't be a missed
+	 * wakeup while the rwsem is free and the only spinning writer goes
+	 * to sleep without taking the rwsem. Even when the spinning writer
+	 * is just going to break out of the waiting loop, it will still do
+	 * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
+	 * rwsem_has_spinner() is true, it will guarantee at least one
+	 * trylock attempt on the rwsem later on.
+	 */
+	if (rwsem_has_spinner(sem)) {
+		/*
+		 * The smp_rmb() here is to make sure that the spinner
+		 * state is consulted before reading the wait_lock.
+		 */
+		smp_rmb();
+		if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
+			return sem;
+		goto locked;
+	}
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
+locked:
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 852143a79f36..9bc82329eaad 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -341,7 +341,7 @@ long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
 	 * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
 	 * an event.
 	 */
-	set_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */
+	smp_store_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */
 
 	return timeout;
 }
@@ -354,7 +354,7 @@ int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
 	 * doesn't imply write barrier and the users expects write
 	 * barrier semantics on wakeup functions. The following
 	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
-	 * and is paired with set_mb() in wait_woken().
+	 * and is paired with smp_store_mb() in wait_woken().
 	 */
 	smp_wmb(); /* C */
 	wait->flags |= WQ_FLAG_WOKEN;