diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-13 09:51:40 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-13 09:51:40 -0400 |
commit | 6d5f0ebfc0be9cbfeaafdd9258d5fa24b7975a36 (patch) | |
tree | 3b7a5851a3d9f02441e2dcbaf22785d131544544 /arch/x86 | |
parent | dbb885fecc1b1b35e93416bedd24d21bd20f60ed (diff) | |
parent | 8acd91e8620836a56ff62028ed28ba629f2881a0 (diff) |
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking updates from Ingo Molnar:
"The main updates in this cycle were:
- mutex MCS refactoring finishing touches: improve comments, refactor
and clean up code, reduce debug data structure footprint, etc.
- qrwlock finishing touches: remove old code, self-test updates.
- small rwsem optimization
- various smaller fixes/cleanups"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
locking/lockdep: Revert qrwlock recursive stuff
locking/rwsem: Avoid double checking before try acquiring write lock
locking/rwsem: Move EXPORT_SYMBOL() lines to follow function definition
locking/rwlock, x86: Delete unused asm/rwlock.h and rwlock.S
locking/rwlock, x86: Clean up asm/spinlock*.h to remove old rwlock code
locking/semaphore: Resolve some shadow warnings
locking/selftest: Support queued rwlock
locking/lockdep: Restrict the use of recursive read_lock() with qrwlock
locking/spinlocks: Always evaluate the second argument of spin_lock_nested()
locking/Documentation: Update locking/mutex-design.txt disadvantages
locking/Documentation: Move locking related docs into Documentation/locking/
locking/mutexes: Use MUTEX_SPIN_ON_OWNER when appropriate
locking/mutexes: Refactor optimistic spinning code
locking/mcs: Remove obsolete comment
locking/mutexes: Document quick lock release when unlocking
locking/mutexes: Standardize arguments in lock/unlock slowpaths
locking: Remove deprecated smp_mb__() barriers
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/include/asm/rwlock.h | 49 | ||||
-rw-r--r-- | arch/x86/include/asm/spinlock.h | 81 | ||||
-rw-r--r-- | arch/x86/include/asm/spinlock_types.h | 4 | ||||
-rw-r--r-- | arch/x86/lib/Makefile | 1 | ||||
-rw-r--r-- | arch/x86/lib/rwlock.S | 44 |
5 files changed, 2 insertions, 177 deletions
diff --git a/arch/x86/include/asm/rwlock.h b/arch/x86/include/asm/rwlock.h deleted file mode 100644 index a5370a03d90c..000000000000 --- a/arch/x86/include/asm/rwlock.h +++ /dev/null | |||
@@ -1,49 +0,0 @@ | |||
1 | #ifndef _ASM_X86_RWLOCK_H | ||
2 | #define _ASM_X86_RWLOCK_H | ||
3 | |||
4 | #include <asm/asm.h> | ||
5 | |||
6 | #if CONFIG_NR_CPUS <= 2048 | ||
7 | |||
8 | #ifndef __ASSEMBLY__ | ||
9 | typedef union { | ||
10 | s32 lock; | ||
11 | s32 write; | ||
12 | } arch_rwlock_t; | ||
13 | #endif | ||
14 | |||
15 | #define RW_LOCK_BIAS 0x00100000 | ||
16 | #define READ_LOCK_SIZE(insn) __ASM_FORM(insn##l) | ||
17 | #define READ_LOCK_ATOMIC(n) atomic_##n | ||
18 | #define WRITE_LOCK_ADD(n) __ASM_FORM_COMMA(addl n) | ||
19 | #define WRITE_LOCK_SUB(n) __ASM_FORM_COMMA(subl n) | ||
20 | #define WRITE_LOCK_CMP RW_LOCK_BIAS | ||
21 | |||
22 | #else /* CONFIG_NR_CPUS > 2048 */ | ||
23 | |||
24 | #include <linux/const.h> | ||
25 | |||
26 | #ifndef __ASSEMBLY__ | ||
27 | typedef union { | ||
28 | s64 lock; | ||
29 | struct { | ||
30 | u32 read; | ||
31 | s32 write; | ||
32 | }; | ||
33 | } arch_rwlock_t; | ||
34 | #endif | ||
35 | |||
36 | #define RW_LOCK_BIAS (_AC(1,L) << 32) | ||
37 | #define READ_LOCK_SIZE(insn) __ASM_FORM(insn##q) | ||
38 | #define READ_LOCK_ATOMIC(n) atomic64_##n | ||
39 | #define WRITE_LOCK_ADD(n) __ASM_FORM(incl) | ||
40 | #define WRITE_LOCK_SUB(n) __ASM_FORM(decl) | ||
41 | #define WRITE_LOCK_CMP 1 | ||
42 | |||
43 | #endif /* CONFIG_NR_CPUS */ | ||
44 | |||
45 | #define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | ||
46 | |||
47 | /* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */ | ||
48 | |||
49 | #endif /* _ASM_X86_RWLOCK_H */ | ||
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 54f1c8068c02..9295016485c9 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
@@ -187,7 +187,6 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) | |||
187 | cpu_relax(); | 187 | cpu_relax(); |
188 | } | 188 | } |
189 | 189 | ||
190 | #ifndef CONFIG_QUEUE_RWLOCK | ||
191 | /* | 190 | /* |
192 | * Read-write spinlocks, allowing multiple readers | 191 | * Read-write spinlocks, allowing multiple readers |
193 | * but only one writer. | 192 | * but only one writer. |
@@ -198,91 +197,15 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) | |||
198 | * irq-safe write-lock, but readers can get non-irqsafe | 197 | * irq-safe write-lock, but readers can get non-irqsafe |
199 | * read-locks. | 198 | * read-locks. |
200 | * | 199 | * |
201 | * On x86, we implement read-write locks as a 32-bit counter | 200 | * On x86, we implement read-write locks using the generic qrwlock with |
202 | * with the high bit (sign) being the "contended" bit. | 201 | * x86 specific optimization. |
203 | */ | 202 | */ |
204 | 203 | ||
205 | /** | ||
206 | * read_can_lock - would read_trylock() succeed? | ||
207 | * @lock: the rwlock in question. | ||
208 | */ | ||
209 | static inline int arch_read_can_lock(arch_rwlock_t *lock) | ||
210 | { | ||
211 | return lock->lock > 0; | ||
212 | } | ||
213 | |||
214 | /** | ||
215 | * write_can_lock - would write_trylock() succeed? | ||
216 | * @lock: the rwlock in question. | ||
217 | */ | ||
218 | static inline int arch_write_can_lock(arch_rwlock_t *lock) | ||
219 | { | ||
220 | return lock->write == WRITE_LOCK_CMP; | ||
221 | } | ||
222 | |||
223 | static inline void arch_read_lock(arch_rwlock_t *rw) | ||
224 | { | ||
225 | asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t" | ||
226 | "jns 1f\n" | ||
227 | "call __read_lock_failed\n\t" | ||
228 | "1:\n" | ||
229 | ::LOCK_PTR_REG (rw) : "memory"); | ||
230 | } | ||
231 | |||
232 | static inline void arch_write_lock(arch_rwlock_t *rw) | ||
233 | { | ||
234 | asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t" | ||
235 | "jz 1f\n" | ||
236 | "call __write_lock_failed\n\t" | ||
237 | "1:\n" | ||
238 | ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS) | ||
239 | : "memory"); | ||
240 | } | ||
241 | |||
242 | static inline int arch_read_trylock(arch_rwlock_t *lock) | ||
243 | { | ||
244 | READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock; | ||
245 | |||
246 | if (READ_LOCK_ATOMIC(dec_return)(count) >= 0) | ||
247 | return 1; | ||
248 | READ_LOCK_ATOMIC(inc)(count); | ||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | static inline int arch_write_trylock(arch_rwlock_t *lock) | ||
253 | { | ||
254 | atomic_t *count = (atomic_t *)&lock->write; | ||
255 | |||
256 | if (atomic_sub_and_test(WRITE_LOCK_CMP, count)) | ||
257 | return 1; | ||
258 | atomic_add(WRITE_LOCK_CMP, count); | ||
259 | return 0; | ||
260 | } | ||
261 | |||
262 | static inline void arch_read_unlock(arch_rwlock_t *rw) | ||
263 | { | ||
264 | asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0" | ||
265 | :"+m" (rw->lock) : : "memory"); | ||
266 | } | ||
267 | |||
268 | static inline void arch_write_unlock(arch_rwlock_t *rw) | ||
269 | { | ||
270 | asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0" | ||
271 | : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory"); | ||
272 | } | ||
273 | #else | ||
274 | #include <asm/qrwlock.h> | 204 | #include <asm/qrwlock.h> |
275 | #endif /* CONFIG_QUEUE_RWLOCK */ | ||
276 | 205 | ||
277 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | 206 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) |
278 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | 207 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) |
279 | 208 | ||
280 | #undef READ_LOCK_SIZE | ||
281 | #undef READ_LOCK_ATOMIC | ||
282 | #undef WRITE_LOCK_ADD | ||
283 | #undef WRITE_LOCK_SUB | ||
284 | #undef WRITE_LOCK_CMP | ||
285 | |||
286 | #define arch_spin_relax(lock) cpu_relax() | 209 | #define arch_spin_relax(lock) cpu_relax() |
287 | #define arch_read_relax(lock) cpu_relax() | 210 | #define arch_read_relax(lock) cpu_relax() |
288 | #define arch_write_relax(lock) cpu_relax() | 211 | #define arch_write_relax(lock) cpu_relax() |
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h index 73c4c007200f..5f9d7572d82b 100644 --- a/arch/x86/include/asm/spinlock_types.h +++ b/arch/x86/include/asm/spinlock_types.h | |||
@@ -34,10 +34,6 @@ typedef struct arch_spinlock { | |||
34 | 34 | ||
35 | #define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } } | 35 | #define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } } |
36 | 36 | ||
37 | #ifdef CONFIG_QUEUE_RWLOCK | ||
38 | #include <asm-generic/qrwlock_types.h> | 37 | #include <asm-generic/qrwlock_types.h> |
39 | #else | ||
40 | #include <asm/rwlock.h> | ||
41 | #endif | ||
42 | 38 | ||
43 | #endif /* _ASM_X86_SPINLOCK_TYPES_H */ | 39 | #endif /* _ASM_X86_SPINLOCK_TYPES_H */ |
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 4d4f96a27638..7ef9a30e7dac 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile | |||
@@ -20,7 +20,6 @@ lib-y := delay.o misc.o cmdline.o | |||
20 | lib-y += thunk_$(BITS).o | 20 | lib-y += thunk_$(BITS).o |
21 | lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o | 21 | lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o |
22 | lib-y += memcpy_$(BITS).o | 22 | lib-y += memcpy_$(BITS).o |
23 | lib-$(CONFIG_SMP) += rwlock.o | ||
24 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o | 23 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o |
25 | lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o | 24 | lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o |
26 | 25 | ||
diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S deleted file mode 100644 index 1cad22139c88..000000000000 --- a/arch/x86/lib/rwlock.S +++ /dev/null | |||
@@ -1,44 +0,0 @@ | |||
1 | /* Slow paths of read/write spinlocks. */ | ||
2 | |||
3 | #include <linux/linkage.h> | ||
4 | #include <asm/alternative-asm.h> | ||
5 | #include <asm/frame.h> | ||
6 | #include <asm/rwlock.h> | ||
7 | |||
8 | #ifdef CONFIG_X86_32 | ||
9 | # define __lock_ptr eax | ||
10 | #else | ||
11 | # define __lock_ptr rdi | ||
12 | #endif | ||
13 | |||
14 | ENTRY(__write_lock_failed) | ||
15 | CFI_STARTPROC | ||
16 | FRAME | ||
17 | 0: LOCK_PREFIX | ||
18 | WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr) | ||
19 | 1: rep; nop | ||
20 | cmpl $WRITE_LOCK_CMP, (%__lock_ptr) | ||
21 | jne 1b | ||
22 | LOCK_PREFIX | ||
23 | WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr) | ||
24 | jnz 0b | ||
25 | ENDFRAME | ||
26 | ret | ||
27 | CFI_ENDPROC | ||
28 | END(__write_lock_failed) | ||
29 | |||
30 | ENTRY(__read_lock_failed) | ||
31 | CFI_STARTPROC | ||
32 | FRAME | ||
33 | 0: LOCK_PREFIX | ||
34 | READ_LOCK_SIZE(inc) (%__lock_ptr) | ||
35 | 1: rep; nop | ||
36 | READ_LOCK_SIZE(cmp) $1, (%__lock_ptr) | ||
37 | js 1b | ||
38 | LOCK_PREFIX | ||
39 | READ_LOCK_SIZE(dec) (%__lock_ptr) | ||
40 | js 0b | ||
41 | ENDFRAME | ||
42 | ret | ||
43 | CFI_ENDPROC | ||
44 | END(__read_lock_failed) | ||