-rw-r--r--  arch/x86_64/lib/Makefile       |  2
-rw-r--r--  arch/x86_64/lib/rwlock.S       | 38
-rw-r--r--  arch/x86_64/lib/thunk.S        | 30
-rw-r--r--  include/asm-x86_64/rwlock.h    | 68
-rw-r--r--  include/asm-x86_64/spinlock.h  | 11
5 files changed, 51 insertions(+), 98 deletions(-)
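
[Annotation] This patch moves the rwlock slow paths out of line: __read_lock_failed and __write_lock_failed leave thunk.S for a new arch/x86_64/lib/rwlock.S, and now take the lock pointer in %rdi (the normal first C argument register) instead of %rax, which lets the __build_read_lock/__build_write_lock macros in rwlock.h shrink to a plain call. The lock word is a biased 32-bit counter. A C illustration of the invariant only; apart from RW_LOCK_BIAS, none of these names appear in the patch:

	#define RW_LOCK_BIAS 0x01000000

	/*
	 * counter == RW_LOCK_BIAS:       unlocked
	 * 0 < counter < RW_LOCK_BIAS:    held by (RW_LOCK_BIAS - counter) readers
	 * counter < 0:                   contended (sign bit is the contended bit)
	 *
	 * A reader claims by subtracting 1 and checking the result is
	 * non-negative; a writer claims by subtracting RW_LOCK_BIAS and
	 * checking the result is exactly zero (no readers, no writer).
	 */
	static inline int reader_claim_ok(int after_sub) { return after_sub >= 0; }
	static inline int writer_claim_ok(int after_sub) { return after_sub == 0; }
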
diff --git a/arch/x86_64/lib/Makefile b/arch/x86_64/lib/Makefile
index ccef6ae747a3..b78d4170fce2 100644
--- a/arch/x86_64/lib/Makefile
+++ b/arch/x86_64/lib/Makefile
@@ -9,4 +9,4 @@ obj-y := io.o iomap_copy.o
 lib-y := csum-partial.o csum-copy.o csum-wrappers.o delay.o \
 	usercopy.o getuser.o putuser.o \
 	thunk.o clear_page.o copy_page.o bitstr.o bitops.o
-lib-y += memcpy.o memmove.o memset.o copy_user.o
+lib-y += memcpy.o memmove.o memset.o copy_user.o rwlock.o
diff --git a/arch/x86_64/lib/rwlock.S b/arch/x86_64/lib/rwlock.S
new file mode 100644
index 000000000000..0cde1f807314
--- /dev/null
+++ b/arch/x86_64/lib/rwlock.S
@@ -0,0 +1,38 @@
+/* Slow paths of read/write spinlocks. */
+
+#include <linux/linkage.h>
+#include <asm/rwlock.h>
+#include <asm/alternative-asm.i>
+#include <asm/dwarf2.h>
+
+/* rdi:	pointer to rwlock_t */
+ENTRY(__write_lock_failed)
+	CFI_STARTPROC
+	LOCK_PREFIX
+	addl $RW_LOCK_BIAS,(%rdi)
+1:	rep
+	nop
+	cmpl $RW_LOCK_BIAS,(%rdi)
+	jne 1b
+	LOCK_PREFIX
+	subl $RW_LOCK_BIAS,(%rdi)
+	jnz  __write_lock_failed
+	ret
+	CFI_ENDPROC
+END(__write_lock_failed)
+
+/* rdi:	pointer to rwlock_t */
+ENTRY(__read_lock_failed)
+	CFI_STARTPROC
+	LOCK_PREFIX
+	incl (%rdi)
+1:	rep
+	nop
+	cmpl $1,(%rdi)
+	js 1b
+	LOCK_PREFIX
+	decl (%rdi)
+	js __read_lock_failed
+	ret
+	CFI_ENDPROC
+END(__read_lock_failed)
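
[Annotation] In C, the two slow paths above amount to the loops below. This is a sketch for readability only: the real helpers must stay in assembly, because the inline-asm call sites (see the rwlock.h hunk further down) declare no clobbered registers, so the helpers may touch nothing but the lock word and the flags. locked_add()/locked_sub()/locked_inc()/locked_dec() are hypothetical stand-ins for the lock-prefixed instructions, returning the post-operation value.

	/* Sketch of __write_lock_failed; 'lock' arrives in %rdi. */
	void write_lock_failed_sketch(int *lock)
	{
		for (;;) {
			locked_add(RW_LOCK_BIAS, lock);	/* undo the failed claim */
			while (*lock != RW_LOCK_BIAS)	/* rep; nop spin until   */
				cpu_relax();		/*   fully unlocked      */
			if (locked_sub(RW_LOCK_BIAS, lock) == 0)
				return;			/* claimed it outright   */
		}
	}

	/* Sketch of __read_lock_failed. */
	void read_lock_failed_sketch(int *lock)
	{
		for (;;) {
			locked_inc(lock);		/* undo the failed claim */
			while (*lock <= 0)		/* spin while a writer   */
				cpu_relax();		/*   holds or wants it   */
			if (locked_dec(lock) >= 0)
				return;			/* got a reader slot     */
		}
	}
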
diff --git a/arch/x86_64/lib/thunk.S b/arch/x86_64/lib/thunk.S
index 332ea5dff916..6cff27c775ae 100644
--- a/arch/x86_64/lib/thunk.S
+++ b/arch/x86_64/lib/thunk.S
@@ -67,33 +67,3 @@ restore_norax:
 	RESTORE_ARGS 1
 	ret
 	CFI_ENDPROC
-
-#ifdef CONFIG_SMP
-/* Support for read/write spinlocks. */
-	.text
-/* rax:	pointer to rwlock_t */
-ENTRY(__write_lock_failed)
-	lock
-	addl $RW_LOCK_BIAS,(%rax)
-1:	rep
-	nop
-	cmpl $RW_LOCK_BIAS,(%rax)
-	jne 1b
-	lock
-	subl $RW_LOCK_BIAS,(%rax)
-	jnz  __write_lock_failed
-	ret
-
-/* rax:	pointer to rwlock_t */
-ENTRY(__read_lock_failed)
-	lock
-	incl (%rax)
-1:	rep
-	nop
-	cmpl $1,(%rax)
-	js 1b
-	lock
-	decl (%rax)
-	js __read_lock_failed
-	ret
-#endif
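
[Annotation] The deleted helpers took the lock pointer in %rax, a private calling convention; with a constant lock operand the old macros even had to push %rax, form the address with leaq, and pop it around the call. The replacements in rwlock.S take %rdi, so a call site is a normal-looking call. Schematically, with the constraint strings taken from the rwlock.h hunk below:

	/* Before: pointer pinned in %rax via the "a" constraint. */
	asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" /* ... */
		     ::"a" (rw) : "memory");

	/* After: pointer in %rdi via "D", the first C argument register,
	 * with the bias available as an immediate operand. */
	asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" /* ... */
		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
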
diff --git a/include/asm-x86_64/rwlock.h b/include/asm-x86_64/rwlock.h
index dea0e9459264..28a080d23119 100644
--- a/include/asm-x86_64/rwlock.h
+++ b/include/asm-x86_64/rwlock.h
@@ -18,69 +18,21 @@
 #ifndef _ASM_X86_64_RWLOCK_H
 #define _ASM_X86_64_RWLOCK_H
 
-#include <linux/stringify.h>
-
 #define RW_LOCK_BIAS		 0x01000000
 #define RW_LOCK_BIAS_STR	"0x01000000"
 
-#define __build_read_lock_ptr(rw, helper)   \
+#define __build_read_lock(rw)   \
 	asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" \
-		     "js 2f\n" \
-		     "1:\n" \
-		     LOCK_SECTION_START("") \
-		     "2:\tcall " helper "\n\t" \
-		     "jmp 1b\n" \
-		     LOCK_SECTION_END \
-		     ::"a" (rw) : "memory")
-
-#define __build_read_lock_const(rw, helper)   \
-	asm volatile(LOCK_PREFIX "subl $1,%0\n\t" \
-		     "js 2f\n" \
+		     "jns 1f\n" \
+		     "call __read_lock_failed\n" \
 		     "1:\n" \
-		     LOCK_SECTION_START("") \
-		     "2:\tpushq %%rax\n\t" \
-		     "leaq %0,%%rax\n\t" \
-		     "call " helper "\n\t" \
-		     "popq %%rax\n\t" \
-		     "jmp 1b\n" \
-		     LOCK_SECTION_END \
-		     :"=m" (*((volatile int *)rw))::"memory")
-
-#define __build_read_lock(rw, helper)	do { \
-						if (__builtin_constant_p(rw)) \
-							__build_read_lock_const(rw, helper); \
-						else \
-							__build_read_lock_ptr(rw, helper); \
-					} while (0)
+		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory")
 
-#define __build_write_lock_ptr(rw, helper) \
-	asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
-		     "jnz 2f\n" \
+#define __build_write_lock(rw) \
+	asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t" \
+		     "jz 1f\n" \
+		     "\tcall __write_lock_failed\n\t" \
 		     "1:\n" \
-		     LOCK_SECTION_START("") \
-		     "2:\tcall " helper "\n\t" \
-		     "jmp 1b\n" \
-		     LOCK_SECTION_END \
-		     ::"a" (rw) : "memory")
-
-#define __build_write_lock_const(rw, helper) \
-	asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
-		     "jnz 2f\n" \
-		     "1:\n" \
-		     LOCK_SECTION_START("") \
-		     "2:\tpushq %%rax\n\t" \
-		     "leaq %0,%%rax\n\t" \
-		     "call " helper "\n\t" \
-		     "popq %%rax\n\t" \
-		     "jmp 1b\n" \
-		     LOCK_SECTION_END \
-		     :"=m" (*((volatile long *)rw))::"memory")
-
-#define __build_write_lock(rw, helper)	do { \
-						if (__builtin_constant_p(rw)) \
-							__build_write_lock_const(rw, helper); \
-						else \
-							__build_write_lock_ptr(rw, helper); \
-					} while (0)
+		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory")
 
 #endif
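
[Annotation] The rewritten macros trade the out-of-line trampoline ("js 2f" into a LOCK_SECTION_START block, then "jmp 1b" back) for a branch-over-call emitted in line. A usage sketch, with the expansion spelled out in a comment; example_read_lock is not from the patch:

	static inline void example_read_lock(raw_rwlock_t *rw)
	{
		/* With rw in %rdi this emits, in place:
		 *	lock ; subl $1,(%rdi)	  # claim a reader slot
		 *	jns 1f			  # result >= 0: got it
		 *	call __read_lock_failed	  # contended slow path
		 * 1:
		 */
		__build_read_lock(rw);
	}
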
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 248a79f0eaff..a8e3d89f591f 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -79,13 +79,6 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  *
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
- *
- * The inline assembly is non-obvious.  Think about it.
- *
- * Changed to use the same technique as rw semaphores.  See
- * semaphore.h for details.  -ben
- *
- * the helpers are in arch/i386/kernel/semaphore.c
  */
 
 #define __raw_read_can_lock(x)		((int)(x)->lock > 0)
@@ -93,12 +86,12 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 
 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-	__build_read_lock(rw, "__read_lock_failed");
+	__build_read_lock(rw);
 }
 
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-	__build_write_lock(rw, "__write_lock_failed");
+	__build_write_lock(rw);
 }
 
 static inline int __raw_read_trylock(raw_rwlock_t *lock)
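
[Annotation] The trylock variants appear here only as context and are untouched by the patch. For completeness, a read trylock under the same bias scheme looks roughly like this; the actual body is not shown in the hunk, so treat it as a sketch:

	static inline int read_trylock_sketch(atomic_t *count)
	{
		atomic_dec(count);		/* optimistically take a reader slot */
		if (atomic_read(count) >= 0)
			return 1;		/* no writer held or waiting */
		atomic_inc(count);		/* back out: writer present  */
		return 0;
	}
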