 include/asm-x86/spinlock.h    |  14
 include/asm-x86/spinlock_32.h |  71
 include/asm-x86/spinlock_64.h |  37
 3 files changed, 64 insertions(+), 58 deletions(-)
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index d74d85e71dcb..e1d555a3dfe5 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -1,5 +1,19 @@
+#ifndef _X86_SPINLOCK_H_
+#define _X86_SPINLOCK_H_
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define CLI_STRING "cli"
+#define STI_STRING "sti"
+#define CLI_STI_CLOBBERS
+#define CLI_STI_INPUT_ARGS
+#endif /* CONFIG_PARAVIRT */
+
 #ifdef CONFIG_X86_32
 # include "spinlock_32.h"
 #else
 # include "spinlock_64.h"
 #endif
+
+#endif
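
The CLI_STRING/STI_STRING/CLI_STI_* fallbacks added above exist so the same asm() operand lists compile whether or not a paravirt back end substitutes its own interrupt-disable/enable sequences via <asm/paravirt.h>. As a reading aid (not part of the patch), here is a minimal C sketch of how such string macros get spliced into an asm template; the function name is illustrative and the non-paravirt definitions are repeated locally so the snippet stands alone:

/* Sketch only: CLI_STRING/STI_STRING expand to the literal instructions
 * here; a CONFIG_PARAVIRT build would redefine them, and the *_CLOBBERS
 * and *_INPUT_ARGS helpers would then add whatever extra operands the
 * hypervisor interface needs. */
#define CLI_STRING "cli"
#define STI_STRING "sti"
#define CLI_STI_CLOBBERS
#define CLI_STI_INPUT_ARGS

static inline void demo_irq_window(void)       /* illustrative name */
{
        asm volatile(STI_STRING "\n\t"          /* enable interrupts   */
                     "rep; nop\n\t"             /* brief pause hint    */
                     CLI_STRING                 /* disable them again  */
                     : : CLI_STI_INPUT_ARGS
                     : "memory" CLI_STI_CLOBBERS);
}

With the empty fallbacks the operand lists reduce to a plain "memory" clobber; cli/sti themselves of course execute only at ring 0.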
diff --git a/include/asm-x86/spinlock_32.h b/include/asm-x86/spinlock_32.h
index d3bcebed60ca..c42c3f12d7ce 100644
--- a/include/asm-x86/spinlock_32.h
+++ b/include/asm-x86/spinlock_32.h
@@ -5,16 +5,6 @@
 #include <asm/rwlock.h>
 #include <asm/page.h>
 #include <asm/processor.h>
-#include <linux/compiler.h>
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#define CLI_STRING "cli"
-#define STI_STRING "sti"
-#define CLI_STI_CLOBBERS
-#define CLI_STI_INPUT_ARGS
-#endif /* CONFIG_PARAVIRT */
 
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
@@ -27,23 +17,24 @@
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
-        return *(volatile signed char *)(&(x)->slock) <= 0;
+        return *(volatile signed char *)(&(lock)->slock) <= 0;
 }
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-        asm volatile("\n1:\t"
-                     LOCK_PREFIX " ; decb %0\n\t"
-                     "jns 3f\n"
-                     "2:\t"
-                     "rep;nop\n\t"
-                     "cmpb $0,%0\n\t"
-                     "jle 2b\n\t"
-                     "jmp 1b\n"
-                     "3:\n\t"
-                     : "+m" (lock->slock) : : "memory");
+        asm volatile(
+                "\n1:\t"
+                LOCK_PREFIX " ; decb %0\n\t"
+                "jns 3f\n"
+                "2:\t"
+                "rep;nop\n\t"
+                "cmpb $0,%0\n\t"
+                "jle 2b\n\t"
+                "jmp 1b\n"
+                "3:\n\t"
+                : "+m" (lock->slock) : : "memory");
 }
 
 /*
@@ -55,7 +46,8 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
  * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
  */
 #ifndef CONFIG_PROVE_LOCKING
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+                                         unsigned long flags)
 {
         asm volatile(
                 "\n1:\t"
@@ -79,18 +71,20 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
                 "5:\n\t"
                 : [slock] "+m" (lock->slock)
                 : [flags] "r" (flags)
-                  CLI_STI_INPUT_ARGS
-                 : "memory" CLI_STI_CLOBBERS);
+                CLI_STI_INPUT_ARGS
+                : "memory" CLI_STI_CLOBBERS);
 }
 #endif
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
-        char oldval;
+        signed char oldval;
+
         asm volatile(
                 "xchgb %b0,%1"
                 :"=q" (oldval), "+m" (lock->slock)
                 :"0" (0) : "memory");
+
         return oldval > 0;
 }
 
@@ -112,7 +106,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-        char oldval = 1;
+        unsigned char oldval = 1;
 
         asm volatile("xchgb %b0, %1"
                      : "=q" (oldval), "+m" (lock->slock)
@@ -139,31 +133,16 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
  *
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
- *
- * The inline assembly is non-obvious. Think about it.
- *
- * Changed to use the same technique as rw semaphores. See
- * semaphore.h for details. -ben
- *
- * the helpers are in arch/i386/kernel/semaphore.c
  */
 
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int __raw_read_can_lock(raw_rwlock_t *x)
+static inline int __raw_read_can_lock(raw_rwlock_t *lock)
 {
-        return (int)(x)->lock > 0;
+        return (int)(lock)->lock > 0;
 }
 
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int __raw_write_can_lock(raw_rwlock_t *x)
+static inline int __raw_write_can_lock(raw_rwlock_t *lock)
 {
-        return (x)->lock == RW_LOCK_BIAS;
+        return (lock)->lock == RW_LOCK_BIAS;
 }
 
 static inline void __raw_read_lock(raw_rwlock_t *rw)
@@ -187,6 +166,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
         atomic_t *count = (atomic_t *)lock;
+
         atomic_dec(count);
         if (atomic_read(count) >= 0)
                 return 1;
@@ -197,6 +177,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
 static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
         atomic_t *count = (atomic_t *)lock;
+
         if (atomic_sub_and_test(RW_LOCK_BIAS, count))
                 return 1;
         atomic_add(RW_LOCK_BIAS, count);
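
Reading aid for the rwlock hunks above: the lock word starts at RW_LOCK_BIAS, each reader subtracts 1 and a writer subtracts the whole bias, so the counter only reaches exactly zero for an uncontended writer and the sign bit flags contention. Below is a hedged userspace sketch of that counter scheme using GCC __sync builtins; the names are illustrative, and it folds the kernel's separate decrement-then-read into a single fetching operation for brevity:

#include <stdbool.h>

#define RW_LOCK_BIAS 0x01000000                 /* same bias idea as asm/rwlock.h */

typedef struct { volatile int lock; } sketch_rwlock_t;  /* initialise with { RW_LOCK_BIAS } */

static bool sketch_read_trylock(sketch_rwlock_t *rw)
{
        /* One unit per reader; the result stays >= 0 unless a writer
         * has already subtracted the bias. */
        if (__sync_sub_and_fetch(&rw->lock, 1) >= 0)
                return true;
        __sync_add_and_fetch(&rw->lock, 1);     /* back out and fail */
        return false;
}

static bool sketch_write_trylock(sketch_rwlock_t *rw)
{
        /* A writer needs the full bias, i.e. no readers and no writer,
         * which is exactly when the counter drops to zero. */
        if (__sync_sub_and_fetch(&rw->lock, RW_LOCK_BIAS) == 0)
                return true;
        __sync_add_and_fetch(&rw->lock, RW_LOCK_BIAS);  /* back out and fail */
        return false;
}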
diff --git a/include/asm-x86/spinlock_64.h b/include/asm-x86/spinlock_64.h
index 88bf981e73cf..3b5adf92ad08 100644
--- a/include/asm-x86/spinlock_64.h
+++ b/include/asm-x86/spinlock_64.h
@@ -33,14 +33,21 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
                 "cmpl $0,%0\n\t"
                 "jle 3b\n\t"
                 "jmp 1b\n"
-                "2:\t" : "=m" (lock->slock) : : "memory");
+                "2:\t"
+                : "=m" (lock->slock) : : "memory");
 }
 
 /*
- * Same as __raw_spin_lock, but reenable interrupts during spinning.
+ * It is easier for the lock validator if interrupts are not re-enabled
+ * in the middle of a lock-acquire. This is a performance feature anyway
+ * so we turn it off:
+ *
+ * NOTE: there's an irqs-on section here, which normally would have to be
+ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
  */
 #ifndef CONFIG_PROVE_LOCKING
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+                                         unsigned long flags)
 {
         asm volatile(
                 "\n1:\t"
@@ -48,12 +55,12 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
                 "jns 5f\n"
                 "testl $0x200, %1\n\t"  /* interrupts were disabled? */
                 "jz 4f\n\t"
-                "sti\n"
+                STI_STRING "\n"
                 "3:\t"
                 "rep;nop\n\t"
                 "cmpl $0, %0\n\t"
                 "jle 3b\n\t"
-                "cli\n\t"
+                CLI_STRING "\n\t"
                 "jmp 1b\n"
                 "4:\t"
                 "rep;nop\n\t"
@@ -61,7 +68,9 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
                 "jg 1b\n\t"
                 "jmp 4b\n"
                 "5:\n\t"
-                : "+m" (lock->slock) : "r" ((unsigned)flags) : "memory");
+                : "+m" (lock->slock)
+                : "r" ((unsigned)flags) CLI_STI_INPUT_ARGS
+                : "memory" CLI_STI_CLOBBERS);
 }
 #endif
 
@@ -79,7 +88,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-        asm volatile("movl $1,%0" :"=m" (lock->slock) :: "memory");
+        asm volatile("movl $1,%0" : "=m" (lock->slock) :: "memory");
 }
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
@@ -114,18 +123,18 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
 
 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-        asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t"
+        asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
                      "jns 1f\n"
-                     "call __read_lock_failed\n"
+                     "call __read_lock_failed\n\t"
                      "1:\n"
                      ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
 }
 
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-        asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t"
+        asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
                      "jz 1f\n"
-                     "\tcall __write_lock_failed\n\t"
+                     "call __write_lock_failed\n\t"
                      "1:\n"
                      ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
 }
@@ -133,6 +142,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
         atomic_t *count = (atomic_t *)lock;
+
         atomic_dec(count);
         if (atomic_read(count) >= 0)
                 return 1;
@@ -143,6 +153,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
 static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
         atomic_t *count = (atomic_t *)lock;
+
         if (atomic_sub_and_test(RW_LOCK_BIAS, count))
                 return 1;
         atomic_add(RW_LOCK_BIAS, count);
@@ -151,12 +162,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
 
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-        asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
+        asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory");
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
-        asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
+        asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ",%0"
                      : "=m" (rw->lock) : : "memory");
 }
 
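
Reading aid for the __raw_spin_lock_flags() variants: the "testl $0x200, %1" above checks bit 9 of the caller's saved EFLAGS, the interrupt-enable flag, so the slow path spins with interrupts re-enabled only if the caller had them on. A hedged C sketch of that policy follows; every name is illustrative, and the real code does all of this inside one asm block using CLI_STRING/STI_STRING so a paravirt build can substitute its own sequences:

#define SKETCH_EFLAGS_IF 0x200          /* EFLAGS.IF, the bit tested above */

static inline void sketch_irq_enable(void)  { asm volatile("sti" ::: "memory"); }
static inline void sketch_irq_disable(void) { asm volatile("cli" ::: "memory"); }
static inline void sketch_cpu_relax(void)   { asm volatile("rep; nop" ::: "memory"); }

/* The lock word is 1 when free; decrement to take it, and while waiting
 * open an interrupt window only if the saved flags say IF was set. */
static inline void sketch_spin_lock_flags(volatile int *slock,
                                          unsigned long flags)
{
        for (;;) {
                if (__sync_fetch_and_sub(slock, 1) > 0)
                        return;                         /* acquired */

                if (flags & SKETCH_EFLAGS_IF)
                        sketch_irq_enable();            /* ~ STI_STRING */

                while (*slock <= 0)
                        sketch_cpu_relax();             /* ~ "rep;nop"  */

                if (flags & SKETCH_EFLAGS_IF)
                        sketch_irq_disable();           /* ~ CLI_STRING */
        }
}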