Diffstat (limited to 'include/asm-x86/spinlock_32.h')
-rw-r--r--	include/asm-x86/spinlock_32.h	71
1 file changed, 26 insertions(+), 45 deletions(-)
diff --git a/include/asm-x86/spinlock_32.h b/include/asm-x86/spinlock_32.h
index d3bcebed60ca..c42c3f12d7ce 100644
--- a/include/asm-x86/spinlock_32.h
+++ b/include/asm-x86/spinlock_32.h
@@ -5,16 +5,6 @@
 #include <asm/rwlock.h>
 #include <asm/page.h>
 #include <asm/processor.h>
-#include <linux/compiler.h>
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#define CLI_STRING	"cli"
-#define STI_STRING	"sti"
-#define CLI_STI_CLOBBERS
-#define CLI_STI_INPUT_ARGS
-#endif /* CONFIG_PARAVIRT */
 
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
@@ -27,23 +17,24 @@
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
-	return *(volatile signed char *)(&(x)->slock) <= 0;
+	return *(volatile signed char *)(&(lock)->slock) <= 0;
 }
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-	asm volatile("\n1:\t"
-		     LOCK_PREFIX " ; decb %0\n\t"
-		     "jns 3f\n"
-		     "2:\t"
-		     "rep;nop\n\t"
-		     "cmpb $0,%0\n\t"
-		     "jle 2b\n\t"
-		     "jmp 1b\n"
-		     "3:\n\t"
-		     : "+m" (lock->slock) : : "memory");
+	asm volatile(
+		"\n1:\t"
+		LOCK_PREFIX " ; decb %0\n\t"
+		"jns 3f\n"
+		"2:\t"
+		"rep;nop\n\t"
+		"cmpb $0,%0\n\t"
+		"jle 2b\n\t"
+		"jmp 1b\n"
+		"3:\n\t"
+		: "+m" (lock->slock) : : "memory");
 }
 
 /*
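The restructured asm above is the classic x86 byte spinlock: slock holds 1 when free, and the locked decrement that sees 1 wins. A rough C11 sketch of the same protocol, for illustration only (toy_spinlock_t, toy_spin_lock and the stdatomic calls are my stand-ins, not the kernel's code):

	#include <stdatomic.h>
	#include <immintrin.h>	/* _mm_pause() is the intrinsic form of "rep;nop" */

	typedef struct {
		_Atomic signed char slock;	/* 1 = free, <= 0 = held/contended */
	} toy_spinlock_t;

	static void toy_spin_lock(toy_spinlock_t *lock)
	{
		for (;;) {
			/* "lock; decb %0; jns 3f": only the decrement
			 * that saw a positive value takes the lock */
			if (atomic_fetch_sub_explicit(&lock->slock, 1,
						      memory_order_acquire) > 0)
				return;
			/* "2: rep;nop; cmpb $0,%0; jle 2b": spin read-only
			 * until the byte goes positive, then retry the locked
			 * decrement (like decb, this assumes far fewer than
			 * 128 waiters, or the signed byte would wrap) */
			while (atomic_load_explicit(&lock->slock,
						    memory_order_relaxed) <= 0)
				_mm_pause();
		}
	}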
@@ -55,7 +46,8 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
  * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
  */
 #ifndef CONFIG_PROVE_LOCKING
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+					 unsigned long flags)
 {
 	asm volatile(
 		"\n1:\t"
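The asm body elided between this hunk and the next is what makes the _flags variant interesting: while waiting it re-enables interrupts if the caller's saved EFLAGS had IF set, and disables them again before retrying. A hedged sketch of that control flow (toy_irq_enable/disable are hypothetical stand-ins for the sti/cli, or paravirt STI_STRING/CLI_STRING, that the asm emits):

	static void toy_irq_enable(void)  { /* stand-in for "sti" */ }
	static void toy_irq_disable(void) { /* stand-in for "cli" */ }

	static void toy_spin_lock_flags(toy_spinlock_t *lock, unsigned long flags)
	{
		while (atomic_fetch_sub_explicit(&lock->slock, 1,
						 memory_order_acquire) <= 0) {
			/* 0x200 is the IF bit in the caller's saved EFLAGS:
			 * if interrupts were on, let them in while we spin */
			if (flags & 0x200)
				toy_irq_enable();
			while (atomic_load_explicit(&lock->slock,
						    memory_order_relaxed) <= 0)
				_mm_pause();
			if (flags & 0x200)
				toy_irq_disable();	/* IRQs off before retry */
		}
	}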
@@ -79,18 +71,20 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 		"5:\n\t"
 		: [slock] "+m" (lock->slock)
 		: [flags] "r" (flags)
 		  CLI_STI_INPUT_ARGS
 		: "memory" CLI_STI_CLOBBERS);
 }
 #endif
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
-	char oldval;
+	signed char oldval;
+
 	asm volatile(
 		"xchgb %b0,%1"
 		:"=q" (oldval), "+m" (lock->slock)
 		:"0" (0) : "memory");
+
 	return oldval > 0;
 }
 
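The switch from plain char to signed char above keeps "oldval > 0" meaning "was free" even where plain char is unsigned: a contended lock byte is negative, and as unsigned 0xff it would compare greater than zero and fake a successful trylock. A hypothetical C equivalent of the xchgb trylock (toy_* names are mine):

	static int toy_spin_trylock(toy_spinlock_t *lock)
	{
		/* "xchgb %b0,%1" with %0 preloaded to 0: swap 0 in
		 * unconditionally and inspect what was there; a positive
		 * old value (1) means we took the lock */
		signed char old = atomic_exchange_explicit(&lock->slock, 0,
							   memory_order_acquire);
		return old > 0;
	}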
@@ -112,7 +106,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	char oldval = 1;
+	unsigned char oldval = 1;
 
 	asm volatile("xchgb %b0, %1"
 		     : "=q" (oldval), "+m" (lock->slock)
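For symmetry, a sketch of the unlock path in this hunk: write 1 back into the byte. As far as I can tell this xchgb form is the variant used when plain stores may be reordered on old PPro-class CPUs; a release store models its ordering:

	static void toy_spin_unlock(toy_spinlock_t *lock)
	{
		/* the implicitly locked xchg gives the asm its release
		 * ordering; memory_order_release is the C11 analogue */
		atomic_store_explicit(&lock->slock, 1, memory_order_release);
	}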
@@ -139,31 +133,16 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
  *
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
- *
- * The inline assembly is non-obvious. Think about it.
- *
- * Changed to use the same technique as rw semaphores.  See
- * semaphore.h for details.  -ben
- *
- * the helpers are in arch/i386/kernel/semaphore.c
  */
 
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int __raw_read_can_lock(raw_rwlock_t *x)
+static inline int __raw_read_can_lock(raw_rwlock_t *lock)
 {
-	return (int)(x)->lock > 0;
+	return (int)(lock)->lock > 0;
 }
 
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int __raw_write_can_lock(raw_rwlock_t *x)
+static inline int __raw_write_can_lock(raw_rwlock_t *lock)
 {
-	return (x)->lock == RW_LOCK_BIAS;
+	return (lock)->lock == RW_LOCK_BIAS;
 }
 
 static inline void __raw_read_lock(raw_rwlock_t *rw)
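To make the two predicates above concrete, here is the bias scheme spelled out, assuming RW_LOCK_BIAS is 0x01000000 as defined in asm/rwlock.h (the toy_* names are mine):

	#define TOY_RW_BIAS 0x01000000		/* assumed value of RW_LOCK_BIAS */

	typedef struct {
		atomic_int lock;		/* initialised to TOY_RW_BIAS */
	} toy_rwlock_t;

	/*
	 * lock == TOY_RW_BIAS		free: a reader or a writer can lock
	 * 0 < lock < TOY_RW_BIAS	read-held: each reader subtracted 1
	 * lock <= 0			a writer subtracted the whole bias
	 *
	 * hence read_can_lock is "> 0" and write_can_lock is "== RW_LOCK_BIAS".
	 */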
@@ -187,6 +166,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
+
 	atomic_dec(count);
 	if (atomic_read(count) >= 0)
 		return 1;
@@ -197,6 +177,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
 static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
+
 	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
 		return 1;
 	atomic_add(RW_LOCK_BIAS, count);
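Hedged C11 equivalents of the two trylocks above, built on the toy_rwlock_t sketched earlier. They use the value returned by the atomic op directly rather than the kernel's separate atomic_read, but the back-out-on-failure shape is the same:

	static int toy_read_trylock(toy_rwlock_t *rw)
	{
		/* take a reader slot; undo if a writer already owns the bias */
		if (atomic_fetch_sub(&rw->lock, 1) > 0)	/* old > 0 => new >= 0 */
			return 1;
		atomic_fetch_add(&rw->lock, 1);
		return 0;
	}

	static int toy_write_trylock(toy_rwlock_t *rw)
	{
		/* claim the whole bias in one subtraction; the old value
		 * equals the bias (result 0, which is what
		 * atomic_sub_and_test checks) only when nobody held it */
		if (atomic_fetch_sub(&rw->lock, TOY_RW_BIAS) == TOY_RW_BIAS)
			return 1;
		atomic_fetch_add(&rw->lock, TOY_RW_BIAS);
		return 0;
	}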