author	Ralf Baechle <ralf@linux-mips.org>	2006-10-30 22:45:07 -0500
committer	Ralf Baechle <ralf@linux-mips.org>	2006-12-04 17:43:14 -0500
commit	0004a9dfeaa709a7f853487aba19932c9b1a87c8 (patch)
tree	e9f1f4b1ca897e57f46778cef283617ba83fc855 /include/asm-mips/spinlock.h
parent	08f57f7ffe5819e537301b1f1109fa4fc670bfff (diff)
[MIPS] Cleanup memory barriers for weakly ordered systems.
Also, the R4000 / R4600 LL/SC instructions imply a sync, so no explicit sync
is needed.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
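
For orientation before the hunks below: the pattern throughout is to drop the
sync instructions that were open-coded inside the LL/SC sequences and instead
issue smp_mb() once after acquiring a lock and once before releasing it. Here
is a minimal sketch of the resulting shape, condensed from the spinlock hunks
in this diff (asm bodies abridged; the .set directives and the
R10000_LLSC_WAR split are omitted for brevity):

	/* Condensed post-patch barrier placement; see the full diff below. */
	static inline void __raw_spin_lock(raw_spinlock_t *lock)
	{
		unsigned int tmp;

		__asm__ __volatile__(
		"1:	ll	%1, %2	# spin_lock (abridged)	\n"
		"	bnez	%1, 1b				\n" /* already held? retry */
		"	li	%1, 1				\n"
		"	sc	%1, %0				\n" /* try to claim it */
		"	beqz	%1, 1b				\n" /* sc failed? retry */
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");

		smp_mb();	/* acquire: keep the critical section below the lock */
	}

	static inline void __raw_spin_unlock(raw_spinlock_t *lock)
	{
		smp_mb();	/* release: keep the critical section above the store */

		__asm__ __volatile__(
		"	sw	$0, %0	# spin_unlock (abridged)\n"
		: "=m" (lock->lock)
		: "m" (lock->lock)
		: "memory");
	}

On CPUs whose LL/SC already implies a sync (the R4000 / R4600 mentioned
above), smp_mb() can compile down to a pure compiler barrier, which is why
hoisting the barrier out of the asm is a cleanup rather than a behavior
change on those parts.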
Diffstat (limited to 'include/asm-mips/spinlock.h')
-rw-r--r--	include/asm-mips/spinlock.h	53
1 file changed, 31 insertions(+), 22 deletions(-)
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h
index c8d5587467bb..fc3217fc1118 100644
--- a/include/asm-mips/spinlock.h
+++ b/include/asm-mips/spinlock.h
@@ -3,12 +3,13 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1999, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000, 06 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
 #ifndef _ASM_SPINLOCK_H
 #define _ASM_SPINLOCK_H
 
+#include <asm/barrier.h>
 #include <asm/war.h>
 
 /*
@@ -40,7 +41,6 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 		"	sc	%1, %0					\n"
 		"	beqzl	%1, 1b					\n"
 		"	nop						\n"
-		"	sync						\n"
 		"	.set	reorder					\n"
 		: "=m" (lock->lock), "=&r" (tmp)
 		: "m" (lock->lock)
@@ -53,19 +53,22 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 		"	li	%1, 1					\n"
 		"	sc	%1, %0					\n"
 		"	beqz	%1, 1b					\n"
-		"	sync						\n"
+		"	nop						\n"
 		"	.set	reorder					\n"
 		: "=m" (lock->lock), "=&r" (tmp)
 		: "m" (lock->lock)
 		: "memory");
 	}
+
+	smp_mb();
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
+	smp_mb();
+
 	__asm__ __volatile__(
 	"	.set	noreorder	# __raw_spin_unlock	\n"
-	"	sync						\n"
 	"	sw	$0, %0					\n"
 	"	.set\treorder					\n"
 	: "=m" (lock->lock)
@@ -86,7 +89,6 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
 		"	beqzl	%2, 1b					\n"
 		"	nop						\n"
 		"	andi	%2, %0, 1				\n"
-		"	sync						\n"
 		"	.set	reorder"
 		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
 		: "m" (lock->lock)
@@ -99,13 +101,14 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
 		"	sc	%2, %1					\n"
 		"	beqz	%2, 1b					\n"
 		"	andi	%2, %0, 1				\n"
-		"	sync						\n"
 		"	.set	reorder"
 		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
 		: "m" (lock->lock)
 		: "memory");
 	}
 
+	smp_mb();
+
 	return res == 0;
 }
 
@@ -143,7 +146,6 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 		"	sc	%1, %0					\n"
 		"	beqzl	%1, 1b					\n"
 		"	nop						\n"
-		"	sync						\n"
 		"	.set	reorder					\n"
 		: "=m" (rw->lock), "=&r" (tmp)
 		: "m" (rw->lock)
@@ -156,12 +158,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 		"	addu	%1, 1					\n"
 		"	sc	%1, %0					\n"
 		"	beqz	%1, 1b					\n"
-		"	sync						\n"
+		"	nop						\n"
 		"	.set	reorder					\n"
 		: "=m" (rw->lock), "=&r" (tmp)
 		: "m" (rw->lock)
 		: "memory");
 	}
+
+	smp_mb();
 }
 
 /* Note the use of sub, not subu which will make the kernel die with an
@@ -171,13 +175,14 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned int tmp;
 
+	smp_mb();
+
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
 		"1:	ll	%1, %2		# __raw_read_unlock	\n"
 		"	sub	%1, 1					\n"
 		"	sc	%1, %0					\n"
 		"	beqzl	%1, 1b					\n"
-		"	sync						\n"
 		: "=m" (rw->lock), "=&r" (tmp)
 		: "m" (rw->lock)
 		: "memory");
@@ -188,7 +193,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 		"	sub	%1, 1					\n"
 		"	sc	%1, %0					\n"
 		"	beqz	%1, 1b					\n"
-		"	sync						\n"
+		"	nop						\n"
 		"	.set	reorder					\n"
 		: "=m" (rw->lock), "=&r" (tmp)
 		: "m" (rw->lock)
@@ -208,7 +213,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 		"	lui	%1, 0x8000				\n"
 		"	sc	%1, %0					\n"
 		"	beqzl	%1, 1b					\n"
-		"	sync						\n"
+		"	nop						\n"
 		"	.set	reorder					\n"
 		: "=m" (rw->lock), "=&r" (tmp)
 		: "m" (rw->lock)
@@ -221,18 +226,22 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 		"	lui	%1, 0x8000				\n"
 		"	sc	%1, %0					\n"
 		"	beqz	%1, 1b					\n"
-		"	sync						\n"
+		"	nop						\n"
 		"	.set	reorder					\n"
 		: "=m" (rw->lock), "=&r" (tmp)
 		: "m" (rw->lock)
 		: "memory");
 	}
+
+	smp_mb();
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
+	smp_mb();
+
 	__asm__ __volatile__(
-	"	sync			# __raw_write_unlock	\n"
+	"				# __raw_write_unlock	\n"
 	"	sw	$0, %0					\n"
 	: "=m" (rw->lock)
 	: "m" (rw->lock)
@@ -252,11 +261,10 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 		"	bnez	%1, 2f					\n"
 		"	addu	%1, 1					\n"
 		"	sc	%1, %0					\n"
-		"	beqzl	%1, 1b					\n"
 		"	.set	reorder					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
+		"	beqzl	%1, 1b					\n"
+		"	nop						\n"
+		__WEAK_ORDERING_MB
 		"	li	%2, 1					\n"
 		"2:							\n"
 		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -271,10 +279,9 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 		"	addu	%1, 1					\n"
 		"	sc	%1, %0					\n"
 		"	beqz	%1, 1b					\n"
+		"	nop						\n"
 		"	.set	reorder					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
+		__WEAK_ORDERING_MB
 		"	li	%2, 1					\n"
 		"2:							\n"
 		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -299,7 +306,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 		"	lui	%1, 0x8000				\n"
 		"	sc	%1, %0					\n"
 		"	beqzl	%1, 1b					\n"
-		"	sync						\n"
+		"	nop						\n"
+		__WEAK_ORDERING_MB
 		"	li	%2, 1					\n"
 		"	.set	reorder					\n"
 		"2:							\n"
@@ -315,7 +323,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 		"	lui	%1, 0x8000				\n"
 		"	sc	%1, %0					\n"
 		"	beqz	%1, 1b					\n"
-		"	sync						\n"
+		"	nop						\n"
+		__WEAK_ORDERING_MB
 		"	li	%2, 1					\n"
 		"	.set	reorder					\n"
 		"2:							\n"
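
A note on the new names: the smp_mb() calls and the __WEAK_ORDERING_MB asm
fragment come from the newly included <asm/barrier.h>, which is introduced by
the parent commit and is not part of this diff. The sketch below shows the
presumed shape of those definitions; treat the exact spelling as an
assumption, not a quote from the header:

	/* Presumed shape of the <asm/barrier.h> definitions used above. */
	#ifdef CONFIG_WEAK_ORDERING
	#define __WEAK_ORDERING_MB	"	sync	\n"	/* real hardware barrier */
	#else
	#define __WEAK_ORDERING_MB	"		\n"	/* LL/SC ordering suffices */
	#endif

	#define smp_mb() \
		__asm__ __volatile__(__WEAK_ORDERING_MB : : : "memory")

Either way the compiler barrier remains, so the critical section cannot be
reordered across the lock operations at compile time; the CONFIG_WEAK_ORDERING
switch only controls whether a hardware sync is emitted as well.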