| author    | Maciej W. Rozycki <macro@linux-mips.org> | 2005-06-14 13:35:03 -0400 |
|-----------|------------------------------------------|---------------------------|
| committer | Ralf Baechle <ralf@linux-mips.org>       | 2005-10-29 14:31:22 -0400 |
| commit    | aac8aa7717a23a9bf8740dbfb59755b1d62f04bf (patch) | |
| tree      | cae373db64607dafc496827c0d2f3b67b91d880f | |
| parent    | fded2e508a1d3c26ab477ab3b98f13274d4359ba (diff) | |
Enable a suitable ISA for the assembler around ll/sc so that code
builds even for processors that don't support the instructions.
Plus minor formatting fixes.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
-rw-r--r--  arch/mips/kernel/semaphore.c  | 12
-rw-r--r--  include/asm-mips/atomic.h     | 40
-rw-r--r--  include/asm-mips/bitops.h     | 74
-rw-r--r--  include/asm-mips/system.h     | 24
4 files changed, 118 insertions(+), 32 deletions(-)
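For context before the per-file diffs: the pattern applied throughout is to bracket each ll/sc (or lld/scd) sequence with a `.set mips2` (or `.set mips3`) directive before the load-linked and a `.set mips0` after the loop, so the assembler accepts the instructions even when the rest of the file is assembled for an ISA that lacks them (e.g. -march=mips1). Whether the sequence actually executes is still decided at run time by `cpu_has_llsc`; the directives only satisfy the assembler. The sketch below is a minimal standalone illustration of that pattern, not the kernel code itself — the function and variable names are made up for the example.

```c
/*
 * Minimal sketch (assumed names, not kernel code): an ll/sc
 * read-modify-write loop bracketed with ".set mips2"/".set mips0"
 * so GAS accepts ll/sc even if the file is otherwise assembled
 * for an ISA without them.  The caller is expected to have
 * checked at run time that the CPU really has ll/sc.
 */
static inline void atomic_add_sketch(int i, int *counter)
{
	int temp;

	__asm__ __volatile__(
	"	.set	mips2				\n"
	"1:	ll	%0, %1	# load-linked		\n"
	"	addu	%0, %2				\n"
	"	sc	%0, %1	# store-conditional	\n"
	"	beqz	%0, 1b	# retry if sc failed	\n"
	"	.set	mips0				\n"
	: "=&r" (temp), "=m" (*counter)
	: "Ir" (i), "m" (*counter));
}
```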
diff --git a/arch/mips/kernel/semaphore.c b/arch/mips/kernel/semaphore.c
index 9c40fe5a8e8d..dbb145ee00a7 100644
--- a/arch/mips/kernel/semaphore.c
+++ b/arch/mips/kernel/semaphore.c
@@ -42,24 +42,28 @@ static inline int __sem_update_count(struct semaphore *sem, int incr)
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"1: ll %0, %2 \n"
+		" .set mips2 \n"
+		"1: ll %0, %2 # __sem_update_count \n"
 		" sra %1, %0, 31 \n"
 		" not %1 \n"
 		" and %1, %0, %1 \n"
-		" add %1, %1, %3 \n"
+		" addu %1, %1, %3 \n"
 		" sc %1, %2 \n"
 		" beqzl %1, 1b \n"
+		" .set mips0 \n"
 		: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
 		: "r" (incr), "m" (sem->count));
 	} else if (cpu_has_llsc) {
 		__asm__ __volatile__(
-		"1: ll %0, %2 \n"
+		" .set mips2 \n"
+		"1: ll %0, %2 # __sem_update_count \n"
 		" sra %1, %0, 31 \n"
 		" not %1 \n"
 		" and %1, %0, %1 \n"
-		" add %1, %1, %3 \n"
+		" addu %1, %1, %3 \n"
 		" sc %1, %2 \n"
 		" beqz %1, 1b \n"
+		" .set mips0 \n"
 		: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
 		: "r" (incr), "m" (sem->count));
 	} else {
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index c0bd8d014e14..80ea3fbd3ece 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -62,20 +62,24 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips2 \n"
 		"1: ll %0, %1 # atomic_add \n"
 		" addu %0, %2 \n"
 		" sc %0, %1 \n"
 		" beqzl %0, 1b \n"
+		" .set mips0 \n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else if (cpu_has_llsc) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips2 \n"
 		"1: ll %0, %1 # atomic_add \n"
 		" addu %0, %2 \n"
 		" sc %0, %1 \n"
 		" beqz %0, 1b \n"
+		" .set mips0 \n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else {
@@ -100,20 +104,24 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips2 \n"
 		"1: ll %0, %1 # atomic_sub \n"
 		" subu %0, %2 \n"
 		" sc %0, %1 \n"
 		" beqzl %0, 1b \n"
+		" .set mips0 \n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else if (cpu_has_llsc) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips2 \n"
 		"1: ll %0, %1 # atomic_sub \n"
 		" subu %0, %2 \n"
 		" sc %0, %1 \n"
 		" beqz %0, 1b \n"
+		" .set mips0 \n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else {
@@ -136,12 +144,14 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips2 \n"
 		"1: ll %1, %2 # atomic_add_return \n"
 		" addu %0, %1, %3 \n"
 		" sc %0, %2 \n"
 		" beqzl %0, 1b \n"
 		" addu %0, %1, %3 \n"
 		" sync \n"
+		" .set mips0 \n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -149,12 +159,14 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips2 \n"
 		"1: ll %1, %2 # atomic_add_return \n"
 		" addu %0, %1, %3 \n"
 		" sc %0, %2 \n"
 		" beqz %0, 1b \n"
 		" addu %0, %1, %3 \n"
 		" sync \n"
+		" .set mips0 \n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -179,12 +191,14 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips2 \n"
 		"1: ll %1, %2 # atomic_sub_return \n"
 		" subu %0, %1, %3 \n"
 		" sc %0, %2 \n"
 		" beqzl %0, 1b \n"
 		" subu %0, %1, %3 \n"
 		" sync \n"
+		" .set mips0 \n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -192,12 +206,14 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips2 \n"
 		"1: ll %1, %2 # atomic_sub_return \n"
 		" subu %0, %1, %3 \n"
 		" sc %0, %2 \n"
 		" beqz %0, 1b \n"
 		" subu %0, %1, %3 \n"
 		" sync \n"
+		" .set mips0 \n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -229,6 +245,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips2 \n"
 		"1: ll %1, %2 # atomic_sub_if_positive\n"
 		" subu %0, %1, %3 \n"
 		" bltz %0, 1f \n"
@@ -236,6 +253,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		" beqzl %0, 1b \n"
 		" sync \n"
 		"1: \n"
+		" .set mips0 \n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -243,6 +261,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips2 \n"
 		"1: ll %1, %2 # atomic_sub_if_positive\n"
 		" subu %0, %1, %3 \n"
 		" bltz %0, 1f \n"
@@ -250,6 +269,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		" beqz %0, 1b \n"
 		" sync \n"
 		"1: \n"
+		" .set mips0 \n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -367,20 +387,24 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips3 \n"
 		"1: lld %0, %1 # atomic64_add \n"
 		" addu %0, %2 \n"
 		" scd %0, %1 \n"
 		" beqzl %0, 1b \n"
+		" .set mips0 \n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else if (cpu_has_llsc) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips3 \n"
 		"1: lld %0, %1 # atomic64_add \n"
 		" addu %0, %2 \n"
 		" scd %0, %1 \n"
 		" beqz %0, 1b \n"
+		" .set mips0 \n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else {
@@ -405,20 +429,24 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips3 \n"
 		"1: lld %0, %1 # atomic64_sub \n"
 		" subu %0, %2 \n"
 		" scd %0, %1 \n"
 		" beqzl %0, 1b \n"
+		" .set mips0 \n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else if (cpu_has_llsc) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips3 \n"
 		"1: lld %0, %1 # atomic64_sub \n"
 		" subu %0, %2 \n"
 		" scd %0, %1 \n"
 		" beqz %0, 1b \n"
+		" .set mips0 \n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else {
@@ -441,12 +469,14 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips3 \n"
 		"1: lld %1, %2 # atomic64_add_return \n"
 		" addu %0, %1, %3 \n"
 		" scd %0, %2 \n"
 		" beqzl %0, 1b \n"
 		" addu %0, %1, %3 \n"
 		" sync \n"
+		" .set mips0 \n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -454,12 +484,14 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips3 \n"
 		"1: lld %1, %2 # atomic64_add_return \n"
 		" addu %0, %1, %3 \n"
 		" scd %0, %2 \n"
 		" beqz %0, 1b \n"
 		" addu %0, %1, %3 \n"
 		" sync \n"
+		" .set mips0 \n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -484,12 +516,14 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips3 \n"
 		"1: lld %1, %2 # atomic64_sub_return \n"
 		" subu %0, %1, %3 \n"
 		" scd %0, %2 \n"
 		" beqzl %0, 1b \n"
 		" subu %0, %1, %3 \n"
 		" sync \n"
+		" .set mips0 \n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -497,12 +531,14 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips3 \n"
 		"1: lld %1, %2 # atomic64_sub_return \n"
 		" subu %0, %1, %3 \n"
 		" scd %0, %2 \n"
 		" beqz %0, 1b \n"
 		" subu %0, %1, %3 \n"
 		" sync \n"
+		" .set mips0 \n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -534,6 +570,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips3 \n"
 		"1: lld %1, %2 # atomic64_sub_if_positive\n"
 		" dsubu %0, %1, %3 \n"
 		" bltz %0, 1f \n"
@@ -541,6 +578,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		" beqzl %0, 1b \n"
 		" sync \n"
 		"1: \n"
+		" .set mips0 \n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -548,6 +586,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" .set mips3 \n"
 		"1: lld %1, %2 # atomic64_sub_if_positive\n"
 		" dsubu %0, %1, %3 \n"
 		" bltz %0, 1f \n"
@@ -555,6 +594,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		" beqz %0, 1b \n"
 		" sync \n"
 		"1: \n"
+		" .set mips0 \n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index eb8d79dba11c..1dc35879b362 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -18,14 +18,16 @@
 #if (_MIPS_SZLONG == 32)
 #define SZLONG_LOG 5
 #define SZLONG_MASK 31UL
 #define __LL "ll "
 #define __SC "sc "
+#define __SET_MIPS ".set mips2 "
 #define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
 #elif (_MIPS_SZLONG == 64)
 #define SZLONG_LOG 6
 #define SZLONG_MASK 63UL
 #define __LL "lld "
 #define __SC "scd "
+#define __SET_MIPS ".set mips3 "
 #define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
 #endif
 
@@ -72,18 +74,22 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		__asm__ __volatile__(
+		" " __SET_MIPS " \n"
 		"1: " __LL "%0, %1 # set_bit \n"
 		" or %0, %2 \n"
-		" "__SC "%0, %1 \n"
+		" " __SC "%0, %1 \n"
 		" beqzl %0, 1b \n"
+		" .set mips0 \n"
 		: "=&r" (temp), "=m" (*m)
 		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
 	} else if (cpu_has_llsc) {
 		__asm__ __volatile__(
+		" " __SET_MIPS " \n"
 		"1: " __LL "%0, %1 # set_bit \n"
 		" or %0, %2 \n"
-		" "__SC "%0, %1 \n"
+		" " __SC "%0, %1 \n"
 		" beqz %0, 1b \n"
+		" .set mips0 \n"
 		: "=&r" (temp), "=m" (*m)
 		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
 	} else {
@@ -132,18 +138,22 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		__asm__ __volatile__(
+		" " __SET_MIPS " \n"
 		"1: " __LL "%0, %1 # clear_bit \n"
 		" and %0, %2 \n"
 		" " __SC "%0, %1 \n"
 		" beqzl %0, 1b \n"
+		" .set mips0 \n"
 		: "=&r" (temp), "=m" (*m)
 		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
 	} else if (cpu_has_llsc) {
 		__asm__ __volatile__(
+		" " __SET_MIPS " \n"
 		"1: " __LL "%0, %1 # clear_bit \n"
 		" and %0, %2 \n"
 		" " __SC "%0, %1 \n"
 		" beqz %0, 1b \n"
+		" .set mips0 \n"
 		: "=&r" (temp), "=m" (*m)
 		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
 	} else {
@@ -191,10 +201,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" " __SET_MIPS " \n"
 		"1: " __LL "%0, %1 # change_bit \n"
 		" xor %0, %2 \n"
-		" "__SC "%0, %1 \n"
+		" " __SC "%0, %1 \n"
 		" beqzl %0, 1b \n"
+		" .set mips0 \n"
 		: "=&r" (temp), "=m" (*m)
 		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
 	} else if (cpu_has_llsc) {
@@ -202,10 +214,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		" " __SET_MIPS " \n"
 		"1: " __LL "%0, %1 # change_bit \n"
 		" xor %0, %2 \n"
-		" "__SC "%0, %1 \n"
+		" " __SC "%0, %1 \n"
 		" beqz %0, 1b \n"
+		" .set mips0 \n"
 		: "=&r" (temp), "=m" (*m)
 		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
 	} else {
@@ -253,14 +267,16 @@ static inline int test_and_set_bit(unsigned long nr,
 		unsigned long temp, res;
 
 		__asm__ __volatile__(
+		" " __SET_MIPS " \n"
 		"1: " __LL "%0, %1 # test_and_set_bit \n"
 		" or %2, %0, %3 \n"
 		" " __SC "%2, %1 \n"
 		" beqzl %2, 1b \n"
 		" and %2, %0, %3 \n"
 #ifdef CONFIG_SMP
-		"sync \n"
+		" sync \n"
 #endif
+		" .set mips0 \n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
 		: "memory");
@@ -271,16 +287,18 @@ static inline int test_and_set_bit(unsigned long nr,
 		unsigned long temp, res;
 
 		__asm__ __volatile__(
-		" .set noreorder # test_and_set_bit \n"
-		"1: " __LL "%0, %1 \n"
+		" .set push \n"
+		" .set noreorder \n"
+		" " __SET_MIPS " \n"
+		"1: " __LL "%0, %1 # test_and_set_bit \n"
 		" or %2, %0, %3 \n"
 		" " __SC "%2, %1 \n"
 		" beqz %2, 1b \n"
 		" and %2, %0, %3 \n"
 #ifdef CONFIG_SMP
-		"sync \n"
+		" sync \n"
#endif
-		".set\treorder"
+		" .set pop \n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
 		: "memory");
@@ -343,15 +361,17 @@ static inline int test_and_clear_bit(unsigned long nr,
 		unsigned long temp, res;
 
 		__asm__ __volatile__(
+		" " __SET_MIPS " \n"
 		"1: " __LL "%0, %1 # test_and_clear_bit \n"
 		" or %2, %0, %3 \n"
 		" xor %2, %3 \n"
-		__SC "%2, %1 \n"
+		" " __SC "%2, %1 \n"
 		" beqzl %2, 1b \n"
 		" and %2, %0, %3 \n"
 #ifdef CONFIG_SMP
 		" sync \n"
 #endif
+		" .set mips0 \n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
 		: "memory");
@@ -362,17 +382,19 @@ static inline int test_and_clear_bit(unsigned long nr,
 		unsigned long temp, res;
 
 		__asm__ __volatile__(
-		" .set noreorder # test_and_clear_bit \n"
-		"1: " __LL "%0, %1 \n"
+		" .set push \n"
+		" .set noreorder \n"
+		" " __SET_MIPS " \n"
+		"1: " __LL "%0, %1 # test_and_clear_bit \n"
 		" or %2, %0, %3 \n"
 		" xor %2, %3 \n"
-		__SC "%2, %1 \n"
+		" " __SC "%2, %1 \n"
 		" beqz %2, 1b \n"
 		" and %2, %0, %3 \n"
 #ifdef CONFIG_SMP
 		" sync \n"
 #endif
-		" .set reorder \n"
+		" .set pop \n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
 		: "memory");
@@ -435,14 +457,16 @@ static inline int test_and_change_bit(unsigned long nr,
 		unsigned long temp, res;
 
 		__asm__ __volatile__(
-		"1: " __LL " %0, %1 # test_and_change_bit \n"
+		" " __SET_MIPS " \n"
+		"1: " __LL "%0, %1 # test_and_change_bit \n"
 		" xor %2, %0, %3 \n"
-		" "__SC "%2, %1 \n"
+		" " __SC "%2, %1 \n"
 		" beqzl %2, 1b \n"
 		" and %2, %0, %3 \n"
 #ifdef CONFIG_SMP
 		" sync \n"
 #endif
+		" .set mips0 \n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
 		: "memory");
@@ -453,16 +477,18 @@ static inline int test_and_change_bit(unsigned long nr,
 		unsigned long temp, res;
 
 		__asm__ __volatile__(
-		" .set noreorder # test_and_change_bit \n"
-		"1: " __LL " %0, %1 \n"
+		" .set push \n"
+		" .set noreorder \n"
+		" " __SET_MIPS " \n"
+		"1: " __LL "%0, %1 # test_and_change_bit \n"
 		" xor %2, %0, %3 \n"
-		" "__SC "\t%2, %1 \n"
+		" " __SC "\t%2, %1 \n"
 		" beqz %2, 1b \n"
 		" and %2, %0, %3 \n"
 #ifdef CONFIG_SMP
 		" sync \n"
 #endif
-		" .set reorder \n"
+		" .set pop \n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
 		: "memory");
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index cd3a6bca7abd..ec29c9349e07 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -176,6 +176,7 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 		unsigned long dummy;
 
 		__asm__ __volatile__(
+		" .set mips2 \n"
 		"1: ll %0, %3 # xchg_u32 \n"
 		" move %2, %z4 \n"
 		" sc %2, %1 \n"
@@ -184,6 +185,7 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 #ifdef CONFIG_SMP
 		" sync \n"
 #endif
+		" .set mips0 \n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
 		: "memory");
@@ -191,6 +193,7 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 		unsigned long dummy;
 
 		__asm__ __volatile__(
+		" .set mips2 \n"
 		"1: ll %0, %3 # xchg_u32 \n"
 		" move %2, %z4 \n"
 		" sc %2, %1 \n"
@@ -198,6 +201,7 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 #ifdef CONFIG_SMP
 		" sync \n"
 #endif
+		" .set mips0 \n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
 		: "memory");
@@ -222,6 +226,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		unsigned long dummy;
 
 		__asm__ __volatile__(
+		" .set mips3 \n"
 		"1: lld %0, %3 # xchg_u64 \n"
 		" move %2, %z4 \n"
 		" scd %2, %1 \n"
@@ -230,6 +235,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 #ifdef CONFIG_SMP
 		" sync \n"
 #endif
+		" .set mips0 \n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
 		: "memory");
@@ -237,6 +243,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		unsigned long dummy;
 
 		__asm__ __volatile__(
+		" .set mips3 \n"
 		"1: lld %0, %3 # xchg_u64 \n"
 		" move %2, %z4 \n"
 		" scd %2, %1 \n"
@@ -244,6 +251,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 #ifdef CONFIG_SMP
 		" sync \n"
 #endif
+		" .set mips0 \n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
 		: "memory");
@@ -291,7 +299,9 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		__asm__ __volatile__(
+		" .set push \n"
 		" .set noat \n"
+		" .set mips2 \n"
 		"1: ll %0, %2 # __cmpxchg_u32 \n"
 		" bne %0, %z3, 2f \n"
 		" move $1, %z4 \n"
@@ -302,13 +312,15 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 		" sync \n"
 #endif
 		"2: \n"
-		" .set at \n"
+		" .set pop \n"
 		: "=&r" (retval), "=m" (*m)
 		: "R" (*m), "Jr" (old), "Jr" (new)
 		: "memory");
 	} else if (cpu_has_llsc) {
 		__asm__ __volatile__(
+		" .set push \n"
 		" .set noat \n"
+		" .set mips2 \n"
 		"1: ll %0, %2 # __cmpxchg_u32 \n"
 		" bne %0, %z3, 2f \n"
 		" move $1, %z4 \n"
@@ -318,7 +330,7 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 		" sync \n"
 #endif
 		"2: \n"
-		" .set at \n"
+		" .set pop \n"
 		: "=&r" (retval), "=m" (*m)
 		: "R" (*m), "Jr" (old), "Jr" (new)
 		: "memory");
@@ -343,7 +355,9 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 
-	if (cpu_has_llsc) {
+	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		__asm__ __volatile__(
+		" .set push \n"
 		" .set noat \n"
+		" .set mips3 \n"
 		"1: lld %0, %2 # __cmpxchg_u64 \n"
 		" bne %0, %z3, 2f \n"
 		" move $1, %z4 \n"
@@ -354,13 +368,15 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 		" sync \n"
 #endif
 		"2: \n"
-		" .set at \n"
+		" .set pop \n"
 		: "=&r" (retval), "=m" (*m)
 		: "R" (*m), "Jr" (old), "Jr" (new)
 		: "memory");
 	} else if (cpu_has_llsc) {
 		__asm__ __volatile__(
+		" .set push \n"
 		" .set noat \n"
+		" .set mips2 \n"
 		"1: lld %0, %2 # __cmpxchg_u64 \n"
 		" bne %0, %z3, 2f \n"
 		" move $1, %z4 \n"
@@ -370,7 +386,7 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 		" sync \n"
 #endif
 		"2: \n"
-		" .set at \n"
+		" .set pop \n"
 		: "=&r" (retval), "=m" (*m)
 		: "R" (*m), "Jr" (old), "Jr" (new)
 		: "memory");