| author | Maciej W. Rozycki <macro@linux-mips.org> | 2005-06-14 13:35:03 -0400 |
|---|---|---|
| committer | Ralf Baechle <ralf@linux-mips.org> | 2005-10-29 14:31:22 -0400 |
| commit | aac8aa7717a23a9bf8740dbfb59755b1d62f04bf (patch) | |
| tree | cae373db64607dafc496827c0d2f3b67b91d880f /include/asm-mips/atomic.h | |
| parent | fded2e508a1d3c26ab477ab3b98f13274d4359ba (diff) | |
Enable a suitable ISA for the assembler around ll/sc so that code
builds even for processors that don't support the instructions.
Plus minor formatting fixes.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'include/asm-mips/atomic.h')

-rw-r--r--  include/asm-mips/atomic.h | 40
1 files changed, 40 insertions, 0 deletions
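Every hunk below makes the same mechanical change: each ll/sc sequence (lld/scd for the 64-bit atomics) is bracketed by a `.set mips2` (or `.set mips3`) directive before it and `.set mips0` after it, so the assembler accepts the instructions even when the kernel is built for an ISA that lacks them; the run-time branches visible in the context lines (the `cpu_has_llsc` checks) still decide whether the sequence is actually executed. The following is a minimal sketch of the resulting pattern, not the kernel's own code: `my_atomic_t` and `my_atomic_add` are hypothetical stand-ins.

```c
/*
 * Minimal sketch of the bracketing pattern the patch applies.
 * my_atomic_t / my_atomic_add are hypothetical names, not the
 * kernel's definitions.
 */
typedef struct { volatile int counter; } my_atomic_t;

static inline void my_atomic_add(int i, my_atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__(
	"	.set	mips2		\n"	/* let the assembler emit ll/sc */
	"1:	ll	%0, %1		\n"	/* load-linked the counter      */
	"	addu	%0, %2		\n"	/* add the increment            */
	"	sc	%0, %1		\n"	/* store-conditional            */
	"	beqz	%0, 1b		\n"	/* retry if sc lost the race    */
	"	.set	mips0		\n"	/* restore the build-time ISA   */
	: "=&r" (temp), "=m" (v->counter)
	: "Ir" (i), "m" (v->counter));
}
```

`.set mips0` tells GAS to return to the ISA selected on the command line, so only the bracketed instructions are assembled as MIPS II (or MIPS III for lld/scd); whether they ever run remains a run-time decision.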
```diff
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index c0bd8d014e14..80ea3fbd3ece 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -62,20 +62,24 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips2				\n"
 		"1:	ll	%0, %1		# atomic_add	\n"
 		"	addu	%0, %2				\n"
 		"	sc	%0, %1				\n"
 		"	beqzl	%0, 1b				\n"
+		"	.set	mips0				\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else if (cpu_has_llsc) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips2				\n"
 		"1:	ll	%0, %1		# atomic_add	\n"
 		"	addu	%0, %2				\n"
 		"	sc	%0, %1				\n"
 		"	beqz	%0, 1b				\n"
+		"	.set	mips0				\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else {
@@ -100,20 +104,24 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips2				\n"
 		"1:	ll	%0, %1		# atomic_sub	\n"
 		"	subu	%0, %2				\n"
 		"	sc	%0, %1				\n"
 		"	beqzl	%0, 1b				\n"
+		"	.set	mips0				\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else if (cpu_has_llsc) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips2				\n"
 		"1:	ll	%0, %1		# atomic_sub	\n"
 		"	subu	%0, %2				\n"
 		"	sc	%0, %1				\n"
 		"	beqz	%0, 1b				\n"
+		"	.set	mips0				\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else {
@@ -136,12 +144,14 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips2				\n"
 		"1:	ll	%1, %2		# atomic_add_return	\n"
 		"	addu	%0, %1, %3			\n"
 		"	sc	%0, %2				\n"
 		"	beqzl	%0, 1b				\n"
 		"	addu	%0, %1, %3			\n"
 		"	sync					\n"
+		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -149,12 +159,14 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips2				\n"
 		"1:	ll	%1, %2		# atomic_add_return	\n"
 		"	addu	%0, %1, %3			\n"
 		"	sc	%0, %2				\n"
 		"	beqz	%0, 1b				\n"
 		"	addu	%0, %1, %3			\n"
 		"	sync					\n"
+		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -179,12 +191,14 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips2				\n"
 		"1:	ll	%1, %2		# atomic_sub_return	\n"
 		"	subu	%0, %1, %3			\n"
 		"	sc	%0, %2				\n"
 		"	beqzl	%0, 1b				\n"
 		"	subu	%0, %1, %3			\n"
 		"	sync					\n"
+		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -192,12 +206,14 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips2				\n"
 		"1:	ll	%1, %2		# atomic_sub_return	\n"
 		"	subu	%0, %1, %3			\n"
 		"	sc	%0, %2				\n"
 		"	beqz	%0, 1b				\n"
 		"	subu	%0, %1, %3			\n"
 		"	sync					\n"
+		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -229,6 +245,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips2				\n"
 		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
 		"	subu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
@@ -236,6 +253,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	beqzl	%0, 1b				\n"
 		"	sync					\n"
 		"1:						\n"
+		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -243,6 +261,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips2				\n"
 		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
 		"	subu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
@@ -250,6 +269,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	beqz	%0, 1b				\n"
 		"	sync					\n"
 		"1:						\n"
+		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -367,20 +387,24 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3				\n"
 		"1:	lld	%0, %1		# atomic64_add	\n"
 		"	addu	%0, %2				\n"
 		"	scd	%0, %1				\n"
 		"	beqzl	%0, 1b				\n"
+		"	.set	mips0				\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else if (cpu_has_llsc) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3				\n"
 		"1:	lld	%0, %1		# atomic64_add	\n"
 		"	addu	%0, %2				\n"
 		"	scd	%0, %1				\n"
 		"	beqz	%0, 1b				\n"
+		"	.set	mips0				\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else {
@@ -405,20 +429,24 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3				\n"
 		"1:	lld	%0, %1		# atomic64_sub	\n"
 		"	subu	%0, %2				\n"
 		"	scd	%0, %1				\n"
 		"	beqzl	%0, 1b				\n"
+		"	.set	mips0				\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else if (cpu_has_llsc) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3				\n"
 		"1:	lld	%0, %1		# atomic64_sub	\n"
 		"	subu	%0, %2				\n"
 		"	scd	%0, %1				\n"
 		"	beqz	%0, 1b				\n"
+		"	.set	mips0				\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else {
@@ -441,12 +469,14 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3				\n"
 		"1:	lld	%1, %2		# atomic64_add_return	\n"
 		"	addu	%0, %1, %3			\n"
 		"	scd	%0, %2				\n"
 		"	beqzl	%0, 1b				\n"
 		"	addu	%0, %1, %3			\n"
 		"	sync					\n"
+		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -454,12 +484,14 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3				\n"
 		"1:	lld	%1, %2		# atomic64_add_return	\n"
 		"	addu	%0, %1, %3			\n"
 		"	scd	%0, %2				\n"
 		"	beqz	%0, 1b				\n"
 		"	addu	%0, %1, %3			\n"
 		"	sync					\n"
+		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -484,12 +516,14 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3				\n"
 		"1:	lld	%1, %2		# atomic64_sub_return	\n"
 		"	subu	%0, %1, %3			\n"
 		"	scd	%0, %2				\n"
 		"	beqzl	%0, 1b				\n"
 		"	subu	%0, %1, %3			\n"
 		"	sync					\n"
+		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -497,12 +531,14 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3				\n"
 		"1:	lld	%1, %2		# atomic64_sub_return	\n"
 		"	subu	%0, %1, %3			\n"
 		"	scd	%0, %2				\n"
 		"	beqz	%0, 1b				\n"
 		"	subu	%0, %1, %3			\n"
 		"	sync					\n"
+		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -534,6 +570,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3				\n"
 		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
@@ -541,6 +578,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"	beqzl	%0, 1b				\n"
 		"	sync					\n"
 		"1:						\n"
+		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -548,6 +586,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3				\n"
 		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
@@ -555,6 +594,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"	beqz	%0, 1b				\n"
 		"	sync					\n"
 		"1:						\n"
+		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
```