Diffstat (limited to 'arch/arm/include/asm/atomic.h')
-rw-r--r--	arch/arm/include/asm/atomic.h	132
1 file changed, 66 insertions(+), 66 deletions(-)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index a0162fa9456..7e79503ab89 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -40,12 +40,12 @@ static inline void atomic_add(int i, atomic_t *v)
 	int result;
 
 	__asm__ __volatile__("@ atomic_add\n"
-"1:	ldrex	%0, [%2]\n"
-"	add	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 }
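Every hunk below follows the same recipe: "+Qo" (v->counter) is appended to the output list so the compiler knows the asm reads and modifies the counter in memory ("Q" and "o" are memory constraints, "+" marks the operand read-write). Because that new output becomes %2, the address and addend inputs shift from %2/%3 to %3/%4. A minimal user-space sketch of the post-patch shape, assuming an ARMv6-or-later target and GCC-style inline asm; my_atomic_add is a hypothetical name, not part of this header:

static inline void my_atomic_add(int i, int *counter)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ my_atomic_add\n"
"1:	ldrex	%0, [%3]\n"	/* %0 = result, %3 = counter address */
"	add	%0, %0, %4\n"	/* %4 = i (register or immediate)    */
"	strex	%1, %0, [%3]\n"	/* %1 = 0 only if still exclusive    */
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (*counter)
	: "r" (counter), "Ir" (i)
	: "cc");
}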
@@ -58,12 +58,12 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic_add_return\n"
-"1:	ldrex	%0, [%2]\n"
-"	add	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 
@@ -78,12 +78,12 @@ static inline void atomic_sub(int i, atomic_t *v)
 	int result;
 
 	__asm__ __volatile__("@ atomic_sub\n"
-"1:	ldrex	%0, [%2]\n"
-"	sub	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	sub	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 }
@@ -96,12 +96,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic_sub_return\n"
-"1:	ldrex	%0, [%2]\n"
-"	sub	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	sub	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 
@@ -118,11 +118,11 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
-		"ldrex	%1, [%2]\n"
+		"ldrex	%1, [%3]\n"
 		"mov	%0, #0\n"
-		"teq	%1, %3\n"
-		"strexeq %0, %4, [%2]\n"
-		: "=&r" (res), "=&r" (oldval)
+		"teq	%1, %4\n"
+		"strexeq %0, %5, [%3]\n"
+		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
 		: "r" (&ptr->counter), "Ir" (old), "r" (new)
 		: "cc");
 	} while (res);
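For context, atomic_cmpxchg() stores new only if the current value equals old, and returns the value it observed; res is the strexeq status, so the do/while retries whenever the exclusive store was interrupted. A hedged usage sketch of the conventional compare-and-swap loop built on top of it (my_add_via_cmpxchg is a hypothetical helper, not part of this header):

static inline void my_add_via_cmpxchg(atomic_t *v, int i)
{
	int old;

	/* retry until no other CPU changed v between the read and the swap */
	do {
		old = atomic_read(v);
	} while (atomic_cmpxchg(v, old, old + i) != old);
}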
@@ -137,12 +137,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 	unsigned long tmp, tmp2;
 
 	__asm__ __volatile__("@ atomic_clear_mask\n"
-"1:	ldrex	%0, [%2]\n"
-"	bic	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	bic	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (tmp), "=&r" (tmp2)
+	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
 	: "r" (addr), "Ir" (mask)
 	: "cc");
 }
@@ -249,7 +249,7 @@ static inline u64 atomic64_read(atomic64_t *v)
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrexd	%0, %H0, [%1]"
 	: "=&r" (result)
-	: "r" (&v->counter)
+	: "r" (&v->counter), "Qo" (v->counter)
 	);
 
 	return result;
@@ -260,11 +260,11 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 	u64 tmp;
 
 	__asm__ __volatile__("@ atomic64_set\n"
-"1:	ldrexd	%0, %H0, [%1]\n"
-"	strexd	%0, %2, %H2, [%1]\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	strexd	%0, %3, %H3, [%2]\n"
 "	teq	%0, #0\n"
 "	bne	1b"
-	: "=&r" (tmp)
+	: "=&r" (tmp), "=Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
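Note how the constraint tracks the access pattern: atomic64_read() above takes a plain "Qo" input because the asm only reads the counter, atomic64_set() takes "=Qo" (write only), and the read-modify-write ops below take "+Qo". A 64-bit store on 32-bit ARM is not single-copy atomic, which is why even atomic64_set() loops with ldrexd/strexd: the load claims the exclusive monitor and the store retries until it succeeds. A user-space sketch of the same idea, assuming an ARMv6K-or-later target where ldrexd/strexd exist (my_atomic64_set is a hypothetical name):

static inline void my_atomic64_set(long long *p, long long i)
{
	long long tmp;

	/* p must be 8-byte aligned, as ldrexd/strexd require */
	__asm__ __volatile__("@ my_atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"	/* claim exclusive access to *p      */
"	strexd	%0, %3, %H3, [%2]\n"	/* %0 = 0 only if still exclusive    */
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (*p)
	: "r" (p), "r" (i)
	: "cc");
}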
@@ -275,13 +275,13 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__("@ atomic64_add\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	adds	%0, %0, %3\n"
-"	adc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%0, %0, %4\n"
+"	adc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
@@ -294,13 +294,13 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_add_return\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	adds	%0, %0, %3\n"
-"	adc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%0, %0, %4\n"
+"	adc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 
@@ -315,13 +315,13 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__("@ atomic64_sub\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	subs	%0, %0, %3\n"
-"	sbc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	subs	%0, %0, %4\n"
+"	sbc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
@@ -334,13 +334,13 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_sub_return\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	subs	%0, %0, %3\n"
-"	sbc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	subs	%0, %0, %4\n"
+"	sbc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 
@@ -358,12 +358,12 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 
 	do {
 		__asm__ __volatile__("@ atomic64_cmpxchg\n"
-		"ldrexd		%1, %H1, [%2]\n"
+		"ldrexd		%1, %H1, [%3]\n"
 		"mov		%0, #0\n"
-		"teq		%1, %3\n"
-		"teqeq		%H1, %H3\n"
-		"strexdeq	%0, %4, %H4, [%2]"
-		: "=&r" (res), "=&r" (oldval)
+		"teq		%1, %4\n"
+		"teqeq		%H1, %H4\n"
+		"strexdeq	%0, %5, %H5, [%3]"
+		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
 		: "r" (&ptr->counter), "r" (old), "r" (new)
 		: "cc");
 	} while (res);
@@ -381,11 +381,11 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_xchg\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	strexd	%1, %3, %H3, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	strexd	%1, %4, %H4, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
 	: "r" (&ptr->counter), "r" (new)
 	: "cc");
 
@@ -402,16 +402,16 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
 "	subs	%0, %0, #1\n"
 "	sbc	%H0, %H0, #0\n"
 "	teq	%H0, #0\n"
 "	bmi	2f\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b\n"
 "2:"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter)
 	: "cc");
 
@@ -429,18 +429,18 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_add_unless\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	teq	%0, %4\n"
-"	teqeq	%H0, %H4\n"
+"1:	ldrexd	%0, %H0, [%4]\n"
+"	teq	%0, %5\n"
+"	teqeq	%H0, %H5\n"
 "	moveq	%1, #0\n"
 "	beq	2f\n"
-"	adds	%0, %0, %5\n"
-"	adc	%H0, %H0, %H5\n"
-"	strexd	%2, %0, %H0, [%3]\n"
+"	adds	%0, %0, %6\n"
+"	adc	%H0, %H0, %H6\n"
+"	strexd	%2, %0, %H0, [%4]\n"
 "	teq	%2, #0\n"
 "	bne	1b\n"
 "2:"
-	: "=&r" (val), "=&r" (ret), "=&r" (tmp)
+	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (u), "r" (a)
 	: "cc");
 
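This last hunk also changes ret from "=&r" to "+r": ret appears to be initialised before the asm block and is only conditionally cleared by the moveq, so it must be declared read-write rather than write-only. The usual consumer of atomic64_add_unless(), which returns non-zero when the add happened, is the inc-not-zero idiom, sketched below (my_atomic64_inc_not_zero is a hypothetical wrapper; the header's own atomic64_inc_not_zero is, to the best of my knowledge, defined the same way):

static inline int my_atomic64_inc_not_zero(atomic64_t *v)
{
	/* add 1 unless the counter is 0; non-zero return means it added */
	return atomic64_add_unless(v, 1, 0);
}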