path: root/arch/arm/include/asm
author	Will Deacon <will.deacon@arm.com>	2010-07-08 05:59:16 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-07-09 06:29:35 -0400
commit	398aa66827155ef52bab58bebd24597d90968929 (patch)
tree	51c49d73d0aba2bb3433e735533274f0c61362fb /arch/arm/include/asm
parent	068de8d1be48a04b92fd97f76bb7e113b7be82a8 (diff)
ARM: 6212/1: atomic ops: add memory constraints to inline asm
Currently, the 32-bit and 64-bit atomic operations on ARM do not include
memory constraints in the inline assembly blocks. In the case of
barrier-less operations [for example, atomic_add], this means that the
compiler may constant fold values which have actually been modified by a
call to an atomic operation.

This issue can be observed in the atomic64_test routine in
<kernel root>/lib/atomic64_test.c:

00000000 <test_atomic64>:
   0:	e1a0c00d 	mov	ip, sp
   4:	e92dd830 	push	{r4, r5, fp, ip, lr, pc}
   8:	e24cb004 	sub	fp, ip, #4
   c:	e24dd008 	sub	sp, sp, #8
  10:	e24b3014 	sub	r3, fp, #20
  14:	e30d000d 	movw	r0, #53261	; 0xd00d
  18:	e3011337 	movw	r1, #4919	; 0x1337
  1c:	e34c0001 	movt	r0, #49153	; 0xc001
  20:	e34a1aa3 	movt	r1, #43683	; 0xaaa3
  24:	e16300f8 	strd	r0, [r3, #-8]!
  28:	e30c0afe 	movw	r0, #51966	; 0xcafe
  2c:	e30b1eef 	movw	r1, #48879	; 0xbeef
  30:	e34d0eaf 	movt	r0, #57007	; 0xdeaf
  34:	e34d1ead 	movt	r1, #57005	; 0xdead
  38:	e1b34f9f 	ldrexd	r4, [r3]
  3c:	e1a34f90 	strexd	r4, r0, [r3]
  40:	e3340000 	teq	r4, #0
  44:	1afffffb 	bne	38 <test_atomic64+0x38>
  48:	e59f0004 	ldr	r0, [pc, #4]	; 54 <test_atomic64+0x54>
  4c:	e3a0101e 	mov	r1, #30
  50:	ebfffffe 	bl	0 <__bug>
  54:	00000000 	.word	0x00000000

The atomic64_set (0x38-0x44) writes to the atomic64_t, but the compiler
doesn't see this, assumes the test condition is always false and generates
an unconditional branch to __bug. The rest of the test is optimised away.

This patch adds suitable memory constraints to the atomic operations on ARM
to ensure that the compiler is informed of the correct data hazards. We
have to use the "Qo" constraints to avoid hitting the GCC anomaly described
at http://gcc.gnu.org/bugzilla/show_bug.cgi?id=44492 , where the compiler
makes assumptions about the writeback in the addressing mode used by the
inline assembly. These constraints forbid the use of auto{inc,dec}
addressing modes, so it doesn't matter if we don't use the operand exactly
once.

Cc: stable@kernel.org
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
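[Editorial sketch, not part of the commit message] For readers unfamiliar with GCC inline-asm memory constraints, the snippet below condenses the change as it applies to atomic_add in the diff that follows. Passing v->counter as a "+Qo" read/write memory operand (%2) tells GCC that the asm both reads and writes the counter, which prevents the constant folding described above; the counter address and the increment shift to operands %3 and %4 as a result. The comments are illustrative additions and do not appear in the kernel source.

/* Sketch of atomic_add as modified by this patch (kernel context assumed). */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"		/* result = v->counter, load-exclusive    */
"	add	%0, %0, %4\n"		/* result += i                            */
"	strex	%1, %0, [%3]\n"		/* store-exclusive; tmp == 0 on success   */
"	teq	%1, #0\n"
"	bne	1b"			/* retry if another CPU broke the monitor */
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)	/* %0, %1, %2 */
	: "r" (&v->counter), "Ir" (i)				/* %3, %4     */
	: "cc");
}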
Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--	arch/arm/include/asm/atomic.h	132
1 file changed, 66 insertions(+), 66 deletions(-)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index e9e56c00b858..7e79503ab89b 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -40,12 +40,12 @@ static inline void atomic_add(int i, atomic_t *v)
 	int result;
 
 	__asm__ __volatile__("@ atomic_add\n"
-"1:	ldrex	%0, [%2]\n"
-"	add	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 }
@@ -58,12 +58,12 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic_add_return\n"
-"1:	ldrex	%0, [%2]\n"
-"	add	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 
@@ -78,12 +78,12 @@ static inline void atomic_sub(int i, atomic_t *v)
 	int result;
 
 	__asm__ __volatile__("@ atomic_sub\n"
-"1:	ldrex	%0, [%2]\n"
-"	sub	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	sub	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 }
@@ -96,12 +96,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic_sub_return\n"
-"1:	ldrex	%0, [%2]\n"
-"	sub	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	sub	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 
@@ -118,11 +118,11 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
-		"ldrex	%1, [%2]\n"
+		"ldrex	%1, [%3]\n"
 		"mov	%0, #0\n"
-		"teq	%1, %3\n"
-		"strexeq %0, %4, [%2]\n"
-		    : "=&r" (res), "=&r" (oldval)
+		"teq	%1, %4\n"
+		"strexeq %0, %5, [%3]\n"
+		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
 		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
 		    : "cc");
 	} while (res);
@@ -137,12 +137,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 	unsigned long tmp, tmp2;
 
 	__asm__ __volatile__("@ atomic_clear_mask\n"
-"1:	ldrex	%0, [%2]\n"
-"	bic	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	bic	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (tmp), "=&r" (tmp2)
+	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
 	: "r" (addr), "Ir" (mask)
 	: "cc");
 }
@@ -249,7 +249,7 @@ static inline u64 atomic64_read(atomic64_t *v)
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrexd	%0, %H0, [%1]"
 	: "=&r" (result)
-	: "r" (&v->counter)
+	: "r" (&v->counter), "Qo" (v->counter)
 	);
 
 	return result;
@@ -260,11 +260,11 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 	u64 tmp;
 
 	__asm__ __volatile__("@ atomic64_set\n"
-"1:	ldrexd	%0, %H0, [%1]\n"
-"	strexd	%0, %2, %H2, [%1]\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	strexd	%0, %3, %H3, [%2]\n"
 "	teq	%0, #0\n"
 "	bne	1b"
-	: "=&r" (tmp)
+	: "=&r" (tmp), "=Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
@@ -275,13 +275,13 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__("@ atomic64_add\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	adds	%0, %0, %3\n"
-"	adc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%0, %0, %4\n"
+"	adc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
@@ -294,13 +294,13 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_add_return\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	adds	%0, %0, %3\n"
-"	adc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%0, %0, %4\n"
+"	adc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 
@@ -315,13 +315,13 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__("@ atomic64_sub\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	subs	%0, %0, %3\n"
-"	sbc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	subs	%0, %0, %4\n"
+"	sbc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
@@ -334,13 +334,13 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_sub_return\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	subs	%0, %0, %3\n"
-"	sbc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	subs	%0, %0, %4\n"
+"	sbc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 
@@ -358,12 +358,12 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 
 	do {
 		__asm__ __volatile__("@ atomic64_cmpxchg\n"
-		"ldrexd	%1, %H1, [%2]\n"
+		"ldrexd	%1, %H1, [%3]\n"
 		"mov	%0, #0\n"
-		"teq	%1, %3\n"
-		"teqeq	%H1, %H3\n"
-		"strexdeq %0, %4, %H4, [%2]"
-		: "=&r" (res), "=&r" (oldval)
+		"teq	%1, %4\n"
+		"teqeq	%H1, %H4\n"
+		"strexdeq %0, %5, %H5, [%3]"
+		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
 		: "r" (&ptr->counter), "r" (old), "r" (new)
 		: "cc");
 	} while (res);
@@ -381,11 +381,11 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_xchg\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	strexd	%1, %3, %H3, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	strexd	%1, %4, %H4, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
 	: "r" (&ptr->counter), "r" (new)
 	: "cc");
 
@@ -402,16 +402,16 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
 "	subs	%0, %0, #1\n"
 "	sbc	%H0, %H0, #0\n"
 "	teq	%H0, #0\n"
 "	bmi	2f\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b\n"
 "2:"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter)
 	: "cc");
 
@@ -429,18 +429,18 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_add_unless\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	teq	%0, %4\n"
-"	teqeq	%H0, %H4\n"
+"1:	ldrexd	%0, %H0, [%4]\n"
+"	teq	%0, %5\n"
+"	teqeq	%H0, %H5\n"
 "	moveq	%1, #0\n"
 "	beq	2f\n"
-"	adds	%0, %0, %5\n"
-"	adc	%H0, %H0, %H5\n"
-"	strexd	%2, %0, %H0, [%3]\n"
+"	adds	%0, %0, %6\n"
+"	adc	%H0, %H0, %H6\n"
+"	strexd	%2, %0, %H0, [%4]\n"
 "	teq	%2, #0\n"
 "	bne	1b\n"
 "2:"
-	: "=&r" (val), "+r" (ret), "=&r" (tmp)
+	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (u), "r" (a)
 	: "cc");
 