author		Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>	2008-08-16 03:39:26 -0400
committer	Ingo Molnar <mingo@elte.hu>				2008-08-18 01:47:30 -0400
commit		3c3b5c3b0bf798316a410e27e3d7e6f015663602 (patch)
tree		a774ddfd39dace35a85316ee69da4055f8ebde51 /include/asm-x86
parent		2fdc86901d2ab30a12402b46238951d2a7891590 (diff)
x86: correct register constraints for 64-bit atomic operations
The x86_64 add/sub atomic ops do not accept integer values wider than
32 bits as immediates: Intel's add/sub documentation specifies that such
values have to be passed in registers.
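For illustration, the failure mode can be reproduced outside the kernel.
A minimal user-space sketch (hypothetical code, not from the patch;
LOCK_PREFIX is omitted since this is not kernel code):

	static long counter;

	static inline void broken_add(long i, long *v)
	{
		/* "ir" lets gcc choose "i" (any constant) or "r" (register);
		 * on x86-64, "i" wrongly admits constants wider than 32 bits. */
		asm volatile("addq %1,%0"
			     : "=m" (*v)
			     : "ir" (i), "m" (*v));
	}

	int main(void)
	{
		/* 0x100000000 cannot be encoded as addq's sign-extended
		 * 32-bit immediate; when gcc propagates the constant and
		 * picks the "i" alternative, assembly fails. */
		broken_add(0x100000000L, &counter);
		return 0;
	}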
The only operation in the x86-64 architecture which accepts an arbitrary
64-bit immediate is "movq" to a register; similarly, the only operation
which accepts an arbitrary 64-bit displacement is "movabs" to or from
al/ax/eax/rax.
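In AT&T syntax, the asymmetry looks like this (an illustrative snippet,
not part of the patch):

	movq	$0x123456789abcdef0, %rax	# ok: movq to a register takes a full 64-bit immediate
	addq	$0x123456789abcdef0, %rax	# rejected by the assembler: addq immediates are
						# limited to 32 bits, sign-extended to 64
	addq	$-1, %rax			# ok: -1 fits in a sign-extended 32-bit immediate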
http://gcc.gnu.org/onlinedocs/gcc-4.3.0/gcc/Machine-Constraints.html
states:

	e
		32-bit signed integer constant, or a symbolic reference
		known to fit that range (for immediate operands in
		sign-extending x86-64 instructions).

	Z
		32-bit unsigned integer constant, or a symbolic reference
		known to fit that range (for immediate operands in
		zero-extending x86-64 instructions).
Since add/sub sign-extend their immediate operand, using the "e"
constraint seems appropriate.
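The effect of the fix can be checked with a small user-space sketch
(fixed_add is a hypothetical name; again no LOCK_PREFIX). With "er",
gcc emits an immediate only when the value fits in a sign-extended
32 bits and otherwise falls back to a register:

	#include <stdio.h>

	static long counter;

	static inline void fixed_add(long i, long *v)
	{
		/* "er": "e" = 32-bit sign-extended immediate, "r" = register.
		 * Constants wider than 32 bits now force the register form. */
		asm volatile("addq %1,%0"
			     : "=m" (*v)
			     : "er" (i), "m" (*v));
	}

	int main(void)
	{
		fixed_add(1L, &counter);           /* fits: may compile to addq $1, ... */
		fixed_add(0x100000000L, &counter); /* too wide: loaded into a register first */
		printf("%lx\n", counter);          /* prints 100000001 */
		return 0;
	}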
The fix applies to 2.6.27-rc, 2.6.26, 2.6.25...
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86')
-rw-r--r--	include/asm-x86/atomic_64.h | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h
index a0095191c02e..91c7d03e65bc 100644
--- a/include/asm-x86/atomic_64.h
+++ b/include/asm-x86/atomic_64.h
@@ -228,7 +228,7 @@ static inline void atomic64_add(long i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "addq %1,%0"
 		     : "=m" (v->counter)
-		     : "ir" (i), "m" (v->counter));
+		     : "er" (i), "m" (v->counter));
 }
 
 /**
@@ -242,7 +242,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "subq %1,%0"
 		     : "=m" (v->counter)
-		     : "ir" (i), "m" (v->counter));
+		     : "er" (i), "m" (v->counter));
 }
 
 /**
@@ -260,7 +260,7 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
 
 	asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
 		     : "=m" (v->counter), "=qm" (c)
-		     : "ir" (i), "m" (v->counter) : "memory");
+		     : "er" (i), "m" (v->counter) : "memory");
 	return c;
 }
 
@@ -341,7 +341,7 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
 
 	asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
 		     : "=m" (v->counter), "=qm" (c)
-		     : "ir" (i), "m" (v->counter) : "memory");
+		     : "er" (i), "m" (v->counter) : "memory");
 	return c;
 }
 