| author | Jan Beulich <JBeulich@novell.com> | 2005-10-30 17:59:27 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-10-30 20:37:11 -0500 |
| commit | 8896fab35e62aa748a5ce62ac773508e51f10be1 (patch) | |
| tree | bb8f2dce7b5d0aa6ffe68aa6b97920d2ce32e164 /include/asm-i386/system.h | |
| parent | dacb16b1a034fa7a0b868ee30758119fbfd90bc1 (diff) | |
[PATCH] x86: cmpxchg improvements
This adjusts i386's cmpxchg patterns so that
- for word and long cmpxchg-es the compiler can utilize all possible
  registers (see the constraint sketch below)
- cmpxchg8b gets disabled when the minimum specified hardware architecture
  doesn't support it (as was already happening for the byte, word, and
  long ones).
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
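
For background on the first point: on i386 the "q" constraint restricts GCC to %eax/%ebx/%ecx/%edx, whereas "r" permits any general-purpose register (including %esi and %edi), which matters because %eax is already claimed for the old value. Below is a minimal standalone sketch of the relaxed 4-byte pattern; this is hypothetical test code, not part of the patch, and the function name is made up.

static inline unsigned long
cmpxchg32_sketch(volatile unsigned long *ptr, unsigned long old, unsigned long new)
{
	unsigned long prev;

	/*
	 * Same shape as the patched 4-byte case in __cmpxchg(): the expected
	 * value is tied to %eax via "0"(old), the previous memory contents
	 * come back in %eax, and "r" lets the compiler place `new` in any
	 * general-purpose register rather than only a/b/c/d as "q" would.
	 */
	__asm__ __volatile__("lock; cmpxchgl %1,%2"
			     : "=a" (prev)
			     : "r" (new), "m" (*ptr), "0" (old)
			     : "memory");
	return prev;
}
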
Diffstat (limited to 'include/asm-i386/system.h')
-rw-r--r-- | include/asm-i386/system.h | 33 |
1 file changed, 30 insertions(+), 3 deletions(-)
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index acd5c26b69ba..97d52ac49e46 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -167,6 +167,8 @@ struct __xchg_dummy { unsigned long a[100]; };
 #define __xg(x) ((struct __xchg_dummy *)(x))
 
 
+#ifdef CONFIG_X86_CMPXCHG64
+
 /*
  * The semantics of XCHGCMP8B are a bit strange, this is why
  * there is a loop and the loading of %%eax and %%edx has to
@@ -221,6 +223,8 @@ static inline void __set_64bit_var (unsigned long long *ptr,
  __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
  __set_64bit(ptr, ll_low(value), ll_high(value)) )
 
+#endif
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
@@ -259,7 +263,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
-#endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 				      unsigned long new, int size)
@@ -275,13 +278,13 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	case 2:
 		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
 				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
 				     : "memory");
 		return prev;
 	case 4:
 		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
 				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
 				     : "memory");
 		return prev;
 	}
@@ -291,6 +294,30 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 #define cmpxchg(ptr,o,n)\
 	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
 					(unsigned long)(n),sizeof(*(ptr))))
+
+#endif
+
+#ifdef CONFIG_X86_CMPXCHG64
+
+static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
+				      unsigned long long new)
+{
+	unsigned long long prev;
+	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
+			     : "=A"(prev)
+			     : "b"((unsigned long)new),
+			       "c"((unsigned long)(new >> 32)),
+			       "m"(*__xg(ptr)),
+			       "0"(old)
+			     : "memory");
+	return prev;
+}
+
+#define cmpxchg64(ptr,o,n)\
+	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
+					(unsigned long long)(n)))
+
+#endif
 
 #ifdef __KERNEL__
 struct alt_instr {
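
As a usage illustration (hypothetical caller code, not part of this patch; the function name is made up), the new cmpxchg64() helper supports the usual compare-and-swap retry loop on a 64-bit quantity, and is only available when CONFIG_X86_CMPXCHG64 is set:

static inline void u64_add_sketch(unsigned long long *ctr, unsigned long long delta)
{
	unsigned long long old, new;

	do {
		old = *ctr;		/* snapshot the current value     */
		new = old + delta;	/* value we would like to install */
		/*
		 * cmpxchg64() returns what was actually found in memory;
		 * if another CPU changed *ctr since the snapshot, retry
		 * with the fresh value.
		 */
	} while (cmpxchg64(ctr, old, new) != old);
}
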