author		Anton Blanchard <anton@samba.org>	2005-09-05 23:05:58 -0400
committer	Paul Mackerras <paulus@samba.org>	2005-09-06 02:07:53 -0400
commit		b2c0ab17ba751abe13a28508b1ac7e9ca074cd87 (patch)
tree		08abcde7f48b3393afcae069e3668923ee3e4492 /include
parent		4721e2214b5fd6eca48caea76afb1bad3148930f (diff)
[PATCH] ppc64: speedup cmpxchg
cmpxchg has the following code:
__typeof__(*(ptr)) _o_ = (o);
__typeof__(*(ptr)) _n_ = (n);
Unfortunately this makes gcc 4.0 store the variables to the stack and load
them back. For example, in atomic_dec_and_test we get:
stw r10,112(r1)
stw r9,116(r1)
lwz r9,112(r1)
lwz r0,116(r1)
x86 just casts the values instead, so do the same here. Also change __xchg*
and __cmpxchg* to take unsigned values, removing a few sign extensions.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
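
To make the change concrete, here is a minimal user-space sketch of the two
macro shapes side by side. The __cmpxchg() stub, the cmpxchg_old/cmpxchg_new
names, and the test driver are hypothetical, added only so the fragment
compiles on its own with gcc; it illustrates the shape of the change, not the
stack spill itself, which depends on the compiler version:

#include <stdio.h>

/* Hypothetical stand-in for the ppc64 __cmpxchg() primitive: if *ptr
 * holds old, store new; either way return the value that was found.
 * Only the 4-byte case is modelled, and no atomicity is attempted. */
static unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
			       unsigned long new, unsigned int size)
{
	volatile unsigned int *p = ptr;
	unsigned int prev = *p;

	(void)size;
	if (prev == old)
		*p = new;
	return prev;
}

/* Old shape: __typeof__ temporaries, which gcc 4.0 spilled to the stack. */
#define cmpxchg_old(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				(unsigned long)_n_, sizeof(*(ptr)));	\
})

/* New shape, as on x86: cast the arguments directly, no temporaries. */
#define cmpxchg_new(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
				(unsigned long)(n), sizeof(*(ptr))))

int main(void)
{
	volatile unsigned int v = 1;
	unsigned int ret;

	ret = cmpxchg_old(&v, 1, 0);
	printf("old shape: returned %u, v is now %u\n", ret, v);

	v = 1;
	ret = cmpxchg_new(&v, 1, 0);
	printf("new shape: returned %u, v is now %u\n", ret, v);
	return 0;
}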
Diffstat (limited to 'include')
-rw-r--r--	include/asm-ppc64/system.h | 19 ++++++++-----------
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
index b9e1835351e9..c0396428cc3c 100644
--- a/include/asm-ppc64/system.h
+++ b/include/asm-ppc64/system.h
@@ -158,7 +158,7 @@ static inline int __is_processor(unsigned long pv)
  * is more like most of the other architectures.
  */
 static __inline__ unsigned long
-__xchg_u32(volatile int *m, unsigned long val)
+__xchg_u32(volatile unsigned int *m, unsigned long val)
 {
 	unsigned long dummy;
 
@@ -200,7 +200,7 @@ __xchg_u64(volatile long *m, unsigned long val)
 extern void __xchg_called_with_bad_pointer(void);
 
 static __inline__ unsigned long
-__xchg(volatile void *ptr, unsigned long x, int size)
+__xchg(volatile void *ptr, unsigned long x, unsigned int size)
 {
 	switch (size) {
 	case 4:
@@ -223,7 +223,7 @@ __xchg(volatile void *ptr, unsigned long x, int size)
 #define __HAVE_ARCH_CMPXCHG	1
 
 static __inline__ unsigned long
-__cmpxchg_u32(volatile int *p, int old, int new)
+__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
 {
 	unsigned int prev;
 
@@ -271,7 +271,8 @@ __cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
 extern void __cmpxchg_called_with_bad_pointer(void);
 
 static __inline__ unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+	  unsigned int size)
 {
 	switch (size) {
 	case 4:
@@ -283,13 +284,9 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 	return old;
 }
 
-#define cmpxchg(ptr,o,n)						 \
-  ({									 \
-     __typeof__(*(ptr)) _o_ = (o);					 \
-     __typeof__(*(ptr)) _n_ = (n);					 \
-     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
-				    (unsigned long)_n_, sizeof(*(ptr))); \
-  })
+#define cmpxchg(ptr,o,n)\
+	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+	(unsigned long)(n),sizeof(*(ptr))))
 
 /*
  * We handle most unaligned accesses in hardware.  On the other hand
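
As a usage note: with either macro shape, callers typically use cmpxchg() in
a retry loop. Below is a minimal sketch, assuming the cmpxchg() semantics
shown in the patch (return the value found at the pointer); my_dec_and_test()
is a hypothetical helper in the style of the atomic_dec_and_test mentioned in
the message, not code from this patch:

/* Hypothetical decrement-and-test built on cmpxchg(); returns 1 when
 * the counter hits zero. Retries if another CPU updated *v between
 * the load and the exchange, i.e. if cmpxchg() returns something
 * other than the old value the decrement was based on. */
static int my_dec_and_test(volatile unsigned int *v)
{
	unsigned int old;

	do {
		old = *v;
	} while (cmpxchg(v, old, old - 1) != old);

	return (old - 1) == 0;
}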