| | | |
|---|---|---|
| author | Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> | 2008-02-07 03:16:24 -0500 |
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2008-02-07 11:42:32 -0500 |
| commit | 405321d3ab2b41960e2874f2f74609daffbc7a4c (patch) | |
| tree | af0d51539011d57b2df0df6c69171433f9d60ff8 /include | |
| parent | fe4130131ef9e55763fd634a02b1db9290dbbe5a (diff) | |
Add cmpxchg_local to sparc, move __cmpxchg to system.h
Move cmpxchg and add cmpxchg_local to system.h.
Use the new generic cmpxchg_local (disables interrupt).
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
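For orientation, here is a minimal usage sketch of the two primitives this patch ends up exposing on sparc32. The counter helpers below are illustrative only and are not part of the patch; they just show the difference between the cross-CPU cmpxchg() and the CPU-local cmpxchg_local().

```c
#include <asm/system.h>	/* after this patch: cmpxchg() and cmpxchg_local() on sparc */

/* Illustrative lock-free increment built on cmpxchg(); retries until no
 * other CPU modified *ctr between the load and the compare-and-swap. */
static inline void counter_inc(volatile unsigned int *ctr)
{
	unsigned int old;

	do {
		old = *ctr;
	} while (cmpxchg(ctr, old, old + 1) != old);
}

/* cmpxchg_local() is only atomic with respect to the current CPU, so it is
 * suitable for data never touched concurrently from another CPU, e.g.
 * per-cpu statistics updated with preemption disabled. */
static inline void local_counter_inc(unsigned int *ctr)
{
	unsigned int old;

	do {
		old = *ctr;
	} while (cmpxchg_local(ctr, old, old + 1) != old);
}
```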
Diffstat (limited to 'include')

| Mode | File | Lines |
|---|---|---|
| -rw-r--r-- | include/asm-sparc/atomic.h | 36 |
| -rw-r--r-- | include/asm-sparc/system.h | 48 |

2 files changed, 48 insertions, 36 deletions
```diff
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index 3328950dbfe6..5c944b5a8040 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -17,42 +17,6 @@ typedef struct { volatile int counter; } atomic_t;
 
 #ifdef __KERNEL__
 
-/* Emulate cmpxchg() the same way we emulate atomics,
- * by hashing the object address and indexing into an array
- * of spinlocks to get a bit of performance...
- *
- * See arch/sparc/lib/atomic32.c for implementation.
- *
- * Cribbed from <asm-parisc/atomic.h>
- */
-#define __HAVE_ARCH_CMPXCHG	1
-
-/* bug catcher for when unsupported size is used - won't link */
-extern void __cmpxchg_called_with_bad_pointer(void);
-/* we only need to support cmpxchg of a u32 on sparc */
-extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
-
-/* don't worry...optimizer will get rid of most of this */
-static inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
-{
-	switch(size) {
-	case 4:
-		return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
-	default:
-		__cmpxchg_called_with_bad_pointer();
-		break;
-	}
-	return old;
-}
-
-#define cmpxchg(ptr,o,n) ({ \
-	__typeof__(*(ptr)) _o_ = (o); \
-	__typeof__(*(ptr)) _n_ = (n); \
-	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
-			(unsigned long)_n_, sizeof(*(ptr))); \
-})
-
 #define ATOMIC_INIT(i)	{ (i) }
 
 extern int __atomic_add_return(int, atomic_t *);
```
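The comment removed above points at arch/sparc/lib/atomic32.c for the actual emulation. As a rough sketch of the approach that comment describes (the hash size and shift below are illustrative values, not copied from that file), __cmpxchg_u32() hashes the object address into a small array of spinlocks and does the compare-and-store under the chosen lock with interrupts disabled:

```c
/* Simplified sketch of the spinlock-hash emulation described above.
 * ATOMIC_HASH_SIZE and the >> 8 shift are illustrative, not necessarily
 * what arch/sparc/lib/atomic32.c uses. */
#include <linux/spinlock.h>
#include <linux/types.h>

#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(ptr) \
	(&__atomic_hash[(((unsigned long)(ptr)) >> 8) & (ATOMIC_HASH_SIZE - 1)])

static spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... (ATOMIC_HASH_SIZE - 1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_)
{
	unsigned long flags;
	u32 prev;

	/* Take the lock covering this address; irqsave so the sequence is
	 * also safe against interrupt handlers on the same CPU. */
	spin_lock_irqsave(ATOMIC_HASH(m), flags);
	prev = *m;
	if (prev == old)
		*m = new_;
	spin_unlock_irqrestore(ATOMIC_HASH(m), flags);

	return (unsigned long)prev;
}
```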
```diff
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 2655d142b22d..45e47c159a6e 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -225,6 +225,54 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
 	return x;
 }
 
+/* Emulate cmpxchg() the same way we emulate atomics,
+ * by hashing the object address and indexing into an array
+ * of spinlocks to get a bit of performance...
+ *
+ * See arch/sparc/lib/atomic32.c for implementation.
+ *
+ * Cribbed from <asm-parisc/atomic.h>
+ */
+#define __HAVE_ARCH_CMPXCHG	1
+
+/* bug catcher for when unsupported size is used - won't link */
+extern void __cmpxchg_called_with_bad_pointer(void);
+/* we only need to support cmpxchg of a u32 on sparc */
+extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+
+/* don't worry...optimizer will get rid of most of this */
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
+{
+	switch (size) {
+	case 4:
+		return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
+	default:
+		__cmpxchg_called_with_bad_pointer();
+		break;
+	}
+	return old;
+}
+
+#define cmpxchg(ptr, o, n)						\
+({									\
+	__typeof__(*(ptr)) _o_ = (o);					\
+	__typeof__(*(ptr)) _n_ = (n);					\
+	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
+			(unsigned long)_n_, sizeof(*(ptr)));		\
+})
+
+#include <asm-generic/cmpxchg-local.h>
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n)					       \
+	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+			(unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
 extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
 
 #endif /* __KERNEL__ */
```
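The cmpxchg_local()/cmpxchg64_local() definitions added above fall back to the generic helpers from <asm-generic/cmpxchg-local.h>, which, as the commit message notes, only have to be atomic against the current CPU and therefore just disable local interrupts around a plain compare-and-store. Below is a simplified sketch of that approach under an assumed name, not the exact asm-generic code; the real __cmpxchg_local_generic() also handles 1-, 2- and 8-byte objects.

```c
#include <linux/irqflags.h>
#include <linux/types.h>

/* Sketch of the interrupt-disabling fallback used for cmpxchg_local(). */
static inline unsigned long __cmpxchg_local_sketch(volatile void *ptr,
		unsigned long old, unsigned long new_, int size)
{
	unsigned long flags, prev = old;

	/* No other CPU is expected to touch *ptr, so masking local
	 * interrupts is enough to make the read-compare-write sequence
	 * atomic with respect to this CPU. */
	local_irq_save(flags);
	switch (size) {
	case 4:
		prev = *(u32 *)ptr;
		if (prev == old)
			*(u32 *)ptr = (u32)new_;
		break;
	}
	local_irq_restore(flags);

	return prev;
}
```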
