Diffstat (limited to 'include/asm-sparc')
 -rw-r--r--  include/asm-sparc/atomic.h | 38
 1 file changed, 38 insertions, 0 deletions
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index 731fa56e0c37..bdca5416d8b0 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -2,6 +2,7 @@
  *
  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
  * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
+ * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
  *
  * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
  * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
@@ -10,11 +11,48 @@
 #ifndef __ARCH_SPARC_ATOMIC__
 #define __ARCH_SPARC_ATOMIC__
 
+#include <linux/types.h>
 
 typedef struct { volatile int counter; } atomic_t;
 
 #ifdef __KERNEL__
 
+/* Emulate cmpxchg() the same way we emulate atomics,
+ * by hashing the object address and indexing into an array
+ * of spinlocks to get a bit of performance...
+ *
+ * See arch/sparc/lib/atomic32.c for implementation.
+ *
+ * Cribbed from <asm-parisc/atomic.h>
+ */
+#define __HAVE_ARCH_CMPXCHG	1
+
+/* bug catcher for when unsupported size is used - won't link */
+extern void __cmpxchg_called_with_bad_pointer(void);
+/* we only need to support cmpxchg of a u32 on sparc */
+extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+
+/* don't worry...optimizer will get rid of most of this */
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
+{
+	switch(size) {
+	case 4:
+		return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
+	default:
+		__cmpxchg_called_with_bad_pointer();
+		break;
+	}
+	return old;
+}
+
+#define cmpxchg(ptr,o,n) ({ \
+	__typeof__(*(ptr)) _o_ = (o); \
+	__typeof__(*(ptr)) _n_ = (n); \
+	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+		(unsigned long)_n_, sizeof(*(ptr))); \
+})
+
 #define ATOMIC_INIT(i)	{ (i) }
 
 extern int __atomic_add_return(int, atomic_t *);
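
The hunk's comment points at arch/sparc/lib/atomic32.c for the real lock-array implementation. As a rough illustration of the technique it describes (hash the object's address into a small array of locks, then do the compare-and-swap non-atomically under the chosen lock), here is a userspace-style sketch using POSIX spinlocks. The names LOCK_HASH_SIZE, lock_for(), and emulated_cmpxchg_u32() are hypothetical and do not reproduce the kernel code; this is only a sketch of the idea, not the patch's implementation.

/* Sketch of lock-hash cmpxchg emulation (illustrative names, userspace pthreads). */
#include <pthread.h>
#include <stdint.h>

#define LOCK_HASH_SIZE 4	/* small power of two: one lock per hash bucket */

static pthread_spinlock_t lock_hash[LOCK_HASH_SIZE];

/* One-time setup of the lock array. */
static void lock_hash_init(void)
{
	for (int i = 0; i < LOCK_HASH_SIZE; i++)
		pthread_spin_init(&lock_hash[i], PTHREAD_PROCESS_PRIVATE);
}

/* Pick a lock by hashing the object's address, so unrelated objects
 * usually land on different locks and do not contend. */
static pthread_spinlock_t *lock_for(volatile void *addr)
{
	return &lock_hash[((uintptr_t)addr >> 2) & (LOCK_HASH_SIZE - 1)];
}

/* Emulated compare-and-exchange on a 32-bit word: the read/compare/write
 * sequence is not atomic on its own, so it runs under the hashed lock. */
static uint32_t emulated_cmpxchg_u32(volatile uint32_t *m, uint32_t old, uint32_t new_)
{
	pthread_spinlock_t *lock = lock_for(m);
	uint32_t prev;

	pthread_spin_lock(lock);
	prev = *m;
	if (prev == old)
		*m = new_;
	pthread_spin_unlock(lock);

	return prev;	/* caller compares prev with old to see if the swap took effect */
}

This also shows why the hunk only wires up the u32 case: any other operand size falls through to __cmpxchg_called_with_bad_pointer(), which is declared but never defined, so a bad cmpxchg() use fails at link time rather than silently misbehaving at run time.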