Diffstat (limited to 'arch/sparc/include/asm/atomic_32.h')
-rw-r--r--  arch/sparc/include/asm/atomic_32.h | 165
1 file changed, 165 insertions, 0 deletions
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
new file mode 100644
index 000000000000..5c944b5a8040
--- /dev/null
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -0,0 +1,165 @@
/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
 * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
 *
 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
 */

#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

#include <linux/types.h>

typedef struct { volatile int counter; } atomic_t;

#ifdef __KERNEL__

#define ATOMIC_INIT(i)  { (i) }

extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);

#define atomic_read(v)          ((v)->counter)

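/* Illustrative sketch, not part of the original header: atomic_add_unless()
 * is only declared extern above (sparc32 implements it out of line), but a
 * common generic construction from atomic_cmpxchg() is the retry loop below.
 * The _sketch name is hypothetical; guarded out so the header is unchanged.
 */
#if 0
static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
                c = old;        /* lost a race; retry with the value we saw */
        return c != u;          /* non-zero if the add was performed */
}
#endif
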
#define atomic_add(i, v)        ((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v)        ((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v)           ((void)__atomic_add_return( 1, (v)))
#define atomic_dec(v)           ((void)__atomic_add_return( -1, (v)))

#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v)    (__atomic_add_return( 1, (v)))
#define atomic_dec_return(v)    (__atomic_add_return( -1, (v)))

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

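/* Illustrative usage sketch, not part of the original header: the classic
 * refcounting pattern these primitives support. The struct and function
 * names are hypothetical, and kfree() stands in for the real release path.
 */
#if 0
struct obj {
        atomic_t refcnt;
};

static int obj_get(struct obj *p)
{
        /* Fails once the count has already dropped to zero. */
        return atomic_inc_not_zero(&p->refcnt);
}

static void obj_put(struct obj *p)
{
        if (atomic_dec_and_test(&p->refcnt))
                kfree(p);       /* last reference gone */
}
#endif
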
/* This is the old 24-bit implementation.  It's still used internally
 * by some sparc-specific code, notably the semaphore implementation.
 */
typedef struct { volatile int counter; } atomic24_t;

#ifndef CONFIG_SMP

#define ATOMIC24_INIT(i)  { (i) }
#define atomic24_read(v)  ((v)->counter)
#define atomic24_set(v, i) (((v)->counter) = i)

#else
/* We do the bulk of the actual work out of line in two common
 * routines in assembler, see arch/sparc/lib/atomic.S for the
 * "fun" details.
 *
 * For SMP the trick is you embed the spin lock byte within
 * the word, use the low byte so signedness is easily retained
 * via a quick arithmetic shift.  It looks like this:
 *
 *      ----------------------------------------
 *      | signed 24-bit counter value |  lock  |  atomic24_t
 *      ----------------------------------------
 *       31                          8 7      0
 */

#define ATOMIC24_INIT(i)  { ((i) << 8) }

static inline int atomic24_read(const atomic24_t *v)
{
        int ret = v->counter;

        /* A nonzero low byte means the lock is held; reread until clear. */
        while (ret & 0xff)
                ret = v->counter;

        return ret >> 8;
}

#define atomic24_set(v, i) (((v)->counter) = ((i) << 8))
#endif

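/* Illustrative sketch, not part of the original header: how the packed
 * layout above behaves, assuming 32-bit int and two's complement. Since the
 * counter lives in bits 31..8, an arithmetic right shift by 8 recovers the
 * signed 24-bit value, and adding (delta << 8) adjusts the counter without
 * touching the lock byte. Helper names are hypothetical.
 */
#if 0
static int unpack_counter(int word)
{
        return word >> 8;       /* arithmetic shift keeps the sign */
}

static int pack_counter(int counter)
{
        return counter << 8;    /* lock byte (bits 7..0) left clear */
}

/* e.g. pack_counter(-1) yields 0xffffff00, and
 * unpack_counter of that word is -1 again.
 */
#endif
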
/* Operands are pinned to fixed global registers for the out-of-line
 * helper in arch/sparc/lib/atomic.S.
 */
static inline int __atomic24_add(int i, atomic24_t *v)
{
        register volatile int *ptr asm("g1");
        register int increment asm("g2");
        register int tmp1 asm("g3");
        register int tmp2 asm("g4");
        register int tmp3 asm("g7");

        ptr = &v->counter;
        increment = i;

        __asm__ __volatile__(
        "mov %%o7, %%g4\n\t"
        "call ___atomic24_add\n\t"
        " add %%o7, 8, %%o7\n"
        : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
        : "0" (increment), "r" (ptr)
        : "memory", "cc");

        return increment;
}

static inline int __atomic24_sub(int i, atomic24_t *v)
{
        register volatile int *ptr asm("g1");
        register int increment asm("g2");
        register int tmp1 asm("g3");
        register int tmp2 asm("g4");
        register int tmp3 asm("g7");

        ptr = &v->counter;
        increment = i;

        __asm__ __volatile__(
        "mov %%o7, %%g4\n\t"
        "call ___atomic24_sub\n\t"
        " add %%o7, 8, %%o7\n"
        : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
        : "0" (increment), "r" (ptr)
        : "memory", "cc");

        return increment;
}

#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))

#define atomic24_dec_return(v) __atomic24_sub(1, (v))
#define atomic24_inc_return(v) __atomic24_add(1, (v))

#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)

#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))

#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec()  barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc()  barrier()
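
/* Illustrative usage sketch, not part of the original header: callers that
 * need a full memory barrier around an atomic decrement use these hooks on
 * architectures where atomics do not imply one; here they are plain
 * compiler barriers. The variable name is hypothetical.
 */
#if 0
        smp_mb__before_atomic_dec();
        atomic_dec(&sharedcnt);
        smp_mb__after_atomic_dec();
#endif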

#endif /* !(__KERNEL__) */

#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC_ATOMIC__) */