Diffstat (limited to 'arch/parisc/include/asm/atomic.h')
-rw-r--r--    arch/parisc/include/asm/atomic.h    348
1 file changed, 348 insertions, 0 deletions
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
new file mode 100644
index 000000000000..57fcc4a5ebb4
--- /dev/null
+++ b/arch/parisc/include/asm/atomic.h
@@ -0,0 +1,348 @@
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.  prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>          /* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
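
/* Illustrative sketch, not part of this header: how the hash spreads
 * atomics in different cache lines across different locks, assuming a
 * hypothetical config where L1_CACHE_BYTES == 64:
 *
 *      atomic_t *a = (atomic_t *) 0x1000;      // 0x1000/64 == 64, 64 & 3 == 0
 *      atomic_t *b = (atomic_t *) 0x1040;      // 0x1040/64 == 65, 65 & 3 == 1
 *      ATOMIC_HASH(a);         // -> &__atomic_hash[0]
 *      ATOMIC_HASH(b);         // -> &__atomic_hash[1], so a and b can be
 *                              //    updated concurrently on different CPUs
 */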

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {     \
        raw_spinlock_t *s = ATOMIC_HASH(l);     \
        local_irq_save(f);                      \
        __raw_spin_lock(s);                     \
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {        \
        raw_spinlock_t *s = ATOMIC_HASH(l);             \
        __raw_spin_unlock(s);                           \
        local_irq_restore(f);                           \
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);


/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void * ptr, int size)
{
        switch(size) {
#ifdef CONFIG_64BIT
        case 8: return __xchg64(x,(unsigned long *) ptr);
#endif
        case 4: return __xchg32((int) x, (int *) ptr);
        case 1: return __xchg8((char) x, (char *) ptr);
        }
        __xchg_called_with_bad_pointer();
        return x;
}


/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
** if (__builtin_constant_p(x) && (x == NULL))
**      if (((unsigned long)p & 0xf) == 0)
**              return __ldcw(p);
*/
#define xchg(ptr,x) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
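
/* Usage sketch (illustrative, not from this file): atomically store a new
 * value and fetch the old one in one step:
 *
 *      static int flag;
 *      int was_set = xchg(&flag, 1);   // sizeof(*(&flag)) == 4 -> __xchg32()
 *
 * A 2-byte operand matches no case in __xchg(), leaving an unresolved call
 * to __xchg_called_with_bad_pointer() that fails at link time.
 */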


#define __HAVE_ARCH_CMPXCHG 1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
        switch(size) {
#ifdef CONFIG_64BIT
        case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
        case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr,o,n)                                                \
({                                                                      \
        __typeof__(*(ptr)) _o_ = (o);                                   \
        __typeof__(*(ptr)) _n_ = (n);                                   \
        (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,       \
                                (unsigned long)_n_, sizeof(*(ptr)));    \
})
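
/* Usage sketch (illustrative, not from this file): the classic
 * compare-and-swap retry loop built on cmpxchg():
 *
 *      static int counter;
 *      int old, new_;
 *      do {
 *              old = counter;
 *              new_ = old * 2;         // any read-modify-write step
 *      } while (cmpxchg(&counter, old, new_) != old);
 */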

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                unsigned long old,
                                unsigned long new_, int size)
{
        switch (size) {
#ifdef CONFIG_64BIT
        case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
        case 4: return __cmpxchg_u32(ptr, old, new_);
        default:
                return __cmpxchg_local_generic(ptr, old, new_, size);
        }
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)                                        \
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
                        (unsigned long)(n), sizeof(*(ptr))))
#ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n)                                      \
({                                                                      \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg_local((ptr), (o), (n));                                 \
})
#else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
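
/* Illustrative sketch: cmpxchg_local() only has to be atomic against code
 * on the same CPU, so it suits data never shared across CPUs, e.g. a
 * hypothetical per-cpu statistics counter (stats is an assumed name):
 *
 *      long old, new_;
 *      do {
 *              old = stats->count;
 *              new_ = old + 1;
 *      } while (cmpxchg_local(&stats->count, old, new_) != old);
 */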

/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 *
 * Cache-line alignment would conflict with, for example, linux/module.h
 */

typedef struct { volatile int counter; } atomic_t;

/* It's possible to reduce all atomic operations to
 * __atomic_add_return, atomic_set and atomic_read (the latter
 * is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
        int ret;
        unsigned long flags;
        _atomic_spin_lock_irqsave(v, flags);

        ret = (v->counter += i);

        _atomic_spin_unlock_irqrestore(v, flags);
        return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
        unsigned long flags;
        _atomic_spin_lock_irqsave(v, flags);

        v->counter = i;

        _atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
        return v->counter;
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;
        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
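
/* Illustrative use (not from this file): atomic_inc_not_zero() is the
 * usual way to take a reference only while an object is still live; obj
 * and refcnt are assumed names:
 *
 *      if (!atomic_inc_not_zero(&obj->refcnt))
 *              return NULL;    // counter already hit zero; object is dying
 */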

#define atomic_add(i,v) ((void)(__atomic_add_return( ((int)(i)),(v))))
#define atomic_sub(i,v) ((void)(__atomic_add_return(-((int)(i)),(v))))
#define atomic_inc(v)   ((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)   ((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)  (__atomic_add_return( ((int)(i)),(v)))
#define atomic_sub_return(i,v)  (__atomic_add_return(-((int)(i)),(v)))
#define atomic_inc_return(v)    (__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)    (__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)       (atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
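
/* Illustrative use (not from this file): the matching reference drop;
 * exactly one caller sees the counter reach zero and frees the object.
 * obj, refcnt and free_obj() are assumed names:
 *
 *      if (atomic_dec_and_test(&obj->refcnt))
 *              free_obj(obj);
 */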

#define atomic_sub_and_test(i,v)        (atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)  ((atomic_t) { (i) })

#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

#ifdef CONFIG_64BIT

typedef struct { volatile s64 counter; } atomic64_t;

#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
        s64 ret;
        unsigned long flags;
        _atomic_spin_lock_irqsave(v, flags);

        ret = (v->counter += i);

        _atomic_spin_unlock_irqrestore(v, flags);
        return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
        unsigned long flags;
        _atomic_spin_lock_irqsave(v, flags);

        v->counter = i;

        _atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
        return v->counter;
}

#define atomic64_add(i,v)       ((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v)       ((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v)         ((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)         ((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)        (__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v)        (__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v)          (__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)          (__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)     (atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)        (atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)        (atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)      (atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
        ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long c, old;
        c = atomic64_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic64_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#endif /* CONFIG_64BIT */

#include <asm-generic/atomic.h>

#endif /* _ASM_PARISC_ATOMIC_H_ */