 include/asm-parisc/atomic.h | 84 +++++++++++++++++++++++++++++++++-------
 1 file changed, 69 insertions(+), 15 deletions(-)
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 2ca56d34aaad..4dc7253ff5d0 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -1,9 +1,13 @@
+/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
+ * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
+ */
+
 #ifndef _ASM_PARISC_ATOMIC_H_
 #define _ASM_PARISC_ATOMIC_H_
 
 #include <linux/config.h>
+#include <linux/types.h>
 #include <asm/system.h>
-/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>. */
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -46,15 +50,6 @@ extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 # define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
 #endif
 
-/* Note that we need not lock read accesses - aligned word writes/reads
- * are atomic, so a reader never sees unconsistent values.
- *
- * Cache-line alignment would conflict with, for example, linux/module.h
- */
-
-typedef struct { volatile int counter; } atomic_t;
-
-
 /* This should get optimized out since it's never called.
 ** Or get a link error if xchg is used "wrong".
 */
@@ -69,10 +64,9 @@ extern unsigned long __xchg64(unsigned long, unsigned long *);
 #endif
 
 /* optimizer better get rid of switch since size is a constant */
-static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
-						int size)
+static __inline__ unsigned long
+__xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
-
 	switch(size) {
 #ifdef __LP64__
 	case 8: return __xchg64(x,(unsigned long *) ptr);
@@ -129,7 +123,13 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 				    (unsigned long)_n_, sizeof(*(ptr))); \
 })
 
+/* Note that we need not lock read accesses - aligned word writes/reads
+ * are atomic, so a reader never sees unconsistent values.
+ *
+ * Cache-line alignment would conflict with, for example, linux/module.h
+ */
 
+typedef struct { volatile int counter; } atomic_t;
 
 /* It's possible to reduce all atomic operations to either
  * __atomic_add_return, atomic_set and atomic_read (the latter
@@ -210,12 +210,66 @@ static __inline__ int atomic_read(const atomic_t *v)
 
 #define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)
 
-#define ATOMIC_INIT(i)	{ (i) }
+#define ATOMIC_INIT(i)	((atomic_t) { (i) })
 
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
+#ifdef __LP64__
+
+typedef struct { volatile s64 counter; } atomic64_t;
+
+#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
+
+static __inline__ int
+__atomic64_add_return(s64 i, atomic64_t *v)
+{
+	int ret;
+	unsigned long flags;
+	_atomic_spin_lock_irqsave(v, flags);
+
+	ret = (v->counter += i);
+
+	_atomic_spin_unlock_irqrestore(v, flags);
+	return ret;
+}
+
+static __inline__ void
+atomic64_set(atomic64_t *v, s64 i)
+{
+	unsigned long flags;
+	_atomic_spin_lock_irqsave(v, flags);
+
+	v->counter = i;
+
+	_atomic_spin_unlock_irqrestore(v, flags);
+}
+
+static __inline__ s64
+atomic64_read(const atomic64_t *v)
+{
+	return v->counter;
+}
+
+#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)i),(v))))
+#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)i),(v))))
+#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
+#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))
+
+#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)i),(v)))
+#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)i),(v)))
+#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
+#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+
+#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
+#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
+
+#endif /* __LP64__ */
+
 #include <asm-generic/atomic.h>
-#endif
+
+#endif /* _ASM_PARISC_ATOMIC_H_ */
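
A side note on the ATOMIC_INIT hunk: the patch swaps a bare brace initializer for a C99 compound literal. The literal is a full expression, so it also works in assignments and argument lists, not only in declarations. Below is a minimal sketch of the difference, assuming nothing beyond standard C99; demo_t stands in for atomic_t and is purely illustrative.

#include <stdio.h>

typedef struct { volatile int counter; } demo_t;

#define DEMO_INIT_OLD(i)	{ (i) }			/* brace list: initializers only */
#define DEMO_INIT_NEW(i)	((demo_t) { (i) })	/* compound literal: a value */

int main(void)
{
	demo_t a = DEMO_INIT_OLD(1);	/* ok: declaration initializer */
	demo_t b = DEMO_INIT_NEW(2);	/* ok here too */

	/* a = DEMO_INIT_OLD(3);	   would not compile: not an expression */
	b = DEMO_INIT_NEW(4);		/* ok: a compound literal can be assigned */

	printf("%d %d\n", a.counter, b.counter);	/* prints: 1 4 */
	return 0;
}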
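
The atomic64_t emulation added here follows the scheme the file already uses for atomic_t on SMP: PA-RISC's only atomic primitive is the ldcw load-and-clear word, so each counter hashes by address into a small array of spinlocks, writes take the bucket lock with interrupts disabled, and reads go unlocked because aligned word accesses are atomic. The following is a self-contained userspace analog of that technique, assuming only POSIX threads; the demo_ names, the four-bucket hash, and the shift amount are illustrative choices, not the kernel's.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_HASH_SIZE 4

/* One lock per hash bucket; a counter's address selects its bucket,
 * so unrelated counters usually take different locks. */
static pthread_mutex_t demo_hash[DEMO_HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

#define DEMO_LOCK(v)	(&demo_hash[((uintptr_t)(v) >> 4) % DEMO_HASH_SIZE])

typedef struct { volatile int64_t counter; } demo_atomic64_t;

/* Read-modify-write under the bucket lock, mirroring __atomic64_add_return. */
static int64_t demo_atomic64_add_return(int64_t i, demo_atomic64_t *v)
{
	int64_t ret;
	pthread_mutex_t *lock = DEMO_LOCK(v);

	pthread_mutex_lock(lock);
	ret = (v->counter += i);
	pthread_mutex_unlock(lock);
	return ret;
}

/* No lock on the read path -- the same reasoning as the comment the
 * patch moves next to atomic_t: aligned word reads are atomic. */
static int64_t demo_atomic64_read(const demo_atomic64_t *v)
{
	return v->counter;
}

static demo_atomic64_t ctr = { 0 };

static void *worker(void *arg)
{
	(void)arg;
	for (int n = 0; n < 100000; n++)
		demo_atomic64_add_return(1, &ctr);
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int n = 0; n < 4; n++)
		pthread_create(&t[n], NULL, worker, NULL);
	for (int n = 0; n < 4; n++)
		pthread_join(t[n], NULL);

	/* Expect exactly 400000: the bucket lock serializes every += . */
	printf("counter = %lld\n", (long long)demo_atomic64_read(&ctr));
	return 0;
}

One visible wrinkle in the patch itself: __atomic64_add_return declares both its return type and ret as int, so atomic64_add_return truncates results outside the 32-bit range; the sketch above returns the full int64_t instead.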