Diffstat (limited to 'arch/blackfin/include/asm/atomic.h')
-rw-r--r--arch/blackfin/include/asm/atomic.h142
1 file changed, 142 insertions, 0 deletions
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
new file mode 100644
index 000000000000..7cf508718605
--- /dev/null
+++ b/arch/blackfin/include/asm/atomic.h
@@ -0,0 +1,142 @@
+#ifndef __ARCH_BLACKFIN_ATOMIC__
+#define __ARCH_BLACKFIN_ATOMIC__
+
+#include <asm/system.h>	/* local_irq_XXX() */
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc.
+ *
+ * Generally we do not concern ourselves with SMP BFIN systems, so we
+ * don't have to deal with that.
+ *
+ * Tony Kou (tonyko@lineo.ca)  Lineo Inc.  2001
+ */
+
+typedef struct {
+	int counter;
+} atomic_t;
+#define ATOMIC_INIT(i)	{ (i) }
+
+#define atomic_read(v)		((v)->counter)
+#define atomic_set(v, i)	(((v)->counter) = (i))
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	v->counter += i;
+	local_irq_restore(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	v->counter -= i;
+	local_irq_restore(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	int __temp;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	v->counter += i;
+	__temp = v->counter;
+	local_irq_restore(flags);
+
+	return __temp;
+}
+
+#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	int __temp;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	v->counter -= i;
+	__temp = v->counter;
+	local_irq_restore(flags);
+
+	return __temp;
+}
+
+static inline void atomic_inc(volatile atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	v->counter++;
+	local_irq_restore(flags);
+}
+
+#define atomic_cmpxchg(v, o, n)	((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new)	(xchg(&((v)->counter), (new)))
+
+#define atomic_add_unless(v, a, u)				\
+({								\
+	int c, old;						\
+	c = atomic_read(v);					\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+		c = old;					\
+	c != (u);						\
+})
+#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
+
+static inline void atomic_dec(volatile atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	v->counter--;
+	local_irq_restore(flags);
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	v->counter &= ~mask;
+	local_irq_restore(flags);
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	v->counter |= mask;
+	local_irq_restore(flags);
+}
+
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#define atomic_dec_return(v)	atomic_sub_return(1, (v))
+#define atomic_inc_return(v)	atomic_add_return(1, (v))
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)
+
+#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v)	(atomic_sub_return(1, (v)) == 0)
+
+#include <asm-generic/atomic.h>
+
+#endif				/* __ARCH_BLACKFIN_ATOMIC__ */
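
Annotation (not part of the patch): nearly every operation above follows the same shape. Save and disable interrupts, do a plain read-modify-write, restore interrupts. On a uniprocessor Blackfin the only concurrent actor is an interrupt handler, so masking interrupts is all the atomicity required. The standalone userspace sketch below models that pattern, with POSIX signal blocking standing in for local_irq_save()/local_irq_restore(); the model_ names are invented for illustration and are not Blackfin code.

#include <signal.h>
#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* Blocking signals plays the role of masking interrupts here; the real
 * header relies on whatever local_irq_save()/local_irq_restore() expand
 * to in <asm/system.h>. */
static void model_atomic_add(int i, atomic_t *v)
{
	sigset_t all, old;

	sigfillset(&all);
	sigprocmask(SIG_BLOCK, &all, &old);	/* local_irq_save(flags)    */
	v->counter += i;			/* the bare read-modify-write */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* local_irq_restore(flags) */
}

int main(void)
{
	atomic_t v = { 0 };

	model_atomic_add(5, &v);
	model_atomic_add(-2, &v);
	printf("counter = %d\n", v.counter);	/* prints 3 */
	return 0;
}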
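
atomic_add_unless() is the one primitive here built on compare-and-swap rather than directly on interrupt masking: it loops, re-reading the counter, until either the cmpxchg lands or the counter equals the forbidden value u, and the whole expression evaluates to true only if the add actually happened. A standalone sketch of those semantics, with GCC's __sync_val_compare_and_swap builtin as a stand-in for the kernel's cmpxchg() (an assumption made for illustration, not this port's implementation):

#include <assert.h>
#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* Stand-in for atomic_cmpxchg(): returns the value found at the counter,
 * swapping in new only if it equaled old. */
static int model_cmpxchg(atomic_t *v, int old, int new)
{
	return __sync_val_compare_and_swap(&v->counter, old, new);
}

/* Same loop as the atomic_add_unless() macro in the patch above. */
static int model_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = v->counter;
	while (c != u && (old = model_cmpxchg(v, c, c + a)) != c)
		c = old;
	return c != u;	/* true iff the add actually happened */
}

int main(void)
{
	atomic_t v = { 2 };

	assert(model_add_unless(&v, 1, 0) == 1 && v.counter == 3);

	v.counter = 0;	/* counter hits the "unless" value: no add */
	assert(model_add_unless(&v, 1, 0) == 0 && v.counter == 0);

	puts("atomic_add_unless semantics verified");
	return 0;
}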
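
The "resource counting" the top-of-file comment alludes to is the usual get/put reference-counting idiom, which is what atomic_inc(), atomic_dec_and_test() and ATOMIC_INIT(1) combine into in practice. A hypothetical single-threaded sketch of that idiom (obj_get()/obj_put() are invented names, and plain arithmetic stands in for the interrupt-protected counter):

#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_t;

struct obj {
	atomic_t refcnt;
	char *payload;
};

/* Models atomic_inc(); single-threaded here, so no protection needed. */
static void obj_get(struct obj *o)
{
	o->refcnt.counter++;
}

/* Models the atomic_dec_and_test() pattern: whoever drops the count to
 * zero is responsible for freeing the object. */
static void obj_put(struct obj *o)
{
	if (--o->refcnt.counter == 0) {
		free(o->payload);
		free(o);
		puts("last reference dropped, object freed");
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->refcnt.counter = 1;		/* ATOMIC_INIT(1): owner's reference */
	o->payload = malloc(16);

	obj_get(o);	/* a second user takes a reference */
	obj_put(o);	/* ...and drops it: count back to 1 */
	obj_put(o);	/* owner drops the last reference */
	return 0;
}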