aboutsummaryrefslogtreecommitdiffstats
path: root/include/asm-cris/atomic.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/asm-cris/atomic.h')
-rw-r--r--include/asm-cris/atomic.h150
1 file changed, 150 insertions(+), 0 deletions(-)
diff --git a/include/asm-cris/atomic.h b/include/asm-cris/atomic.h
new file mode 100644
index 000000000000..b3dfea5a71e4
--- /dev/null
+++ b/include/asm-cris/atomic.h
@@ -0,0 +1,150 @@
/* $Id: atomic.h,v 1.3 2001/07/25 16:15:19 bjornw Exp $ */

#ifndef __ASM_CRIS_ATOMIC__
#define __ASM_CRIS_ATOMIC__

#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 */

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */

/* Force gcc to treat *x as a large opaque object so it cannot cache or
 * reorder the access through an alias. */
#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)

/* Counter is wrapped in a struct so plain ints can't be passed by mistake. */
typedef struct { int counter; } atomic_t;

/* Static initializer: atomic_t v = ATOMIC_INIT(0); */
#define ATOMIC_INIT(i)  { (i) }

/* Read/set are plain accesses; no interrupt masking is needed for them. */
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))

/* These should be written in asm but we do it in C for now. */
30extern __inline__ void atomic_add(int i, volatile atomic_t *v)
31{
32 unsigned long flags;
33 local_save_flags(flags);
34 local_irq_disable();
35 v->counter += i;
36 local_irq_restore(flags);
37}
38
39extern __inline__ void atomic_sub(int i, volatile atomic_t *v)
40{
41 unsigned long flags;
42 local_save_flags(flags);
43 local_irq_disable();
44 v->counter -= i;
45 local_irq_restore(flags);
46}
47
48extern __inline__ int atomic_add_return(int i, volatile atomic_t *v)
49{
50 unsigned long flags;
51 int retval;
52 local_save_flags(flags);
53 local_irq_disable();
54 retval = (v->counter += i);
55 local_irq_restore(flags);
56 return retval;
57}
58
59#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
60
61extern __inline__ int atomic_sub_return(int i, volatile atomic_t *v)
62{
63 unsigned long flags;
64 int retval;
65 local_save_flags(flags);
66 local_irq_disable();
67 retval = (v->counter -= i);
68 local_irq_restore(flags);
69 return retval;
70}
71
72extern __inline__ int atomic_sub_and_test(int i, volatile atomic_t *v)
73{
74 int retval;
75 unsigned long flags;
76 local_save_flags(flags);
77 local_irq_disable();
78 retval = (v->counter -= i) == 0;
79 local_irq_restore(flags);
80 return retval;
81}
82
83extern __inline__ void atomic_inc(volatile atomic_t *v)
84{
85 unsigned long flags;
86 local_save_flags(flags);
87 local_irq_disable();
88 (v->counter)++;
89 local_irq_restore(flags);
90}
91
92extern __inline__ void atomic_dec(volatile atomic_t *v)
93{
94 unsigned long flags;
95 local_save_flags(flags);
96 local_irq_disable();
97 (v->counter)--;
98 local_irq_restore(flags);
99}
100
101extern __inline__ int atomic_inc_return(volatile atomic_t *v)
102{
103 unsigned long flags;
104 int retval;
105 local_save_flags(flags);
106 local_irq_disable();
107 retval = (v->counter)++;
108 local_irq_restore(flags);
109 return retval;
110}
111
112extern __inline__ int atomic_dec_return(volatile atomic_t *v)
113{
114 unsigned long flags;
115 int retval;
116 local_save_flags(flags);
117 local_irq_disable();
118 retval = (v->counter)--;
119 local_irq_restore(flags);
120 return retval;
121}
122extern __inline__ int atomic_dec_and_test(volatile atomic_t *v)
123{
124 int retval;
125 unsigned long flags;
126 local_save_flags(flags);
127 local_irq_disable();
128 retval = --(v->counter) == 0;
129 local_irq_restore(flags);
130 return retval;
131}
132
133extern __inline__ int atomic_inc_and_test(volatile atomic_t *v)
134{
135 int retval;
136 unsigned long flags;
137 local_save_flags(flags);
138 local_irq_disable();
139 retval = ++(v->counter) == 0;
140 local_irq_restore(flags);
141 return retval;
142}
143
/* Atomic operations are already serializing */
/* NOTE(review): barrier() comes from another header — presumably the usual
 * compiler barrier, so these are no-ops at the hardware level; confirm. */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()

#endif