Diffstat (limited to 'include/asm-alpha/atomic.h')
-rw-r--r--  include/asm-alpha/atomic.h | 33
1 files changed, 27 insertions, 6 deletions
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 1b383e3cb68c..6183eab006d4 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -1,6 +1,8 @@
 #ifndef _ALPHA_ATOMIC_H
 #define _ALPHA_ATOMIC_H
 
+#include <asm/barrier.h>
+
 /*
  * Atomic operations that C can't guarantee us. Useful for
  * resource counting etc...
@@ -100,77 +102,94 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 static __inline__ long atomic_add_return(int i, atomic_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%1\n"
 	"	addl %0,%3,%2\n"
 	"	addl %0,%3,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
 static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%1\n"
 	"	addq %0,%3,%2\n"
 	"	addq %0,%3,%0\n"
 	"	stq_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
 static __inline__ long atomic_sub_return(int i, atomic_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%1\n"
 	"	subl %0,%3,%2\n"
 	"	subl %0,%3,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
 static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%1\n"
 	"	subq %0,%3,%2\n"
 	"	subq %0,%3,%0\n"
 	"	stq_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+#define atomic_add_unless(v, a, u)				\
+({								\
+	int c, old;						\
+	c = atomic_read(v);					\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+		c = old;					\
+	c != (u);						\
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic64_dec_return(v) atomic64_sub_return(1,(v))
 
@@ -181,6 +200,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 #define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
 
 #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
+#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
+
 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
 
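
Note on the barrier change: the deleted inline "mb" instructions are replaced by smp_mb() calls before and after each value-returning atomic, so atomic_add_return() and friends behave as full memory barriers on SMP while the barriers compile away on UP. The sketch below is only an illustration of that "fence, relaxed read-modify-write, fence" shape using GCC's __atomic builtins in user space; it is not the kernel code, and the name add_return_fully_ordered is invented for the example.

#include <stdio.h>

static long counter;

/* Bracket a relaxed fetch-add with full fences, mirroring the
 * smp_mb(); <ll/sc loop>; smp_mb(); pattern the patch introduces. */
static long add_return_fully_ordered(long i)
{
	long result;

	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* stands in for the leading smp_mb() */
	result = __atomic_add_fetch(&counter, i, __ATOMIC_RELAXED);
	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* stands in for the trailing smp_mb() */
	return result;
}

int main(void)
{
	printf("%ld\n", add_return_fully_ordered(5));	/* prints 5 */
	return 0;
}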
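
The new atomic_add_unless(v, a, u) adds a to the counter unless its current value is u, retrying the atomic_cmpxchg() until the update wins or u is observed, and returns non-zero if the add happened; atomic_inc_not_zero() is the usual caller. Below is a minimal user-space sketch of the same loop, again with GCC builtins and a made-up helper name add_unless(), assuming a sequentially consistent compare-and-exchange as a stand-in for cmpxchg().

#include <stdio.h>

/* Add `a` to *v unless *v == u; return non-zero if the add was done. */
static int add_unless(int *v, int a, int u)
{
	int c = __atomic_load_n(v, __ATOMIC_RELAXED);

	/* On CAS failure, c is refreshed with the current value of *v. */
	while (c != u &&
	       !__atomic_compare_exchange_n(v, &c, c + a, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
		;
	return c != u;
}

int main(void)
{
	int counter = 0;

	printf("%d\n", add_unless(&counter, 1, 0));	/* 0: counter was 0, add skipped */

	counter = 3;
	printf("%d\n", add_unless(&counter, 1, 0));	/* 1: add performed */
	printf("%d\n", counter);			/* 4 */
	return 0;
}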