Diffstat (limited to 'arch/s390/include/asm/atomic.h')
 -rw-r--r--	arch/s390/include/asm/atomic.h	| 205
 1 file changed, 146 insertions(+), 59 deletions(-)
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index c7d0abfb0f00..ae7c8f9f94a5 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -1,33 +1,23 @@
 #ifndef __ARCH_S390_ATOMIC__
 #define __ARCH_S390_ATOMIC__
 
-#include <linux/compiler.h>
-#include <linux/types.h>
-
 /*
- * include/asm-s390/atomic.h
+ * Copyright 1999,2009 IBM Corp.
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *	      Denis Joseph Barrow,
+ *	      Arnd Bergmann <arndb@de.ibm.com>,
  *
- * S390 version
- *   Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *              Denis Joseph Barrow,
- *              Arnd Bergmann (arndb@de.ibm.com)
- *
- * Derived from "include/asm-i386/bitops.h"
- *   Copyright (C) 1992, Linus Torvalds
+ * Atomic operations that C can't guarantee us.
+ * Useful for resource counting etc.
+ * s390 uses 'Compare And Swap' for atomicity in SMP environment.
  *
  */
 
-/*
- * Atomic operations that C can't guarantee us.  Useful for
- * resource counting etc..
- * S390 uses 'Compare And Swap' for atomicity in SMP enviroment
- */
+#include <linux/compiler.h>
+#include <linux/types.h>
 
 #define ATOMIC_INIT(i)	{ (i) }
 
-#ifdef __KERNEL__
-
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 
 #define __CS_LOOP(ptr, op_val, op_string) ({			\
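
The trailing context above introduces __CS_LOOP, the compare-and-swap retry macro that every 32-bit operation below expands to. Its body lies outside this hunk; as a rough sketch (using GCC's __sync builtin in place of the CS inline assembly, so illustrative only), the pattern it generates is the classic CS retry loop:

	/* Sketch only: the shape of the loop __CS_LOOP generates. */
	static inline int cs_loop_add(int *ptr, int val)
	{
		int old, new;

		do {
			old = *ptr;		/* sample the current value */
			new = old + val;	/* apply the op ("ar" = add) */
			/* the swap succeeds only if *ptr still equals old */
		} while (__sync_val_compare_and_swap(ptr, old, new) != old);
		return new;
	}
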
@@ -77,7 +67,7 @@ static inline void atomic_set(atomic_t *v, int i)
 	barrier();
 }
 
-static __inline__ int atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t *v)
 {
 	return __CS_LOOP(v, i, "ar");
 }
@@ -87,7 +77,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 #define atomic_inc_return(_v)		atomic_add_return(1, _v)
 #define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
 
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t *v)
 {
 	return __CS_LOOP(v, i, "sr");
 }
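
The helpers visible as context around these two functions (atomic_inc_return, atomic_dec_and_test, and the rest) are plain macro wrappers over the two *_return primitives, so callers never see the CS loop directly. A hypothetical caller, for illustration:

	/* Hypothetical caller; refcnt and free_the_object() are not in this patch. */
	static atomic_t refcnt = ATOMIC_INIT(1);

	static void put_ref(void)
	{
		/* expands to (atomic_sub_return(1, &refcnt) == 0) */
		if (atomic_dec_and_test(&refcnt))
			free_the_object();
	}
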
@@ -97,19 +87,19 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
+static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
 {
 	__CS_LOOP(v, ~mask, "nr");
 }
 
-static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
+static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
 {
 	__CS_LOOP(v, mask, "or");
 }
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 	asm volatile(
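
atomic_cmpxchg(), whose body continues in the next hunk, returns the value found in v->counter rather than a success flag; callers detect success by comparing the return value with old. Its semantics, sketched with a GCC builtin rather than the CS-based implementation:

	/* Semantics sketch only, not the real CS implementation. */
	static inline int cmpxchg_sketch(int *ptr, int old, int new)
	{
		/* if (*ptr == old) *ptr = new; always returns the prior value */
		return __sync_val_compare_and_swap(ptr, old, new);
	}
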
@@ -127,7 +117,7 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return old;
 }
 
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
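
The hunk cuts atomic_add_unless() off after the initial read. For reference, the canonical shape of such an add-unless loop looks roughly like this (a sketch, not the verbatim body):

	/* Sketch: add a to *v unless it currently holds u. */
	static inline int add_unless_sketch(atomic_t *v, int a, int u)
	{
		int c, old;

		c = atomic_read(v);
		for (;;) {
			if (unlikely(c == u))		/* forbidden value: bail out */
				break;
			old = atomic_cmpxchg(v, c, c + a);
			if (likely(old == c))		/* swap succeeded */
				break;
			c = old;			/* lost a race: retry */
		}
		return c != u;				/* true if we added */
	}
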
@@ -146,9 +136,10 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 
 #undef __CS_LOOP
 
-#ifdef __s390x__
 #define ATOMIC64_INIT(i)  { (i) }
 
+#ifdef CONFIG_64BIT
+
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 
 #define __CSG_LOOP(ptr, op_val, op_string) ({			\
@@ -162,7 +153,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 		: "=&d" (old_val), "=&d" (new_val),		\
 		  "=Q" (((atomic_t *)(ptr))->counter)		\
 		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
-		: "cc", "memory" );				\
+		: "cc", "memory");				\
 	new_val;						\
 })
 
@@ -180,7 +171,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 		  "=m" (((atomic_t *)(ptr))->counter)		\
 		: "a" (ptr), "d" (op_val),			\
 		  "m" (((atomic_t *)(ptr))->counter)		\
-		: "cc", "memory" );				\
+		: "cc", "memory");				\
 	new_val;						\
 })
 
@@ -198,39 +189,29 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 	barrier();
 }
 
-static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
 	return __CSG_LOOP(v, i, "agr");
 }
-#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
-#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v)		atomic64_add_return(1, _v)
-#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
-#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
 
-static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 {
 	return __CSG_LOOP(v, i, "sgr");
 }
-#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
-#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
-#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
-#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
 
-static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
+static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
 {
 	__CSG_LOOP(v, ~mask, "ngr");
 }
 
-static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
+static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
 {
 	__CSG_LOOP(v, mask, "ogr");
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
-					     long long old, long long new)
+static inline long long atomic64_cmpxchg(atomic64_t *v,
+					  long long old, long long new)
 {
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
@@ -249,8 +230,112 @@ static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
 	return old;
 }
 
-static __inline__ int atomic64_add_unless(atomic64_t *v,
-					   long long a, long long u)
+#undef __CSG_LOOP
+
+#else /* CONFIG_64BIT */
+
+typedef struct {
+	long long counter;
+} atomic64_t;
+
+static inline long long atomic64_read(const atomic64_t *v)
+{
+	register_pair rp;
+
+	asm volatile(
+		"	lm	%0,%N0,0(%1)"
+		: "=&d" (rp)
+		: "a" (&v->counter), "m" (v->counter)
+		);
+	return rp.pair;
+}
+
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+	register_pair rp = {.pair = i};
+
+	asm volatile(
+		"	stm	%1,%N1,0(%2)"
+		: "=m" (v->counter)
+		: "d" (rp), "a" (&v->counter)
+		);
+}
+
+static inline long long atomic64_xchg(atomic64_t *v, long long new)
+{
+	register_pair rp_new = {.pair = new};
+	register_pair rp_old;
+
+	asm volatile(
+		"	lm	%0,%N0,0(%2)\n"
+		"0:	cds	%0,%3,0(%2)\n"
+		"	jl	0b\n"
+		: "=&d" (rp_old), "+m" (v->counter)
+		: "a" (&v->counter), "d" (rp_new)
+		: "cc");
+	return rp_old.pair;
+}
+
+static inline long long atomic64_cmpxchg(atomic64_t *v,
+					 long long old, long long new)
+{
+	register_pair rp_old = {.pair = old};
+	register_pair rp_new = {.pair = new};
+
+	asm volatile(
+		"	cds	%0,%3,0(%2)"
+		: "+&d" (rp_old), "+m" (v->counter)
+		: "a" (&v->counter), "d" (rp_new)
+		: "cc");
+	return rp_old.pair;
+}
+
+
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+	long long old, new;
+
+	do {
+		old = atomic64_read(v);
+		new = old + i;
+	} while (atomic64_cmpxchg(v, old, new) != old);
+	return new;
+}
+
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+	long long old, new;
+
+	do {
+		old = atomic64_read(v);
+		new = old - i;
+	} while (atomic64_cmpxchg(v, old, new) != old);
+	return new;
+}
+
+static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
+{
+	long long old, new;
+
+	do {
+		old = atomic64_read(v);
+		new = old | mask;
+	} while (atomic64_cmpxchg(v, old, new) != old);
+}
+
+static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
+{
+	long long old, new;
+
+	do {
+		old = atomic64_read(v);
+		new = old & mask;
+	} while (atomic64_cmpxchg(v, old, new) != old);
+}
+
+#endif /* CONFIG_64BIT */
+
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	long long c, old;
 	c = atomic64_read(v);
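
On 31 bit, the patch emulates 64-bit atomics with CDS (compare double and swap) on an even/odd register pair: atomic64_read()/atomic64_set() move the pair with LM/STM, xchg and cmpxchg map directly onto CDS, and every further read-modify-write op is a cmpxchg retry loop, as the added atomic64_add_return() shows. Any additional op would follow the same template; for instance a hypothetical atomic64_xor, which this patch does not add:

	/* Hypothetical op, same retry template as the added functions. */
	static inline void atomic64_xor_sketch(long long i, atomic64_t *v)
	{
		long long old, new;

		do {
			old = atomic64_read(v);		/* consistent 64-bit read */
			new = old ^ i;			/* the new operation */
		} while (atomic64_cmpxchg(v, old, new) != old);	/* CDS retry */
	}
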
@@ -265,15 +350,17 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
 	return c != u;
 }
 
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-#undef __CSG_LOOP
-
-#else /* __s390x__ */
-
-#include <asm-generic/atomic64.h>
-
-#endif /* __s390x__ */
+#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
+#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
+#define atomic64_inc(_v)		atomic64_add_return(1, _v)
+#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
+#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
+#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
+#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
+#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
+#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
+#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
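
With the wrapper macros consolidated here, they now serve both the 64-bit and the new 31-bit implementation, and callers are unchanged; atomic64_inc_not_zero() remains the usual "take a reference only while the count is non-zero" primitive. A hypothetical caller:

	/* Hypothetical caller; obj_refs is not part of this patch. */
	static int try_get(atomic64_t *obj_refs)
	{
		/* increments unless the count is already 0; returns 0 if dead */
		return atomic64_inc_not_zero(obj_refs);
	}
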
@@ -281,5 +368,5 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
 #define smp_mb__after_atomic_inc()	smp_mb()
 
 #include <asm-generic/atomic-long.h>
-#endif /* __KERNEL__ */
+
 #endif /* __ARCH_S390_ATOMIC__ */