author     Peter Zijlstra <peterz@infradead.org>     2014-04-23 14:15:48 -0400
committer  Thomas Gleixner <tglx@linutronix.de>      2015-07-27 08:06:23 -0400
commit     ae8c35c85be92b79c545c57c2a14c2f8136d3353
tree       2103a425e45c3678c63fd8c1bbd58ca67c0cc884
parent     2a3ed90f428cab65567c5421ebc0f6c8d02c1216

s390: Provide atomic_{or,xor,and}
Implement atomic logic ops -- atomic_{or,xor,and}.
These will replace the atomic_{set,clear}_mask functions that are
available on some archs.
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--   arch/s390/include/asm/atomic.h   47
1 file changed, 33 insertions(+), 14 deletions(-)
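For illustration only (this note and snippet are not part of the patch): code
that currently uses the s390-specific mask helpers would migrate to the new
generic logic ops roughly as below; "pending" is a made-up example variable.

	static atomic_t pending = ATOMIC_INIT(0);

	/* old, s390-only helpers (kept below, but marked __deprecated) */
	atomic_set_mask(0x01, &pending);	/* set bit 0   */
	atomic_clear_mask(0x01, &pending);	/* clear bit 0 */

	/* new, generic atomic logic ops provided by this patch */
	atomic_or(0x01, &pending);		/* set bit 0   */
	atomic_and(~0x01, &pending);		/* clear bit 0 */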
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index adbe3802e377..b3859d8e001f 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -27,6 +27,7 @@
 #define __ATOMIC_OR	"lao"
 #define __ATOMIC_AND	"lan"
 #define __ATOMIC_ADD	"laa"
+#define __ATOMIC_XOR	"lax"
 #define __ATOMIC_BARRIER "bcr	14,0\n"
 
 #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -49,6 +50,7 @@
 #define __ATOMIC_OR	"or"
 #define __ATOMIC_AND	"nr"
 #define __ATOMIC_ADD	"ar"
+#define __ATOMIC_XOR	"xr"
 #define __ATOMIC_BARRIER "\n"
 
 #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -118,14 +120,26 @@ static inline void atomic_add(int i, atomic_t *v)
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+#define ATOMIC_OP(op, OP)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER);	\
+}
+
+ATOMIC_OP(and, AND)
+ATOMIC_OP(or, OR)
+ATOMIC_OP(xor, XOR)
+
+#undef ATOMIC_OP
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
+	atomic_and(~mask, v);
 }
 
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
+	atomic_or(mask, v);
 }
 
 #define atomic_xchg(v, new)		(xchg(&((v)->counter), new))
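As a reading aid (hand-expanded, not compiler output), the ATOMIC_OP(or, OR)
instance above boils down to:

	static inline void atomic_or(int i, atomic_t *v)
	{
		__ATOMIC_LOOP(v, i, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
	}

Depending on which __ATOMIC_OR/__ATOMIC_LOOP pair is selected earlier in the
file, this effectively becomes either a single interlocked-access "lao"
instruction or a compare-and-swap loop around "or".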
@@ -167,6 +181,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define __ATOMIC64_OR	"laog"
 #define __ATOMIC64_AND	"lang"
 #define __ATOMIC64_ADD	"laag"
+#define __ATOMIC64_XOR	"laxg"
 #define __ATOMIC64_BARRIER "bcr	14,0\n"
 
 #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -189,6 +204,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define __ATOMIC64_OR	"ogr"
 #define __ATOMIC64_AND	"ngr"
 #define __ATOMIC64_ADD	"agr"
+#define __ATOMIC64_XOR	"xgr"
 #define __ATOMIC64_BARRIER "\n"
 
 #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -247,16 +263,6 @@ static inline void atomic64_add(long long i, atomic64_t *v)
 	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
 }
 
-static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
-{
-	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
-}
-
-static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
-{
-	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
-}
-
 #define atomic64_xchg(v, new)		(xchg(&((v)->counter), new))
 
 static inline long long atomic64_cmpxchg(atomic64_t *v,
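Note that, unlike the 32-bit helpers above (which stay behind as __deprecated
wrappers), atomic64_{set,clear}_mask are removed outright here; any remaining
callers would be converted along these lines (illustrative only, "mask" and
"v" are placeholders):

	atomic64_or(mask, v);	/* was atomic64_set_mask(mask, v)   */
	atomic64_and(~mask, v);	/* was atomic64_clear_mask(mask, v) */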
@@ -270,6 +276,19 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 	return old;
 }
 
+#define ATOMIC64_OP(op, OP)						\
+static inline void atomic64_##op(long i, atomic64_t *v)		\
+{									\
+	__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER);	\
+}
+
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC64_OP(and, AND)
+ATOMIC64_OP(or, OR)
+ATOMIC64_OP(xor, XOR)
+
+#undef ATOMIC64_OP
 #undef __ATOMIC64_LOOP
 
 static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
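The CONFIG_ARCH_HAS_ATOMIC_OR define added above is presumably a transition
marker telling generic code that this architecture supplies its own atomic
logic ops, so no generic fallback is needed. A hypothetical sketch of the kind
of cmpxchg-based fallback such a marker would guard (not the actual
include/linux/atomic.h code):

	#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
	static inline void atomic_or(int i, atomic_t *v)
	{
		int old;

		/* emulate the logic op with a compare-and-exchange loop */
		do {
			old = atomic_read(v);
		} while (atomic_cmpxchg(v, old, old | i) != old);
	}
	#endif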