author	Peter Zijlstra <peterz@infradead.org>	2014-04-23 13:35:00 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2015-07-27 08:06:23 -0400
commit	2a3ed90f428cab65567c5421ebc0f6c8d02c1216 (patch)
tree	d85187cd28a785f6807d39757a5fcc4c009e7e90
parent	304a0d699a3c6103b61d5ea18d56820e7d8e3116 (diff)
xtensa: Provide atomic_{or,xor,and}
Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
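As a call-site illustration of that replacement (not part of this patch: the function, the 'pending' parameter, and the 0x4 mask below are hypothetical, chosen only to show the conversion), the old helpers map onto the generic ops like this:

#include <linux/atomic.h>

/* Hypothetical call site showing the migration; 'pending' and the
 * 0x4 mask are illustrative only. */
static void example_flag_update(atomic_t *pending)
{
	/* Old, arch-specific helpers (now marked __deprecated): */
	atomic_set_mask(0x4, pending);		/* set bit 2 */
	atomic_clear_mask(0x4, pending);	/* clear bit 2 */

	/* Generic replacements provided by this patch: */
	atomic_or(0x4, pending);		/* set bit 2 */
	atomic_and(~0x4, pending);		/* clear bit 2 */
}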
-rw-r--r--	arch/xtensa/include/asm/atomic.h	85
1 file changed, 16 insertions(+), 69 deletions(-)
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 00b7d46b35b8..4dd2450300a6 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -145,10 +145,26 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_or(mask, v);
+}
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_and(~mask, v);
+}
+
 /**
  * atomic_sub_and_test - subtract value from variable and test result
  * @i: integer value to subtract
@@ -250,75 +266,6 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-	"1:     l32i    %1, %3, 0\n"
-	"       wsr     %1, scompare1\n"
-	"       and     %0, %1, %2\n"
-	"       s32c1i  %0, %3, 0\n"
-	"       bne     %0, %1, 1b\n"
-	: "=&a" (result), "=&a" (tmp)
-	: "a" (~mask), "a" (v)
-	: "memory"
-	);
-#else
-	unsigned int all_f = -1;
-	unsigned int vval;
-
-	__asm__ __volatile__(
-	"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-	"       l32i    %0, %2, 0\n"
-	"       xor     %1, %4, %3\n"
-	"       and     %0, %0, %4\n"
-	"       s32i    %0, %2, 0\n"
-	"       wsr     a15, ps\n"
-	"       rsync\n"
-	: "=&a" (vval), "=a" (mask)
-	: "a" (v), "a" (all_f), "1" (mask)
-	: "a15", "memory"
-	);
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-	"1:     l32i    %1, %3, 0\n"
-	"       wsr     %1, scompare1\n"
-	"       or      %0, %1, %2\n"
-	"       s32c1i  %0, %3, 0\n"
-	"       bne     %0, %1, 1b\n"
-	: "=&a" (result), "=&a" (tmp)
-	: "a" (mask), "a" (v)
-	: "memory"
-	);
-#else
-	unsigned int vval;
-
-	__asm__ __volatile__(
-	"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-	"       l32i    %0, %2, 0\n"
-	"       or      %0, %0, %1\n"
-	"       s32i    %0, %2, 0\n"
-	"       wsr     a15, ps\n"
-	"       rsync\n"
-	: "=&a" (vval)
-	: "a" (mask), "a" (v)
-	: "a15", "memory"
-	);
-#endif
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _XTENSA_ATOMIC_H */
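
Both deleted helpers spin on the same l32i/wsr scompare1/s32c1i compare-exchange loop and differ only in the logic instruction, which is exactly the pattern the ATOMIC_OP() macro factors out. The ATOMIC_OP() definition itself sits earlier in atomic.h and is not visible in these hunks; the following is a sketch of what ATOMIC_OP(or) plausibly expands to on the XCHAL_HAVE_S32C1I path, reconstructed from the deleted atomic_set_mask() body, not the verbatim macro output:

/* Sketch: presumed expansion of ATOMIC_OP(or) with XCHAL_HAVE_S32C1I,
 * reconstructed from the deleted atomic_set_mask() loop above. The real
 * macro substitutes the op name for the logic instruction. */
static inline void atomic_or(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
	"1:     l32i    %1, %3, 0\n"	/* load current value            */
	"       wsr     %1, scompare1\n"/* arm the compare value         */
	"       or      %0, %1, %2\n"	/* apply the logic op            */
	"       s32c1i  %0, %3, 0\n"	/* store iff *v is still tmp     */
	"       bne     %0, %1, 1b\n"	/* lost a race: reload and retry */
	: "=&a" (result), "=&a" (tmp)
	: "a" (i), "a" (v)
	: "memory"
	);
}

With this in place, atomic_or(mask, v) performs the same read-modify-write loop the open-coded atomic_set_mask() did, which is why the new __deprecated wrappers above can forward to the generic ops.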