author		Will Deacon <will.deacon@arm.com>	2016-04-22 13:01:33 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-06-16 04:48:22 -0400
commit		2efe95fe695270ae1a225805f016303505972d86 (patch)
tree		2d67fc185ffec9ef5b0462432c7886e017776796
parent		6822a84dd4e35a1beb70028e46b5f60c14fc422d (diff)
locking/atomic, arch/arm64: Implement atomic{,64}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}() for LSE instructions
Implement FETCH-OP atomic primitives. These are very similar to the
existing OP-RETURN primitives we already have, except that they return
the value of the atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as
bitops (because it becomes impossible to reconstruct the state prior
to modification).

This patch implements the LSE variants.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steve Capper <steve.capper@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1461344493-8262-2-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
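As a minimal illustration of the fetch-before-modify semantics (a userspace
sketch using C11 atomics rather than the kernel atomic_t API, and not part of
this patch), a fetch-style OR lets the caller recover the old state of a bit
that an OP-RETURN primitive would already have destroyed:

/* Userspace illustration only -- C11 atomics, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint flags;

/* Returns 1 if this caller set the bit, 0 if it was already set. */
static int claim_bit(unsigned int bit)
{
	/* fetch_or returns the value *before* the OR, so the prior state
	 * of the bit is still visible even though OR is irreversible. */
	unsigned int old = atomic_fetch_or(&flags, 1u << bit);

	return !(old & (1u << bit));
}

int main(void)
{
	printf("%d\n", claim_bit(3));	/* 1: we set it */
	printf("%d\n", claim_bit(3));	/* 0: already set */
	return 0;
}

The kernel's atomic_fetch_or() has the same return-the-old-value contract;
in the LSE implementation below it maps directly onto ldset.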
-rw-r--r--	arch/arm64/include/asm/atomic_lse.h	172
1 file changed, 172 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 37a0f03560f7..b5890be8f257 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -46,6 +46,38 @@ ATOMIC_OP(add, stadd)
 
 #undef ATOMIC_OP
 
+#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
+static inline int atomic_fetch_##op##name(int i, atomic_t *v) \
+{ \
+	register int w0 asm ("w0") = i; \
+	register atomic_t *x1 asm ("x1") = v; \
+ \
+	asm volatile(ARM64_LSE_ATOMIC_INSN( \
+	/* LL/SC */ \
+	__LL_SC_ATOMIC(fetch_##op##name), \
+	/* LSE atomics */ \
+" " #asm_op #mb " %w[i], %w[i], %[v]") \
+	: [i] "+r" (w0), [v] "+Q" (v->counter) \
+	: "r" (x1) \
+	: __LL_SC_CLOBBERS, ##cl); \
+ \
+	return w0; \
+}
+
+#define ATOMIC_FETCH_OPS(op, asm_op) \
+	ATOMIC_FETCH_OP(_relaxed, , op, asm_op) \
+	ATOMIC_FETCH_OP(_acquire, a, op, asm_op, "memory") \
+	ATOMIC_FETCH_OP(_release, l, op, asm_op, "memory") \
+	ATOMIC_FETCH_OP( , al, op, asm_op, "memory")
+
+ATOMIC_FETCH_OPS(andnot, ldclr)
+ATOMIC_FETCH_OPS(or, ldset)
+ATOMIC_FETCH_OPS(xor, ldeor)
+ATOMIC_FETCH_OPS(add, ldadd)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_FETCH_OPS
+
 #define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
 static inline int atomic_add_return##name(int i, atomic_t *v) \
 { \
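For reference, a hand expansion (illustration only, not part of the diff) of
the _relaxed add instance generated by ATOMIC_FETCH_OPS(add, ldadd) above:

static inline int atomic_fetch_add_relaxed(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC(fetch_add_relaxed),
	/* LSE atomics */
	" ldadd %w[i], %w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);

	return w0;
}

The acquire, release and fully ordered variants differ only in the a/l/al
suffix pasted onto the ldadd mnemonic and in the extra "memory" clobber.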
@@ -90,6 +122,33 @@ static inline void atomic_and(int i, atomic_t *v)
 	: __LL_SC_CLOBBERS);
 }
 
+#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
+static inline int atomic_fetch_and##name(int i, atomic_t *v) \
+{ \
+	register int w0 asm ("w0") = i; \
+	register atomic_t *x1 asm ("x1") = v; \
+ \
+	asm volatile(ARM64_LSE_ATOMIC_INSN( \
+	/* LL/SC */ \
+	" nop\n" \
+	__LL_SC_ATOMIC(fetch_and##name), \
+	/* LSE atomics */ \
+	" mvn %w[i], %w[i]\n" \
+	" ldclr" #mb " %w[i], %w[i], %[v]") \
+	: [i] "+r" (w0), [v] "+Q" (v->counter) \
+	: "r" (x1) \
+	: __LL_SC_CLOBBERS, ##cl); \
+ \
+	return w0; \
+}
+
+ATOMIC_FETCH_OP_AND(_relaxed, )
+ATOMIC_FETCH_OP_AND(_acquire, a, "memory")
+ATOMIC_FETCH_OP_AND(_release, l, "memory")
+ATOMIC_FETCH_OP_AND( , al, "memory")
+
+#undef ATOMIC_FETCH_OP_AND
+
 static inline void atomic_sub(int i, atomic_t *v)
 {
 	register int w0 asm ("w0") = i;
@@ -135,6 +194,33 @@ ATOMIC_OP_SUB_RETURN(_release, l, "memory")
 ATOMIC_OP_SUB_RETURN( , al, "memory")
 
 #undef ATOMIC_OP_SUB_RETURN
+
+#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \
+static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
+{ \
+	register int w0 asm ("w0") = i; \
+	register atomic_t *x1 asm ("x1") = v; \
+ \
+	asm volatile(ARM64_LSE_ATOMIC_INSN( \
+	/* LL/SC */ \
+	" nop\n" \
+	__LL_SC_ATOMIC(fetch_sub##name), \
+	/* LSE atomics */ \
+	" neg %w[i], %w[i]\n" \
+	" ldadd" #mb " %w[i], %w[i], %[v]") \
+	: [i] "+r" (w0), [v] "+Q" (v->counter) \
+	: "r" (x1) \
+	: __LL_SC_CLOBBERS, ##cl); \
+ \
+	return w0; \
+}
+
+ATOMIC_FETCH_OP_SUB(_relaxed, )
+ATOMIC_FETCH_OP_SUB(_acquire, a, "memory")
+ATOMIC_FETCH_OP_SUB(_release, l, "memory")
+ATOMIC_FETCH_OP_SUB( , al, "memory")
+
+#undef ATOMIC_FETCH_OP_SUB
 #undef __LL_SC_ATOMIC
 
 #define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op)
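LSE provides ldclr (atomically clear the bits set in the source operand) and
ldadd, but no direct atomic AND or SUB, which is why the two hunks above first
invert the mask with mvn for fetch_and and negate the operand with neg for
fetch_sub. A standalone sketch of the identities being relied on (plain C,
illustration only, not kernel code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t old = 0xf0f0f0f0u, i = 0x0ff00ff0u;

	/* fetch_and: mvn then ldclr -- clearing the bits of ~i from old
	 * leaves exactly old & i. */
	assert((old & ~(~i)) == (old & i));

	/* fetch_sub: neg then ldadd -- adding -i is subtracting i
	 * (modulo 2^32, matching the hardware behaviour). */
	assert(old + (uint32_t)-i == old - i);

	return 0;
}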
@@ -158,6 +244,38 @@ ATOMIC64_OP(add, stadd)
 
 #undef ATOMIC64_OP
 
+#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
+static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
+{ \
+	register long x0 asm ("x0") = i; \
+	register atomic64_t *x1 asm ("x1") = v; \
+ \
+	asm volatile(ARM64_LSE_ATOMIC_INSN( \
+	/* LL/SC */ \
+	__LL_SC_ATOMIC64(fetch_##op##name), \
+	/* LSE atomics */ \
+" " #asm_op #mb " %[i], %[i], %[v]") \
+	: [i] "+r" (x0), [v] "+Q" (v->counter) \
+	: "r" (x1) \
+	: __LL_SC_CLOBBERS, ##cl); \
+ \
+	return x0; \
+}
+
+#define ATOMIC64_FETCH_OPS(op, asm_op) \
+	ATOMIC64_FETCH_OP(_relaxed, , op, asm_op) \
+	ATOMIC64_FETCH_OP(_acquire, a, op, asm_op, "memory") \
+	ATOMIC64_FETCH_OP(_release, l, op, asm_op, "memory") \
+	ATOMIC64_FETCH_OP( , al, op, asm_op, "memory")
+
+ATOMIC64_FETCH_OPS(andnot, ldclr)
+ATOMIC64_FETCH_OPS(or, ldset)
+ATOMIC64_FETCH_OPS(xor, ldeor)
+ATOMIC64_FETCH_OPS(add, ldadd)
+
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_FETCH_OPS
+
 #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
 static inline long atomic64_add_return##name(long i, atomic64_t *v) \
 { \
@@ -202,6 +320,33 @@ static inline void atomic64_and(long i, atomic64_t *v)
 	: __LL_SC_CLOBBERS);
 }
 
+#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
+static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
+{ \
+	register long x0 asm ("w0") = i; \
+	register atomic64_t *x1 asm ("x1") = v; \
+ \
+	asm volatile(ARM64_LSE_ATOMIC_INSN( \
+	/* LL/SC */ \
+	" nop\n" \
+	__LL_SC_ATOMIC64(fetch_and##name), \
+	/* LSE atomics */ \
+	" mvn %[i], %[i]\n" \
+	" ldclr" #mb " %[i], %[i], %[v]") \
+	: [i] "+r" (x0), [v] "+Q" (v->counter) \
+	: "r" (x1) \
+	: __LL_SC_CLOBBERS, ##cl); \
+ \
+	return x0; \
+}
+
+ATOMIC64_FETCH_OP_AND(_relaxed, )
+ATOMIC64_FETCH_OP_AND(_acquire, a, "memory")
+ATOMIC64_FETCH_OP_AND(_release, l, "memory")
+ATOMIC64_FETCH_OP_AND( , al, "memory")
+
+#undef ATOMIC64_FETCH_OP_AND
+
 static inline void atomic64_sub(long i, atomic64_t *v)
 {
 	register long x0 asm ("x0") = i;
@@ -248,6 +393,33 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
 
 #undef ATOMIC64_OP_SUB_RETURN
 
+#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
+static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
+{ \
+	register long x0 asm ("w0") = i; \
+	register atomic64_t *x1 asm ("x1") = v; \
+ \
+	asm volatile(ARM64_LSE_ATOMIC_INSN( \
+	/* LL/SC */ \
+	" nop\n" \
+	__LL_SC_ATOMIC64(fetch_sub##name), \
+	/* LSE atomics */ \
+	" neg %[i], %[i]\n" \
+	" ldadd" #mb " %[i], %[i], %[v]") \
+	: [i] "+r" (x0), [v] "+Q" (v->counter) \
+	: "r" (x1) \
+	: __LL_SC_CLOBBERS, ##cl); \
+ \
+	return x0; \
+}
+
+ATOMIC64_FETCH_OP_SUB(_relaxed, )
+ATOMIC64_FETCH_OP_SUB(_acquire, a, "memory")
+ATOMIC64_FETCH_OP_SUB(_release, l, "memory")
+ATOMIC64_FETCH_OP_SUB( , al, "memory")
+
+#undef ATOMIC64_FETCH_OP_SUB
+
 static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
 	register long x0 asm ("x0") = (long)v;