path: root/arch/riscv/include/asm/atomic.h
Diffstat (limited to 'arch/riscv/include/asm/atomic.h')
-rw-r--r--  arch/riscv/include/asm/atomic.h | 103
1 file changed, 54 insertions(+), 49 deletions(-)
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index e2e37c57cbeb..e65d1cd89e28 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -50,30 +50,30 @@ static __always_inline void atomic64_set(atomic64_t *v, long i)
  * have the AQ or RL bits set. These don't return anything, so there's only
  * one version to worry about.
  */
-#define ATOMIC_OP(op, asm_op, c_op, I, asm_type, c_type, prefix) \
+#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
 static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
 { \
 	__asm__ __volatile__ ( \
 		"amo" #asm_op "." #asm_type " zero, %1, %0" \
 		: "+A" (v->counter) \
 		: "r" (I) \
 		: "memory"); \
 }
 
 #ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, asm_op, c_op, I) \
-	ATOMIC_OP (op, asm_op, c_op, I, w, int, )
+#define ATOMIC_OPS(op, asm_op, I) \
+	ATOMIC_OP (op, asm_op, I, w, int, )
 #else
-#define ATOMIC_OPS(op, asm_op, c_op, I) \
-	ATOMIC_OP (op, asm_op, c_op, I, w, int, ) \
-	ATOMIC_OP (op, asm_op, c_op, I, d, long, 64)
+#define ATOMIC_OPS(op, asm_op, I) \
+	ATOMIC_OP (op, asm_op, I, w, int, ) \
+	ATOMIC_OP (op, asm_op, I, d, long, 64)
 #endif
 
-ATOMIC_OPS(add, add, +,  i)
-ATOMIC_OPS(sub, add, +, -i)
-ATOMIC_OPS(and, and, &,  i)
-ATOMIC_OPS( or,  or, |,  i)
-ATOMIC_OPS(xor, xor, ^,  i)
+ATOMIC_OPS(add, add,  i)
+ATOMIC_OPS(sub, add, -i)
+ATOMIC_OPS(and, and,  i)
+ATOMIC_OPS( or,  or,  i)
+ATOMIC_OPS(xor, xor,  i)
 
 #undef ATOMIC_OP
 #undef ATOMIC_OPS
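For orientation, here is a rough sketch of what the simplified macro produces for the plain 32-bit add. This is a standalone approximation rather than code from this file: atomic_t is re-declared only to make the snippet self-contained, and __always_inline is replaced by plain inline.

typedef struct { int counter; } atomic_t;	/* stand-in for the kernel's type */

/*
 * Roughly what ATOMIC_OPS(add, add, i) -> ATOMIC_OP(add, add, i, w, int, )
 * expands to: a single amoadd.w whose result goes to the zero register and
 * is discarded, which is why the dropped c_op argument was never used by
 * these void operations.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__ (
		"amoadd.w zero, %1, %0"
		: "+A" (v->counter)
		: "r" (i)
		: "memory");
}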
@@ -83,7 +83,7 @@ ATOMIC_OPS(xor, xor, ^, i)
  * There's two flavors of these: the arithmatic ops have both fetch and return
  * versions, while the logical ops only have fetch versions.
  */
-#define ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix) \
+#define ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, asm_type, c_type, prefix) \
 static __always_inline c_type atomic##prefix##_fetch_##op##c_or(c_type i, atomic##prefix##_t *v) \
 { \
 	register c_type ret; \
@@ -103,13 +103,13 @@ static __always_inline c_type atomic##prefix##_##op##_return##c_or(c_type i, ato
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
-	ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
+	ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, w, int, ) \
 	ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int, )
 #else
 #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
-	ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
+	ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, w, int, ) \
 	ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
-	ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, d, long, 64) \
+	ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, d, long, 64) \
 	ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
 #endif
 
@@ -126,28 +126,28 @@ ATOMIC_OPS(sub, add, +, -i, .aqrl, )
 #undef ATOMIC_OPS
 
 #ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
-	ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w, int, )
+#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or) \
+	ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w, int, )
 #else
-#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
-	ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
-	ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
+#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or) \
+	ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w, int, ) \
+	ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, d, long, 64)
 #endif
 
-ATOMIC_OPS(and, and, &, i, , _relaxed)
-ATOMIC_OPS(and, and, &, i, .aq , _acquire)
-ATOMIC_OPS(and, and, &, i, .rl , _release)
-ATOMIC_OPS(and, and, &, i, .aqrl, )
+ATOMIC_OPS(and, and, i, , _relaxed)
+ATOMIC_OPS(and, and, i, .aq , _acquire)
+ATOMIC_OPS(and, and, i, .rl , _release)
+ATOMIC_OPS(and, and, i, .aqrl, )
 
-ATOMIC_OPS( or, or, |, i, , _relaxed)
-ATOMIC_OPS( or, or, |, i, .aq , _acquire)
-ATOMIC_OPS( or, or, |, i, .rl , _release)
-ATOMIC_OPS( or, or, |, i, .aqrl, )
+ATOMIC_OPS( or, or, i, , _relaxed)
+ATOMIC_OPS( or, or, i, .aq , _acquire)
+ATOMIC_OPS( or, or, i, .rl , _release)
+ATOMIC_OPS( or, or, i, .aqrl, )
 
-ATOMIC_OPS(xor, xor, ^, i, , _relaxed)
-ATOMIC_OPS(xor, xor, ^, i, .aq , _acquire)
-ATOMIC_OPS(xor, xor, ^, i, .rl , _release)
-ATOMIC_OPS(xor, xor, ^, i, .aqrl, )
+ATOMIC_OPS(xor, xor, i, , _relaxed)
+ATOMIC_OPS(xor, xor, i, .aq , _acquire)
+ATOMIC_OPS(xor, xor, i, .rl , _release)
+ATOMIC_OPS(xor, xor, i, .aqrl, )
 
 #undef ATOMIC_OPS
 
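The instantiations above pair each AMO with the .aq/.rl/.aqrl ordering suffixes. Below is approximately what the _acquire fetch variant of "and" looks like once expanded. The full ATOMIC_FETCH_OP body is cut off in the earlier hunk, so the operand layout is an assumption based on the visible pattern, and the same standalone stand-ins are used.

typedef struct { int counter; } atomic_t;	/* stand-in for the kernel's type */

/*
 * Assumed expansion of ATOMIC_OPS(and, and, i, .aq , _acquire) for the
 * 32-bit fetch form: the same amoand.w as the void op, with the .aq suffix
 * appended and the previous value returned through a register.
 */
static inline int atomic_fetch_and_acquire(int i, atomic_t *v)
{
	register int ret;

	__asm__ __volatile__ (
		"amoand.w.aq %1, %2, %0"
		: "+A" (v->counter), "=r" (ret)
		: "r" (i)
		: "memory");
	return ret;
}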
@@ -182,13 +182,13 @@ ATOMIC_OPS(add_negative, add, <, 0)
 #undef ATOMIC_OP
 #undef ATOMIC_OPS
 
-#define ATOMIC_OP(op, func_op, c_op, I, c_type, prefix) \
+#define ATOMIC_OP(op, func_op, I, c_type, prefix) \
 static __always_inline void atomic##prefix##_##op(atomic##prefix##_t *v) \
 { \
 	atomic##prefix##_##func_op(I, v); \
 }
 
-#define ATOMIC_FETCH_OP(op, func_op, c_op, I, c_type, prefix) \
+#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix) \
 static __always_inline c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v) \
 { \
 	return atomic##prefix##_fetch_##func_op(I, v); \
@@ -202,16 +202,16 @@ static __always_inline c_type atomic##prefix##_##op##_return(atomic##prefix##_t
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, c_op, I) \
-	ATOMIC_OP       (op, asm_op, c_op, I, int, ) \
-	ATOMIC_FETCH_OP (op, asm_op, c_op, I, int, ) \
+	ATOMIC_OP       (op, asm_op, I, int, ) \
+	ATOMIC_FETCH_OP (op, asm_op, I, int, ) \
 	ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, )
 #else
 #define ATOMIC_OPS(op, asm_op, c_op, I) \
-	ATOMIC_OP       (op, asm_op, c_op, I, int, ) \
-	ATOMIC_FETCH_OP (op, asm_op, c_op, I, int, ) \
+	ATOMIC_OP       (op, asm_op, I, int, ) \
+	ATOMIC_FETCH_OP (op, asm_op, I, int, ) \
 	ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, ) \
-	ATOMIC_OP       (op, asm_op, c_op, I, long, 64) \
-	ATOMIC_FETCH_OP (op, asm_op, c_op, I, long, 64) \
+	ATOMIC_OP       (op, asm_op, I, long, 64) \
+	ATOMIC_FETCH_OP (op, asm_op, I, long, 64) \
 	ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
 #endif
 
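This block only strips c_op from the wrapper-generating macros; the wrappers themselves stay trivial. Assuming inc is one of the instantiations (the actual ATOMIC_OPS calls fall outside the visible hunks), the generated function is just a forwarder with a fixed immediate, roughly:

typedef struct { int counter; } atomic_t;	/* stand-in */
void atomic_add(int i, atomic_t *v);		/* provided elsewhere */

/* Assumed shape of ATOMIC_OP(inc, add, 1, int, ): forward a constant. */
static inline void atomic_inc(atomic_t *v)
{
	atomic_add(1, v);
}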
@@ -300,8 +300,13 @@ static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
 
 /*
  * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
- * {cmp,}xchg and the operations that return, so they need a barrier. We just
- * use the other implementations directly.
+ * {cmp,}xchg and the operations that return, so they need a barrier.
+ */
+/*
+ * FIXME: atomic_cmpxchg_{acquire,release,relaxed} are all implemented by
+ * assigning the same barrier to both the LR and SC operations, but that might
+ * not make any sense. We're waiting on a memory model specification to
+ * determine exactly what the right thing to do is here.
+ */
  */
 #define ATOMIC_OP(c_t, prefix, c_or, size, asm_or) \
 static __always_inline c_t atomic##prefix##_cmpxchg##c_or(atomic##prefix##_t *v, c_t o, c_t n) \
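The FIXME added here concerns where the acquire/release annotations land when cmpxchg is built from an LR/SC pair. A hypothetical acquire flavor, written the way the comment describes, might look like the sketch below; the real macro body is outside this hunk, so treat it as an approximation, again with a stand-in atomic_t.

typedef struct { int counter; } atomic_t;	/* stand-in for the kernel's type */

/*
 * Sketch of an acquire cmpxchg as an LR/SC retry loop with the .aq bit set
 * on both instructions, which is the barrier placement the FIXME questions.
 */
static inline int atomic_cmpxchg_acquire_sketch(atomic_t *v, int o, int n)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w.aq %0, %2\n"
		"	bne	%0, %3, 1f\n"
		"	sc.w.aq	%1, %4, %2\n"
		"	bnez	%1, 0b\n"
		"1:\n"
		: "=&r" (prev), "=&r" (rc), "+A" (v->counter)
		: "r" (o), "r" (n)
		: "memory");
	return prev;
}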