Diffstat (limited to 'arch/arm64/include/asm/atomic.h')
-rw-r--r--	arch/arm64/include/asm/atomic.h	53
1 file changed, 30 insertions, 23 deletions
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 01de5aaa3edc..0237f0867e37 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -54,8 +54,7 @@ static inline void atomic_add(int i, atomic_t *v)
 "	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline int atomic_add_return(int i, atomic_t *v)
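The hunk above only touches the clobber list: nothing in atomic_add's ll/sc loop writes the condition flags (cbnz branches on a register, not on PSTATE.NZCV), so the "cc" clobber was never needed. The same cleanup recurs below for atomic_sub, atomic64_add, and atomic64_sub. As a minimal sketch, the loop can be reproduced in user space; sketch_atomic_add and the plain int *counter are illustrative stand-ins, and the code assumes an AArch64 target with GCC or Clang inline asm:

/* Illustrative user-space copy of the ll/sc loop. No instruction
 * here sets the condition flags, so no "cc" clobber is required;
 * "Ir" lets the compiler pass i as an immediate or a register. */
static inline void sketch_atomic_add(int i, int *counter)
{
	unsigned long tmp;
	int result;

	asm volatile("// sketch_atomic_add\n"
	"1:	ldxr	%w0, %2\n"
	"	add	%w0, %w0, %w3\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (*counter)
	: "Ir" (i));
}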
@@ -64,14 +63,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_add_return\n"
-"1:	ldaxr	%w0, %2\n"
+"1:	ldxr	%w0, %2\n"
 "	add	%w0, %w0, %w3\n"
 "	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
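atomic_add_return is documented to behave as a full barrier, but ldaxr (acquire) paired with stlxr (release) does not prevent independent memory accesses from being reordered across the pair. The fix therefore relaxes the load to ldxr and places an explicit smp_mb() (dmb ish on arm64) after the loop; the same change recurs for atomic_sub_return and the atomic64_*_return variants further down. A hedged C11 analogue of the resulting shape, with a seq_cst fence standing in for smp_mb(); sketch_add_return is illustrative, not the kernel interface:

#include <stdatomic.h>

/* RMW with a release store followed by a full fence: the trailing
 * fence upgrades the whole operation to full-barrier semantics. */
static inline int sketch_add_return(int i, _Atomic int *v)
{
	int old = atomic_fetch_add_explicit(v, i, memory_order_release);

	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	return old + i;
}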
@@ -86,8 +86,7 @@ static inline void atomic_sub(int i, atomic_t *v)
 "	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline int atomic_sub_return(int i, atomic_t *v)
@@ -96,14 +95,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_sub_return\n"
-"1:	ldaxr	%w0, %2\n"
+"1:	ldxr	%w0, %2\n"
 "	sub	%w0, %w0, %w3\n"
 "	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -112,17 +112,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	unsigned long tmp;
 	int oldval;
 
+	smp_mb();
+
 	asm volatile("// atomic_cmpxchg\n"
-"1:	ldaxr	%w1, %2\n"
+"1:	ldxr	%w1, %2\n"
 "	cmp	%w1, %w3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %w4, %2\n"
+"	stxr	%w0, %w4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
 	: "Ir" (old), "r" (new)
-	: "cc", "memory");
+	: "cc");
 
+	smp_mb();
 	return oldval;
 }
 
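atomic_cmpxchg gets the same treatment from both sides: with the acquire removed from the load and the release removed from the store, an smp_mb() is needed before the loop as well as after it. Note that the clobber list keeps "cc" (cmp writes the flags) but drops "memory", since the smp_mb() calls on either side already act as compiler barriers. A hedged C11 sketch of that barrier placement; sketch_cmpxchg is illustrative only:

#include <stdatomic.h>

/* Full fences on both sides of a relaxed compare-and-swap. */
static inline int sketch_cmpxchg(_Atomic int *ptr, int old, int new)
{
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() before */
	atomic_compare_exchange_strong_explicit(ptr, &old, new,
						memory_order_relaxed,
						memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() after */
	return old;	/* on failure, updated to the value observed */
}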
@@ -173,8 +176,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 "	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline long atomic64_add_return(long i, atomic64_t *v)
@@ -183,14 +185,15 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_add_return\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	add	%0, %0, %3\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -205,8 +208,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 "	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline long atomic64_sub_return(long i, atomic64_t *v)
@@ -215,14 +217,15 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_sub_return\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	sub	%0, %0, %3\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -231,17 +234,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
 	long oldval;
 	unsigned long res;
 
+	smp_mb();
+
 	asm volatile("// atomic64_cmpxchg\n"
-"1:	ldaxr	%1, %2\n"
+"1:	ldxr	%1, %2\n"
 "	cmp	%1, %3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %4, %2\n"
+"	stxr	%w0, %4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
 	: "Ir" (old), "r" (new)
-	: "cc", "memory");
+	: "cc");
 
+	smp_mb();
 	return oldval;
 }
 
@@ -253,11 +259,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_dec_if_positive\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	subs	%0, %0, #1\n"
 "	b.mi	2f\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b\n"
+"	dmb	ish\n"
 "2:"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	:
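atomic64_dec_if_positive differs from the other hunks in that its barrier lives inside the asm: the added dmb ish sits between the ll/sc loop and label 2, so it runs only on the success path, and the early exit taken when the counter would go negative (b.mi 2f) skips it entirely. A hedged C11 sketch of the same shape; sketch_dec_if_positive is illustrative, not the kernel interface:

#include <stdatomic.h>

/* Decrement only if the result stays non-negative; issue the full
 * fence only on the path where the store actually happened. */
static inline long sketch_dec_if_positive(_Atomic long *v)
{
	long old = atomic_load_explicit(v, memory_order_relaxed);

	while (old > 0) {
		if (atomic_compare_exchange_weak_explicit(v, &old, old - 1,
						memory_order_relaxed,
						memory_order_relaxed)) {
			atomic_thread_fence(memory_order_seq_cst); /* dmb ish */
			break;
		}
	}
	return old - 1;	/* negative result means no store took place */
}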