author		Will Deacon <will.deacon@arm.com>	2013-02-04 07:12:33 -0500
committer	Catalin Marinas <catalin.marinas@arm.com>	2013-02-11 13:16:41 -0500
commit		3a0310eb369aae985d6409d8ff1340146578e5c1 (patch)
tree		8b1796a500d2dd694c924a57ee4502f31b15ef8b /arch/arm64
parent		c0e01d5d8f15c085236df184e5bc3d79a8b700cd (diff)
arm64: atomics: fix grossly inconsistent asm constraints for exclusives
Our uses of inline asm constraints for atomic operations are fairly
wild and varied. We basically need to guarantee the following:
1. Any instructions with barrier implications
(load-acquire/store-release) have a "memory" clobber
2. When performing exclusive accesses, the addressing mode is generated
using the "Q" constraint
3. Atomic blocks which use the condition flags have a "cc" clobber
This patch addresses these concerns; as well as fixing the semantics of
the code, it stops GCC complaining about impossible asm constraints.
(An illustrative sketch of the resulting constraint pattern follows the
sign-offs below.)
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
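For readers skimming the diff, a minimal sketch of the constraint pattern the commit message describes is shown below. It is modelled on the patched atomic_add_return(); the standalone helper name is hypothetical and the snippet is illustrative only, not part of the patch.

/*
 * Illustrative sketch (not part of the patch): the exclusive-access
 * pattern described above, modelled on the patched atomic_add_return().
 * The function name is made up for this example.
 */
static inline int example_add_return(int i, int *counter)
{
	unsigned long tmp;
	int result;

	asm volatile("// example_add_return\n"
"1:	ldaxr	%w0, %2\n"		/* load-acquire of *counter */
"	add	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"		/* store-release; address generated via "Q" */
"	cbnz	%w1, 1b"		/* retry if the exclusive store failed */
	: "=&r" (result), "=&r" (tmp), "+Q" (*counter)	/* rule 2: "Q" constraint */
	: "Ir" (i)
	: "cc", "memory");		/* rule 3: "cc" clobber; rule 1: "memory" clobber */

	return result;
}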
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/atomic.h		132
-rw-r--r--	arch/arm64/include/asm/cmpxchg.h	74
-rw-r--r--	arch/arm64/include/asm/futex.h		2
-rw-r--r--	arch/arm64/include/asm/spinlock.h	78
4 files changed, 143 insertions, 143 deletions
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 407717ba060e..836364468571 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -49,12 +49,12 @@ static inline void atomic_add(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_add\n"
-"1:	ldxr	%w0, [%3]\n"
-"	add	%w0, %w0, %w4\n"
-"	stxr	%w1, %w0, [%3]\n"
+"1:	ldxr	%w0, %2\n"
+"	add	%w0, %w0, %w3\n"
+"	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
 	: "cc");
 }
 
@@ -64,13 +64,13 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_add_return\n"
-"1:	ldaxr	%w0, [%3]\n"
-"	add	%w0, %w0, %w4\n"
-"	stlxr	%w1, %w0, [%3]\n"
+"1:	ldaxr	%w0, %2\n"
+"	add	%w0, %w0, %w3\n"
+"	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc", "memory");
 
 	return result;
 }
@@ -81,12 +81,12 @@ static inline void atomic_sub(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_sub\n"
-"1:	ldxr	%w0, [%3]\n"
-"	sub	%w0, %w0, %w4\n"
-"	stxr	%w1, %w0, [%3]\n"
+"1:	ldxr	%w0, %2\n"
+"	sub	%w0, %w0, %w3\n"
+"	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
 	: "cc");
 }
 
@@ -96,13 +96,13 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_sub_return\n"
-"1:	ldaxr	%w0, [%3]\n"
-"	sub	%w0, %w0, %w4\n"
-"	stlxr	%w1, %w0, [%3]\n"
+"1:	ldaxr	%w0, %2\n"
+"	sub	%w0, %w0, %w3\n"
+"	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc", "memory");
 
 	return result;
 }
@@ -113,15 +113,15 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	int oldval;
 
 	asm volatile("// atomic_cmpxchg\n"
-"1:	ldaxr	%w1, [%3]\n"
-"	cmp	%w1, %w4\n"
+"1:	ldaxr	%w1, %2\n"
+"	cmp	%w1, %w3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %w5, [%3]\n"
+"	stlxr	%w0, %w4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
-	: "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter)
-	: "r" (&ptr->counter), "Ir" (old), "r" (new)
-	: "cc");
+	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
+	: "Ir" (old), "r" (new)
+	: "cc", "memory");
 
 	return oldval;
 }
@@ -131,12 +131,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 	unsigned long tmp, tmp2;
 
 	asm volatile("// atomic_clear_mask\n"
-"1:	ldxr	%0, [%3]\n"
-"	bic	%0, %0, %4\n"
-"	stxr	%w1, %0, [%3]\n"
+"1:	ldxr	%0, %2\n"
+"	bic	%0, %0, %3\n"
+"	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (tmp), "=&r" (tmp2), "+o" (*addr)
-	: "r" (addr), "Ir" (mask)
+	: "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
+	: "Ir" (mask)
 	: "cc");
 }
 
@@ -182,12 +182,12 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_add\n"
-"1:	ldxr	%0, [%3]\n"
-"	add	%0, %0, %4\n"
-"	stxr	%w1, %0, [%3]\n"
+"1:	ldxr	%0, %2\n"
+"	add	%0, %0, %3\n"
+"	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
 	: "cc");
 }
 
@@ -197,13 +197,13 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_add_return\n"
-"1:	ldaxr	%0, [%3]\n"
-"	add	%0, %0, %4\n"
-"	stlxr	%w1, %0, [%3]\n"
+"1:	ldaxr	%0, %2\n"
+"	add	%0, %0, %3\n"
+"	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc", "memory");
 
 	return result;
 }
@@ -214,12 +214,12 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_sub\n"
-"1:	ldxr	%0, [%3]\n"
-"	sub	%0, %0, %4\n"
-"	stxr	%w1, %0, [%3]\n"
+"1:	ldxr	%0, %2\n"
+"	sub	%0, %0, %3\n"
+"	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
 	: "cc");
 }
 
@@ -229,13 +229,13 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_sub_return\n"
-"1:	ldaxr	%0, [%3]\n"
-"	sub	%0, %0, %4\n"
-"	stlxr	%w1, %0, [%3]\n"
+"1:	ldaxr	%0, %2\n"
+"	sub	%0, %0, %3\n"
+"	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc", "memory");
 
 	return result;
 }
@@ -246,15 +246,15 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
 	unsigned long res;
 
 	asm volatile("// atomic64_cmpxchg\n"
-"1:	ldaxr	%1, [%3]\n"
-"	cmp	%1, %4\n"
+"1:	ldaxr	%1, %2\n"
+"	cmp	%1, %3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %5, [%3]\n"
+"	stlxr	%w0, %4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
-	: "=&r" (res), "=&r" (oldval), "+o" (ptr->counter)
-	: "r" (&ptr->counter), "Ir" (old), "r" (new)
-	: "cc");
+	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
+	: "Ir" (old), "r" (new)
+	: "cc", "memory");
 
 	return oldval;
 }
@@ -267,15 +267,15 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_dec_if_positive\n"
-"1:	ldaxr	%0, [%3]\n"
+"1:	ldaxr	%0, %2\n"
 "	subs	%0, %0, #1\n"
 "	b.mi	2f\n"
-"	stlxr	%w1, %0, [%3]\n"
+"	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b\n"
 "2:"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter)
-	: "cc");
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	:
+	: "cc", "memory");
 
 	return result;
 }
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index e0e65b069d9e..968b5cbfc260 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -29,39 +29,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 	switch (size) {
 	case 1:
 		asm volatile("// __xchg1\n"
-		"1:	ldaxrb	%w0, [%3]\n"
-		"	stlxrb	%w1, %w2, [%3]\n"
+		"1:	ldaxrb	%w0, %2\n"
+		"	stlxrb	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
-			: "=&r" (ret), "=&r" (tmp)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
+			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
+			: "r" (x)
+			: "cc", "memory");
 		break;
 	case 2:
 		asm volatile("// __xchg2\n"
-		"1:	ldaxrh	%w0, [%3]\n"
-		"	stlxrh	%w1, %w2, [%3]\n"
+		"1:	ldaxrh	%w0, %2\n"
+		"	stlxrh	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
-			: "=&r" (ret), "=&r" (tmp)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
+			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
+			: "r" (x)
+			: "cc", "memory");
 		break;
 	case 4:
 		asm volatile("// __xchg4\n"
-		"1:	ldaxr	%w0, [%3]\n"
-		"	stlxr	%w1, %w2, [%3]\n"
+		"1:	ldaxr	%w0, %2\n"
+		"	stlxr	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
-			: "=&r" (ret), "=&r" (tmp)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
+			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
+			: "r" (x)
+			: "cc", "memory");
 		break;
 	case 8:
 		asm volatile("// __xchg8\n"
-		"1:	ldaxr	%0, [%3]\n"
-		"	stlxr	%w1, %2, [%3]\n"
+		"1:	ldaxr	%0, %2\n"
+		"	stlxr	%w1, %3, %2\n"
 		"	cbnz	%w1, 1b\n"
-			: "=&r" (ret), "=&r" (tmp)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
+			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
+			: "r" (x)
+			: "cc", "memory");
 		break;
 	default:
 		BUILD_BUG();
@@ -82,14 +82,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	case 1:
 		do {
 			asm volatile("// __cmpxchg1\n"
-			"	ldxrb	%w1, [%2]\n"
+			"	ldxrb	%w1, %2\n"
 			"	mov	%w0, #0\n"
 			"	cmp	%w1, %w3\n"
 			"	b.ne	1f\n"
-			"	stxrb	%w0, %w4, [%2]\n"
+			"	stxrb	%w0, %w4, %2\n"
 			"1:\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
+				: "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
+				: "Ir" (old), "r" (new)
 				: "cc");
 		} while (res);
 		break;
@@ -97,29 +97,29 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	case 2:
 		do {
 			asm volatile("// __cmpxchg2\n"
-			"	ldxrh	%w1, [%2]\n"
+			"	ldxrh	%w1, %2\n"
 			"	mov	%w0, #0\n"
 			"	cmp	%w1, %w3\n"
 			"	b.ne	1f\n"
-			"	stxrh	%w0, %w4, [%2]\n"
+			"	stxrh	%w0, %w4, %2\n"
 			"1:\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
-				: "memory", "cc");
+				: "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
+				: "Ir" (old), "r" (new)
+				: "cc");
 		} while (res);
 		break;
 
 	case 4:
 		do {
 			asm volatile("// __cmpxchg4\n"
-			"	ldxr	%w1, [%2]\n"
+			"	ldxr	%w1, %2\n"
 			"	mov	%w0, #0\n"
 			"	cmp	%w1, %w3\n"
 			"	b.ne	1f\n"
-			"	stxr	%w0, %w4, [%2]\n"
+			"	stxr	%w0, %w4, %2\n"
 			"1:\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
+				: "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
+				: "Ir" (old), "r" (new)
 				: "cc");
 		} while (res);
 		break;
@@ -127,14 +127,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	case 8:
 		do {
 			asm volatile("// __cmpxchg8\n"
-			"	ldxr	%1, [%2]\n"
+			"	ldxr	%1, %2\n"
 			"	mov	%w0, #0\n"
 			"	cmp	%1, %3\n"
 			"	b.ne	1f\n"
-			"	stxr	%w0, %4, [%2]\n"
+			"	stxr	%w0, %4, %2\n"
 			"1:\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
+				: "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
+				: "Ir" (old), "r" (new)
 				: "cc");
 		} while (res);
 		break;
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 3468ae8439fa..c582fa316366 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -39,7 +39,7 @@
 "	.popsection\n"						\
 	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
 	: "r" (oparg), "Ir" (-EFAULT)					\
-	: "cc")
+	: "cc", "memory")
 
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 41112fe2f8b1..7065e920149d 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -45,13 +45,13 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	asm volatile(
 	"	sevl\n"
 	"1:	wfe\n"
-	"2:	ldaxr	%w0, [%1]\n"
+	"2:	ldaxr	%w0, %1\n"
 	"	cbnz	%w0, 1b\n"
-	"	stxr	%w0, %w2, [%1]\n"
+	"	stxr	%w0, %w2, %1\n"
 	"	cbnz	%w0, 2b\n"
-	: "=&r" (tmp)
-	: "r" (&lock->lock), "r" (1)
-	: "memory");
+	: "=&r" (tmp), "+Q" (lock->lock)
+	: "r" (1)
+	: "cc", "memory");
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -59,13 +59,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 	unsigned int tmp;
 
 	asm volatile(
-	"	ldaxr	%w0, [%1]\n"
+	"	ldaxr	%w0, %1\n"
 	"	cbnz	%w0, 1f\n"
-	"	stxr	%w0, %w2, [%1]\n"
+	"	stxr	%w0, %w2, %1\n"
 	"1:\n"
-	: "=&r" (tmp)
-	: "r" (&lock->lock), "r" (1)
-	: "memory");
+	: "=&r" (tmp), "+Q" (lock->lock)
+	: "r" (1)
+	: "cc", "memory");
 
 	return !tmp;
 }
@@ -73,8 +73,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(
-	"	stlr	%w1, [%0]\n"
-	: : "r" (&lock->lock), "r" (0) : "memory");
+	"	stlr	%w1, %0\n"
+	: "=Q" (lock->lock) : "r" (0) : "memory");
 }
 
 /*
@@ -94,13 +94,13 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 	asm volatile(
 	"	sevl\n"
 	"1:	wfe\n"
-	"2:	ldaxr	%w0, [%1]\n"
+	"2:	ldaxr	%w0, %1\n"
 	"	cbnz	%w0, 1b\n"
-	"	stxr	%w0, %w2, [%1]\n"
+	"	stxr	%w0, %w2, %1\n"
 	"	cbnz	%w0, 2b\n"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "memory");
+	: "=&r" (tmp), "+Q" (rw->lock)
+	: "r" (0x80000000)
+	: "cc", "memory");
 }
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -108,13 +108,13 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 	unsigned int tmp;
 
 	asm volatile(
-	"	ldaxr	%w0, [%1]\n"
+	"	ldaxr	%w0, %1\n"
 	"	cbnz	%w0, 1f\n"
-	"	stxr	%w0, %w2, [%1]\n"
+	"	stxr	%w0, %w2, %1\n"
 	"1:\n"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "memory");
+	: "=&r" (tmp), "+Q" (rw->lock)
+	: "r" (0x80000000)
+	: "cc", "memory");
 
 	return !tmp;
 }
@@ -122,8 +122,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	asm volatile(
-	"	stlr	%w1, [%0]\n"
-	: : "r" (&rw->lock), "r" (0) : "memory");
+	"	stlr	%w1, %0\n"
+	: "=Q" (rw->lock) : "r" (0) : "memory");
 }
 
 /* write_can_lock - would write_trylock() succeed? */
@@ -148,14 +148,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 	asm volatile(
 	"	sevl\n"
 	"1:	wfe\n"
-	"2:	ldaxr	%w0, [%2]\n"
+	"2:	ldaxr	%w0, %2\n"
 	"	add	%w0, %w0, #1\n"
 	"	tbnz	%w0, #31, 1b\n"
-	"	stxr	%w1, %w0, [%2]\n"
+	"	stxr	%w1, %w0, %2\n"
 	"	cbnz	%w1, 2b\n"
-	: "=&r" (tmp), "=&r" (tmp2)
-	: "r" (&rw->lock)
-	: "memory");
+	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
+	:
+	: "cc", "memory");
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -163,13 +163,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 	unsigned int tmp, tmp2;
 
 	asm volatile(
-	"1:	ldxr	%w0, [%2]\n"
+	"1:	ldxr	%w0, %2\n"
 	"	sub	%w0, %w0, #1\n"
-	"	stlxr	%w1, %w0, [%2]\n"
+	"	stlxr	%w1, %w0, %2\n"
 	"	cbnz	%w1, 1b\n"
-	: "=&r" (tmp), "=&r" (tmp2)
-	: "r" (&rw->lock)
-	: "memory");
+	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
+	:
+	: "cc", "memory");
 }
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -177,14 +177,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	unsigned int tmp, tmp2 = 1;
 
 	asm volatile(
-	"	ldaxr	%w0, [%2]\n"
+	"	ldaxr	%w0, %2\n"
 	"	add	%w0, %w0, #1\n"
 	"	tbnz	%w0, #31, 1f\n"
-	"	stxr	%w1, %w0, [%2]\n"
+	"	stxr	%w1, %w0, %2\n"
 	"1:\n"
-	: "=&r" (tmp), "+r" (tmp2)
-	: "r" (&rw->lock)
-	: "memory");
+	: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
+	:
+	: "cc", "memory");
 
 	return !tmp2;
 }