author	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 09:48:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 09:48:00 -0400
commit	dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree	9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/arm
parent	d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent	2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
 "This is a series kept separate from the main locking tree, which
  cleans up and improves various details in the atomics type handling:

   - Remove the unused atomic_or_long() method

   - Consolidate and compress atomic ops implementations between
     architectures, to reduce linecount and to make it easier to add new
     ops.

   - Rewrite generic atomic support to only require cmpxchg() from an
     architecture - generate all other methods from that"

* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
  locking, mips: Fix atomics
  locking, sparc64: Fix atomics
  locking,arch: Rewrite generic atomic support
  locking,arch,xtensa: Fold atomic_ops
  locking,arch,sparc: Fold atomic_ops
  locking,arch,sh: Fold atomic_ops
  locking,arch,powerpc: Fold atomic_ops
  locking,arch,parisc: Fold atomic_ops
  locking,arch,mn10300: Fold atomic_ops
  locking,arch,mips: Fold atomic_ops
  locking,arch,metag: Fold atomic_ops
  locking,arch,m68k: Fold atomic_ops
  locking,arch,m32r: Fold atomic_ops
  locking,arch,ia64: Fold atomic_ops
  locking,arch,hexagon: Fold atomic_ops
  locking,arch,cris: Fold atomic_ops
  locking,arch,avr32: Fold atomic_ops
  locking,arch,arm64: Fold atomic_ops
  locking,arch,arm: Fold atomic_ops
  ...
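The "fold" the series performs is mechanical: each architecture's hand-written atomic_add()/atomic_sub() (and their _return variants) are copies of a single template, so the template becomes an ATOMIC_OP()/ATOMIC_OP_RETURN() macro pair and the individual operations are generated from a short ATOMIC_OPS() list. Below is a minimal, uniprocessor-only C sketch of that pattern, not the ARM code from the diff that follows; the my_atomic_* names are made up for illustration, and the real kernel versions additionally disable interrupts (pre-ARMv6 UP) or use an ldrex/strex loop with barriers (ARMv6+).

#include <stdio.h>

typedef struct { int counter; } my_atomic_t;

/* One template generates the void-returning op... */
#define MY_ATOMIC_OP(op, c_op)						\
static inline void my_atomic_##op(int i, my_atomic_t *v)		\
{									\
	v->counter c_op i;	/* c_op is a compound assignment, e.g. += */ \
}

/* ...and a second template generates the value-returning variant. */
#define MY_ATOMIC_OP_RETURN(op, c_op)					\
static inline int my_atomic_##op##_return(int i, my_atomic_t *v)	\
{									\
	v->counter c_op i;						\
	return v->counter;						\
}

#define MY_ATOMIC_OPS(op, c_op)						\
	MY_ATOMIC_OP(op, c_op)						\
	MY_ATOMIC_OP_RETURN(op, c_op)

MY_ATOMIC_OPS(add, +=)	/* generates my_atomic_add(), my_atomic_add_return() */
MY_ATOMIC_OPS(sub, -=)	/* generates my_atomic_sub(), my_atomic_sub_return() */

#undef MY_ATOMIC_OPS
#undef MY_ATOMIC_OP_RETURN
#undef MY_ATOMIC_OP

int main(void)
{
	my_atomic_t v = { 0 };

	my_atomic_add(5, &v);
	printf("%d\n", my_atomic_sub_return(2, &v));	/* prints 3 */
	return 0;
}

Adding a new operation (and, or, xor, ...) then costs one more ATOMIC_OPS() line per architecture instead of two more hand-expanded functions, which is the linecount reduction the pull request refers to.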
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/include/asm/atomic.h	307
1 file changed, 124 insertions(+), 183 deletions(-)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 3040359094d9..e22c11970b7b 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -27,7 +27,7 @@
  * strex/ldrex monitor on some implementations. The reason we can use it for
  * atomic_set() is the clrex or dummy strex done on every exception return.
  */
-#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_read(v)	ACCESS_ONCE((v)->counter)
 #define atomic_set(v,i)	(((v)->counter) = (i))
 
 #if __LINUX_ARM_ARCH__ >= 6
@@ -37,84 +37,47 @@
  * store exclusive to ensure that these are atomic.  We may loop
  * to ensure that the update happens.
  */
-static inline void atomic_add(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	prefetchw(&v->counter);
-	__asm__ __volatile__("@ atomic_add\n"
-"1:	ldrex	%0, [%3]\n"
-"	add	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
-}
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	smp_mb();
-	prefetchw(&v->counter);
-
-	__asm__ __volatile__("@ atomic_add_return\n"
-"1:	ldrex	%0, [%3]\n"
-"	add	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
-
-	smp_mb();
-
-	return result;
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	prefetchw(&v->counter);
-	__asm__ __volatile__("@ atomic_sub\n"
-"1:	ldrex	%0, [%3]\n"
-"	sub	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	smp_mb();
-	prefetchw(&v->counter);
-
-	__asm__ __volatile__("@ atomic_sub_return\n"
-"1:	ldrex	%0, [%3]\n"
-"	sub	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
-
-	smp_mb();
-
-	return result;
+#define ATOMIC_OP(op, c_op, asm_op)	\
+static inline void atomic_##op(int i, atomic_t *v)	\
+{	\
+	unsigned long tmp;	\
+	int result;	\
+	\
+	prefetchw(&v->counter);	\
+	__asm__ __volatile__("@ atomic_" #op "\n"	\
+"1:	ldrex	%0, [%3]\n"	\
+"	" #asm_op "	%0, %0, %4\n"	\
+"	strex	%1, %0, [%3]\n"	\
+"	teq	%1, #0\n"	\
+"	bne	1b"	\
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)	\
+	: "r" (&v->counter), "Ir" (i)	\
+	: "cc");	\
+}	\
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)	\
+static inline int atomic_##op##_return(int i, atomic_t *v)	\
+{	\
+	unsigned long tmp;	\
+	int result;	\
+	\
+	smp_mb();	\
+	prefetchw(&v->counter);	\
+	\
+	__asm__ __volatile__("@ atomic_" #op "_return\n"	\
+"1:	ldrex	%0, [%3]\n"	\
+"	" #asm_op "	%0, %0, %4\n"	\
+"	strex	%1, %0, [%3]\n"	\
+"	teq	%1, #0\n"	\
+"	bne	1b"	\
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)	\
+	: "r" (&v->counter), "Ir" (i)	\
+	: "cc");	\
+	\
+	smp_mb();	\
+	\
+	return result;	\
 }
 
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
@@ -174,33 +137,29 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	int val;
-
-	raw_local_irq_save(flags);
-	val = v->counter;
-	v->counter = val += i;
-	raw_local_irq_restore(flags);
-
-	return val;
-}
-#define atomic_add(i, v)	(void) atomic_add_return(i, v)
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	int val;
-
-	raw_local_irq_save(flags);
-	val = v->counter;
-	v->counter = val -= i;
-	raw_local_irq_restore(flags);
-
-	return val;
+#define ATOMIC_OP(op, c_op, asm_op)	\
+static inline void atomic_##op(int i, atomic_t *v)	\
+{	\
+	unsigned long flags;	\
+	\
+	raw_local_irq_save(flags);	\
+	v->counter c_op i;	\
+	raw_local_irq_restore(flags);	\
+}	\
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)	\
+static inline int atomic_##op##_return(int i, atomic_t *v)	\
+{	\
+	unsigned long flags;	\
+	int val;	\
+	\
+	raw_local_irq_save(flags);	\
+	v->counter c_op i;	\
+	val = v->counter;	\
+	raw_local_irq_restore(flags);	\
+	\
+	return val;	\
 }
-#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -228,6 +187,17 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #endif /* __LINUX_ARM_ARCH__ */
 
+#define ATOMIC_OPS(op, c_op, asm_op)	\
+	ATOMIC_OP(op, c_op, asm_op)	\
+	ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
 #define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
 
 #define atomic_inc(v)		atomic_add(1, v)
@@ -300,89 +270,60 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 }
 #endif
 
-static inline void atomic64_add(long long i, atomic64_t *v)
-{
-	long long result;
-	unsigned long tmp;
-
-	prefetchw(&v->counter);
-	__asm__ __volatile__("@ atomic64_add\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	adds	%Q0, %Q0, %Q4\n"
-"	adc	%R0, %R0, %R4\n"
-"	strexd	%1, %0, %H0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-}
-
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
-{
-	long long result;
-	unsigned long tmp;
-
-	smp_mb();
-	prefetchw(&v->counter);
-
-	__asm__ __volatile__("@ atomic64_add_return\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	adds	%Q0, %Q0, %Q4\n"
-"	adc	%R0, %R0, %R4\n"
-"	strexd	%1, %0, %H0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-
-	smp_mb();
-
-	return result;
-}
-
-static inline void atomic64_sub(long long i, atomic64_t *v)
-{
-	long long result;
-	unsigned long tmp;
-
-	prefetchw(&v->counter);
-	__asm__ __volatile__("@ atomic64_sub\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%Q0, %Q0, %Q4\n"
-"	sbc	%R0, %R0, %R4\n"
-"	strexd	%1, %0, %H0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
+#define ATOMIC64_OP(op, op1, op2)	\
+static inline void atomic64_##op(long long i, atomic64_t *v)	\
+{	\
+	long long result;	\
+	unsigned long tmp;	\
+	\
+	prefetchw(&v->counter);	\
+	__asm__ __volatile__("@ atomic64_" #op "\n"	\
+"1:	ldrexd	%0, %H0, [%3]\n"	\
+"	" #op1 " %Q0, %Q0, %Q4\n"	\
+"	" #op2 " %R0, %R0, %R4\n"	\
+"	strexd	%1, %0, %H0, [%3]\n"	\
+"	teq	%1, #0\n"	\
+"	bne	1b"	\
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)	\
+	: "r" (&v->counter), "r" (i)	\
+	: "cc");	\
+}	\
+
+#define ATOMIC64_OP_RETURN(op, op1, op2)	\
+static inline long long atomic64_##op##_return(long long i, atomic64_t *v)	\
+{	\
+	long long result;	\
+	unsigned long tmp;	\
+	\
+	smp_mb();	\
+	prefetchw(&v->counter);	\
+	\
+	__asm__ __volatile__("@ atomic64_" #op "_return\n"	\
+"1:	ldrexd	%0, %H0, [%3]\n"	\
+"	" #op1 " %Q0, %Q0, %Q4\n"	\
+"	" #op2 " %R0, %R0, %R4\n"	\
+"	strexd	%1, %0, %H0, [%3]\n"	\
+"	teq	%1, #0\n"	\
+"	bne	1b"	\
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)	\
+	: "r" (&v->counter), "r" (i)	\
+	: "cc");	\
+	\
+	smp_mb();	\
+	\
+	return result;	\
 }
 
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
-	long long result;
-	unsigned long tmp;
-
-	smp_mb();
-	prefetchw(&v->counter);
-
-	__asm__ __volatile__("@ atomic64_sub_return\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%Q0, %Q0, %Q4\n"
-"	sbc	%R0, %R0, %R4\n"
-"	strexd	%1, %0, %H0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
+#define ATOMIC64_OPS(op, op1, op2)	\
+	ATOMIC64_OP(op, op1, op2)	\
+	ATOMIC64_OP_RETURN(op, op1, op2)
 
-	smp_mb();
+ATOMIC64_OPS(add, adds, adc)
+ATOMIC64_OPS(sub, subs, sbc)
 
-	return result;
-}
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 					  long long new)