author     Will Deacon <will.deacon@arm.com>   2015-08-06 12:54:44 -0400
committer  Ingo Molnar <mingo@kernel.org>      2015-08-12 05:59:10 -0400
commit     0ca326de7aa9cb253db9c1a3eb3f0487c8dbf912 (patch)
tree       2964ef144f8b873c9af610577b4e3c92c0af9ac8
parent     cd074aea9261784e44f292e1132830ec221802c6 (diff)
locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations
By defining our SMP atomics in terms of relaxed operations, we gain a small
reduction in code size and have acquire/release/fence variants generated
automatically by the core code.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman.Long@hp.com
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1438880084-18856-9-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  arch/arm/include/asm/atomic.h   37
-rw-r--r--  arch/arm/include/asm/cmpxchg.h  47
2 files changed, 24 insertions(+), 60 deletions(-)
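Editor's note: the commit message relies on the generic atomic layer building the ordered variants out of the _relaxed ones the architecture now provides. The sketch below is an illustrative, self-contained userspace analogue of that composition, written with GCC __atomic builtins; the names (my_atomic_t, my_op_fence, my_add_return*) are invented for the example and are not the kernel's actual macros.

/*
 * Illustrative userspace sketch only -- not kernel code.  It mimics the
 * pattern the core atomic code uses: the architecture supplies a _relaxed
 * operation, and the fully ordered variant is generated by bracketing it
 * with full memory barriers.  All names here are invented for the example.
 */
#include <stdio.h>

typedef struct { int counter; } my_atomic_t;

/* "Architecture" side: an add-return with no ordering guarantees. */
static inline int my_add_return_relaxed(int i, my_atomic_t *v)
{
        return __atomic_add_fetch(&v->counter, i, __ATOMIC_RELAXED);
}

/* "Core code" side: wrap the relaxed op with full fences (like smp_mb()). */
#define my_op_fence(op, args...)                                \
({                                                              \
        __typeof__(op##_relaxed(args)) __ret;                   \
        __atomic_thread_fence(__ATOMIC_SEQ_CST);                \
        __ret = op##_relaxed(args);                             \
        __atomic_thread_fence(__ATOMIC_SEQ_CST);                \
        __ret;                                                  \
})

#define my_add_return(i, v)     my_op_fence(my_add_return, i, v)

int main(void)
{
        my_atomic_t v = { .counter = 0 };

        printf("relaxed: %d\n", my_add_return_relaxed(1, &v));
        printf("ordered: %d\n", my_add_return(1, &v));
        return 0;
}

In the patch below, the ARM assembly bodies are unchanged; only the smp_mb() calls move out of the architecture code and into this kind of generic wrapper.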
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 82b75a7cb762..fe3ef397f5a4 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -57,12 +57,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
 } \
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
 { \
         unsigned long tmp; \
         int result; \
         \
-        smp_mb(); \
         prefetchw(&v->counter); \
         \
         __asm__ __volatile__("@ atomic_" #op "_return\n" \
@@ -75,17 +74,17 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
         : "r" (&v->counter), "Ir" (i) \
         : "cc"); \
         \
-        smp_mb(); \
-        \
         return result; \
 }
 
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+
+static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 {
         int oldval;
         unsigned long res;
 
-        smp_mb();
         prefetchw(&ptr->counter);
 
         do {
@@ -99,10 +98,9 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
                 : "cc");
         } while (res);
 
-        smp_mb();
-
         return oldval;
 }
+#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
@@ -297,12 +295,12 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \
 } \
 
 #define ATOMIC64_OP_RETURN(op, op1, op2) \
-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+static inline long long \
+atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
 { \
         long long result; \
         unsigned long tmp; \
         \
-        smp_mb(); \
         prefetchw(&v->counter); \
         \
         __asm__ __volatile__("@ atomic64_" #op "_return\n" \
@@ -316,8 +314,6 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
         : "r" (&v->counter), "r" (i) \
         : "cc"); \
         \
-        smp_mb(); \
-        \
         return result; \
 }
 
@@ -328,6 +324,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
 ATOMIC64_OPS(add, adds, adc)
 ATOMIC64_OPS(sub, subs, sbc)
 
+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+
 #define atomic64_andnot atomic64_andnot
 
 ATOMIC64_OP(and, and, and)
@@ -339,13 +338,12 @@ ATOMIC64_OP(xor, eor, eor)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
-                                         long long new)
+static inline long long
+atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
 {
         long long oldval;
         unsigned long res;
 
-        smp_mb();
         prefetchw(&ptr->counter);
 
         do {
@@ -360,17 +358,15 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
                 : "cc");
         } while (res);
 
-        smp_mb();
-
         return oldval;
 }
+#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
 
-static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
 {
         long long result;
         unsigned long tmp;
 
-        smp_mb();
         prefetchw(&ptr->counter);
 
         __asm__ __volatile__("@ atomic64_xchg\n"
@@ -382,10 +378,9 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
         : "r" (&ptr->counter), "r" (new)
         : "cc");
 
-        smp_mb();
-
         return result;
 }
+#define atomic64_xchg_relaxed atomic64_xchg_relaxed
 
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 1692a05d3207..916a2744d5c6 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -35,7 +35,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
         unsigned int tmp;
 #endif
 
-        smp_mb();
         prefetchw((const void *)ptr);
 
         switch (size) {
@@ -98,12 +97,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
                 __bad_xchg(ptr, size), ret = 0;
                 break;
         }
-        smp_mb();
 
         return ret;
 }
 
-#define xchg(ptr, x) ({ \
+#define xchg_relaxed(ptr, x) ({ \
         (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
                                    sizeof(*(ptr))); \
 })
@@ -117,6 +115,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 #error "SMP is not supported on this platform"
 #endif
 
+#define xchg xchg_relaxed
+
 /*
  * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
  * them available.
@@ -194,23 +194,11 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
         return oldval;
 }
 
-static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
-                                         unsigned long new, int size)
-{
-        unsigned long ret;
-
-        smp_mb();
-        ret = __cmpxchg(ptr, old, new, size);
-        smp_mb();
-
-        return ret;
-}
-
-#define cmpxchg(ptr,o,n) ({ \
-        (__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
-                                         (unsigned long)(o), \
-                                         (unsigned long)(n), \
-                                         sizeof(*(ptr))); \
+#define cmpxchg_relaxed(ptr,o,n) ({ \
+        (__typeof__(*(ptr)))__cmpxchg((ptr), \
+                                      (unsigned long)(o), \
+                                      (unsigned long)(n), \
+                                      sizeof(*(ptr))); \
 })
 
 static inline unsigned long __cmpxchg_local(volatile void *ptr,
@@ -273,25 +261,6 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
 
 #define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
 
-static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
-                                                unsigned long long old,
-                                                unsigned long long new)
-{
-        unsigned long long ret;
-
-        smp_mb();
-        ret = __cmpxchg64(ptr, old, new);
-        smp_mb();
-
-        return ret;
-}
-
-#define cmpxchg64(ptr, o, n) ({ \
-        (__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
-                                           (unsigned long long)(o), \
-                                           (unsigned long long)(n)); \
-})
-
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
 
 #endif /* __ASM_ARM_CMPXCHG_H */
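Editor's note: for callers nothing changes by default; cmpxchg(), xchg() and the atomic*_return() operations keep their full-barrier semantics through the wrappers generated by the core code, while code that genuinely needs no ordering can call the _relaxed forms directly and skip both barriers on ARM. A hypothetical kernel-side caller (the helper and its use case are invented for illustration, not part of this patch):

#include <linux/atomic.h>

/*
 * Hypothetical helper: hand out ticket numbers.  Only the atomicity of the
 * increment matters here, not ordering against surrounding accesses, so the
 * relaxed variant is sufficient and avoids the two barriers that the fully
 * ordered atomic_add_return() would imply on ARM.
 */
static inline int ticket_alloc(atomic_t *next_ticket)
{
        return atomic_add_return_relaxed(1, next_ticket);
}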