aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm64/include
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2013-11-06 08:57:36 -0500
committerIngo Molnar <mingo@kernel.org>2014-01-12 04:37:17 -0500
commit47933ad41a86a4a9b50bed7c9b9bd2ba242aac63 (patch)
tree4f1d94f4b09ecf0c1d99f295f2c31b22eebed492 /arch/arm64/include
parent93ea02bb84354370e51de803a9405f171f3edf88 (diff)
arch: Introduce smp_load_acquire(), smp_store_release()
A number of situations currently require the heavyweight smp_mb(), even though there is no need to order prior stores against later loads. Many architectures have much cheaper ways to handle these situations, but the Linux kernel currently has no portable way to make use of them. This commit therefore supplies smp_load_acquire() and smp_store_release() to remedy this situation. The new smp_load_acquire() primitive orders the specified load against any subsequent reads or writes, while the new smp_store_release() primitive orders the specified store against any prior reads or writes. These primitives allow array-based circular FIFOs to be implemented without an smp_mb(), and also allow a theoretical hole in rcu_assign_pointer() to be closed at no additional expense on most architectures. In addition, the RCU experience transitioning from explicit smp_read_barrier_depends() and smp_wmb() to rcu_dereference() and rcu_assign_pointer(), respectively, resulted in substantial improvements in readability. It therefore seems likely that replacing other explicit barriers with smp_load_acquire() and smp_store_release() will provide similar benefits. It appears that roughly half of the explicit barriers in core kernel code might be so replaced. [Changelog by PaulMck] Reviewed-by: "Paul E. 
McKenney" <paulmck@linux.vnet.ibm.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Acked-by: Will Deacon <will.deacon@arm.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> Cc: Michael Ellerman <michael@ellerman.id.au> Cc: Michael Neuling <mikey@neuling.org> Cc: Russell King <linux@arm.linux.org.uk> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Victor Kaplansky <VICTORK@il.ibm.com> Cc: Tony Luck <tony.luck@intel.com> Cc: Oleg Nesterov <oleg@redhat.com> Link: http://lkml.kernel.org/r/20131213150640.908486364@infradead.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--arch/arm64/include/asm/barrier.h50
1 files changed, 50 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index d4a63338a53c..78e20ba8806b 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -35,10 +35,60 @@
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
+
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	___p1;								\
+})
+
 #else
+
 #define smp_mb()	asm volatile("dmb ish" : : : "memory")
 #define smp_rmb()	asm volatile("dmb ishld" : : : "memory")
 #define smp_wmb()	asm volatile("dmb ishst" : : : "memory")
+
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	switch (sizeof(*p)) {						\
+	case 4:								\
+		asm volatile ("stlr %w1, %0"				\
+				: "=Q" (*p) : "r" (v) : "memory");	\
+		break;							\
+	case 8:								\
+		asm volatile ("stlr %1, %0"				\
+				: "=Q" (*p) : "r" (v) : "memory");	\
+		break;							\
+	}								\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1;						\
+	compiletime_assert_atomic_type(*p);				\
+	switch (sizeof(*p)) {						\
+	case 4:								\
+		asm volatile ("ldar %w0, %1"				\
+				: "=r" (___p1) : "Q" (*p) : "memory");	\
+		break;							\
+	case 8:								\
+		asm volatile ("ldar %0, %1"				\
+				: "=r" (___p1) : "Q" (*p) : "memory");	\
+		break;							\
+	}								\
+	___p1;								\
+})
+
 #endif
 
 #define read_barrier_depends()	do { } while(0)