author     Davidlohr Bueso <dave@stgolabs.net>  2015-10-27 15:53:49 -0400
committer  Ingo Molnar <mingo@kernel.org>       2015-12-04 05:39:51 -0500
commit     d5a73cadf3fdec95e9518ee5bb91bd0747c42b30 (patch)
tree       21208b866d0c81807f80da18177ea327c103e28c
parent     fbd35c0d2fb41b75863a0e45fe939c8440375b0a (diff)
locking/barriers, arch: Use smp barriers in smp_store_release()
With commit b92b8b35a2e ("locking/arch: Rename set_mb() to smp_store_mb()") it was made clear that the context of this call (and thus set_mb) is strictly for CPU ordering, as opposed to IO. As such all archs should use the smp variant of mb(), respecting the semantics and saving a mandatory barrier on UP.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: <linux-arch@vger.kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: dave@stgolabs.net
Link: http://lkml.kernel.org/r/1445975631-17047-3-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
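Editor's note: the "saving a mandatory barrier on UP" argument follows from how the generic barrier macros expand. A simplified sketch of the include/asm-generic/barrier.h fallbacks of that era (the per-definition #ifndef override guards in the real header are omitted here):

    #include <linux/compiler.h>        /* barrier(): compiler-only barrier */

    #ifdef CONFIG_SMP
    #define smp_mb()        mb()       /* other CPUs exist: emit the real hardware fence */
    #define smp_rmb()       rmb()
    #define smp_wmb()       wmb()
    #else  /* !CONFIG_SMP */
    #define smp_mb()        barrier()  /* no other CPU to order against: compiler barrier only */
    #define smp_rmb()       barrier()
    #define smp_wmb()       barrier()
    #endif

With mb() hard-coded in smp_store_mb(), even a CONFIG_SMP=n kernel pays for a full fence instruction (mf on ia64, sync on powerpc); with smp_mb() the macro collapses on UP to WRITE_ONCE() plus a compiler barrier, which is all that CPU-vs-CPU ordering requires.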
-rw-r--r--  arch/ia64/include/asm/barrier.h     | 2 +-
-rw-r--r--  arch/powerpc/include/asm/barrier.h  | 2 +-
-rw-r--r--  arch/s390/include/asm/barrier.h     | 2 +-
-rw-r--r--  include/asm-generic/barrier.h       | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index df896a1c41d3..209c4b817c95 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -77,7 +77,7 @@ do { \
 	___p1;								\
 })
 
-#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 0eca6efc0631..a7af5fb7b914 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -34,7 +34,7 @@
 #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 
-#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #ifdef __SUBARCH_HAS_LWSYNC
 #    define SMPWMB      LWSYNC
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index d68e11e0df5e..7ffd0b19135c 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -36,7 +36,7 @@
 #define smp_mb__before_atomic()	smp_mb()
 #define smp_mb__after_atomic()	smp_mb()
 
-#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_store_release(p, v)						\
 do {									\
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index b42afada1280..0f45f93ef692 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -93,7 +93,7 @@
 #endif	/* CONFIG_SMP */
 
 #ifndef smp_store_mb
-#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic