author    Andrea Parri <parri.andrea@gmail.com>  2018-02-22 04:24:48 -0500
committer Ingo Molnar <mingo@kernel.org>         2018-02-23 02:38:16 -0500
commit    472e8c55cf6622d1c112dc2bc777f68bbd4189db (patch)
tree      8f91fed0fa1000eef3790b086903b6409ba50358
parent    79d442461df7478cdd0c50d9b8a76f431f150fa3 (diff)
locking/xchg/alpha: Fix xchg() and cmpxchg() memory ordering bugs
Successful RMW operations are supposed to be fully ordered, but Alpha's
xchg() and cmpxchg() do not meet this requirement.

Will Deacon noticed the bug:

  > So MP using xchg:
  >
  > WRITE_ONCE(x, 1)
  > xchg(y, 1)
  >
  > smp_load_acquire(y) == 1
  > READ_ONCE(x) == 0
  >
  > would be allowed.

... which thus violates the above requirement. Fix it by adding a
leading smp_mb() to the xchg() and cmpxchg() implementations.

Reported-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrea Parri <parri.andrea@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-alpha@vger.kernel.org
Link: http://lkml.kernel.org/r/1519291488-5752-1-git-send-email-parri.andrea@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
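For readers who want to experiment with the message-passing (MP) pattern from the litmus test above, the following is a minimal userspace sketch using C11 atomics and POSIX threads as stand-ins for the kernel primitives. The mapping (xchg() ~ a seq_cst atomic exchange, smp_load_acquire() ~ an acquire load, WRITE_ONCE()/READ_ONCE() ~ relaxed accesses), the file name, and the iteration count are illustrative assumptions, not part of this patch. Running it can only fail to observe the forbidden outcome (r_y == 1 && r_x == 0); it cannot prove the ordering, and on strongly ordered hosts such as x86 the outcome is unobservable regardless.

/*
 * mp_xchg_litmus.c - userspace analogue of the MP litmus test from the
 * commit message. C11 atomics stand in for the kernel primitives
 * (assumed mapping: xchg() ~ seq_cst exchange, smp_load_acquire() ~
 * acquire load, WRITE_ONCE()/READ_ONCE() ~ relaxed accesses).
 *
 * Build: cc -O2 -pthread mp_xchg_litmus.c -o mp_xchg_litmus
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int x, y;
static int r_y, r_x;

static void *writer(void *arg)
{
	(void)arg;
	atomic_store_explicit(&x, 1, memory_order_relaxed);    /* WRITE_ONCE(x, 1) */
	atomic_exchange_explicit(&y, 1, memory_order_seq_cst); /* xchg(y, 1): fully ordered */
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	r_y = atomic_load_explicit(&y, memory_order_acquire);  /* smp_load_acquire(y) */
	r_x = atomic_load_explicit(&x, memory_order_relaxed);  /* READ_ONCE(x) */
	return NULL;
}

int main(void)
{
	/*
	 * Run the two threads repeatedly; the outcome r_y == 1 && r_x == 0
	 * must never show up when the exchange is fully ordered.
	 */
	for (int i = 0; i < 100000; i++) {
		pthread_t t0, t1;

		atomic_store(&x, 0);
		atomic_store(&y, 0);
		pthread_create(&t0, NULL, writer, NULL);
		pthread_create(&t1, NULL, reader, NULL);
		pthread_join(t0, NULL);
		pthread_join(t1, NULL);
		if (r_y == 1 && r_x == 0) {
			printf("forbidden outcome observed at iteration %d\n", i);
			return 1;
		}
	}
	printf("forbidden outcome not observed\n");
	return 0;
}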
-rw-r--r--  arch/alpha/include/asm/xchg.h  21
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
index e1facf6fc244..e2b59fac5257 100644
--- a/arch/alpha/include/asm/xchg.h
+++ b/arch/alpha/include/asm/xchg.h
@@ -12,6 +12,10 @@
  * Atomic exchange.
  * Since it can be used to implement critical sections
  * it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
  */
 
 static inline unsigned long
@@ -19,6 +23,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	insbl	%1,%4,%1\n"
@@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	inswl	%1,%4,%1\n"
@@ -67,6 +73,7 @@ ____xchg(_u32, volatile int *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%4\n"
 	"	bis $31,%3,%1\n"
@@ -87,6 +94,7 @@ ____xchg(_u64, volatile long *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%4\n"
 	"	bis $31,%3,%1\n"
@@ -128,9 +136,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
  * store NEW in MEM. Return the initial value in MEM. Success is
  * indicated by comparing RETURN with OLD.
  *
- * The memory barrier is placed in SMP unconditionally, in order to
- * guarantee that dependency ordering is preserved when a dependency
- * is headed by an unsuccessful operation.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
  */
 
 static inline unsigned long
@@ -138,6 +149,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	insbl	%1,%5,%1\n"
@@ -165,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	inswl	%1,%5,%1\n"
@@ -192,6 +205,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"
@@ -215,6 +229,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"