author     Peter Zijlstra <peterz@infradead.org>    2015-05-12 04:51:55 -0400
committer  Ingo Molnar <mingo@kernel.org>           2015-05-19 02:32:00 -0400
commit     b92b8b35a2e38bde319fd1d68ec84628c1f1b0fb (patch)
tree       45aea6d12a580b60848363c10dc355f3307ec1ff
parent     ab3f02fc237211f0583c1e7ba3bf504747be9b8d (diff)
locking/arch: Rename set_mb() to smp_store_mb()
Since set_mb() is really about an smp_mb() -- not an IO/DMA barrier like
mb() -- rename it to match the recent smp_load_acquire() and
smp_store_release().

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
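The summary below is illustrative and is not part of the patch; only the asm-generic fallback quoted at the end is taken from the change itself. It spells out the naming family the message refers to:

    /*
     * smp_* primitives order accesses against other CPUs only; mb() is the
     * heavier barrier that also orders against IO/DMA.  After this rename the
     * store-plus-full-barrier helper carries the same prefix as its siblings:
     *
     *   smp_load_acquire(p)        load, then barrier (ACQUIRE)
     *   smp_store_release(p, v)    barrier, then store (RELEASE)
     *   smp_store_mb(var, value)   store, then full SMP memory barrier
     *
     * asm-generic fallback added by this patch:
     */
    #ifndef smp_store_mb
    #define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
    #endif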
-rw-r--r--  Documentation/memory-barriers.txt      6
-rw-r--r--  arch/arm/include/asm/barrier.h         2
-rw-r--r--  arch/arm64/include/asm/barrier.h       2
-rw-r--r--  arch/ia64/include/asm/barrier.h        7
-rw-r--r--  arch/metag/include/asm/barrier.h       2
-rw-r--r--  arch/mips/include/asm/barrier.h        2
-rw-r--r--  arch/powerpc/include/asm/barrier.h     2
-rw-r--r--  arch/s390/include/asm/barrier.h        2
-rw-r--r--  arch/sh/include/asm/barrier.h          2
-rw-r--r--  arch/sparc/include/asm/barrier_64.h    2
-rw-r--r--  arch/x86/include/asm/barrier.h         4
-rw-r--r--  arch/x86/um/asm/barrier.h              3
-rw-r--r--  fs/select.c                            6
-rw-r--r--  include/asm-generic/barrier.h          4
-rw-r--r--  include/linux/sched.h                  8
-rw-r--r--  kernel/futex.c                         2
-rw-r--r--  kernel/locking/qspinlock_paravirt.h    2
-rw-r--r--  kernel/sched/wait.c                    4
18 files changed, 29 insertions(+), 33 deletions(-)
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index f95746189b5d..fe4020e4b468 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -1662,7 +1662,7 @@ CPU from reordering them.
 
 There are some more advanced barrier functions:
 
- (*) set_mb(var, value)
+ (*) smp_store_mb(var, value)
 
     This assigns the value to the variable and then inserts a full memory
     barrier after it, depending on the function.  It isn't guaranteed to
@@ -1975,7 +1975,7 @@ after it has altered the task state:
     CPU 1
     ===============================
     set_current_state();
-      set_mb();
+      smp_store_mb();
         STORE current->state
         <general barrier>
     LOAD event_indicated
@@ -2016,7 +2016,7 @@ between the STORE to indicate the event and the STORE to set TASK_RUNNING:
     CPU 1                           CPU 2
     =============================== ===============================
     set_current_state();            STORE event_indicated
-      set_mb();                     wake_up();
+      smp_store_mb();               wake_up();
         STORE current->state          <write barrier>
         <general barrier>             STORE current->state
     LOAD event_indicated
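A concrete sketch of the sleep/wakeup pairing documented above (illustrative only, not part of the patch; wait_for_event(), signal_event() and event_indicated are made-up names):

    #include <linux/sched.h>

    static int event_indicated;

    /* Sleeper side: STORE current->state, <general barrier>, LOAD event_indicated. */
    static void wait_for_event(void)
    {
        /* set_current_state() now expands to smp_store_mb(current->state, ...),
         * so the state store is ordered before the load of event_indicated. */
        set_current_state(TASK_UNINTERRUPTIBLE);
        while (!READ_ONCE(event_indicated)) {
            schedule();
            set_current_state(TASK_UNINTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
    }

    /* Waker side: STORE event_indicated, then wake_up(). */
    static void signal_event(struct task_struct *sleeper)
    {
        WRITE_ONCE(event_indicated, 1);
        wake_up_process(sleeper);   /* supplies the pairing barrier when it wakes the task */
    }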
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 993150aea681..6c2327e1c732 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -81,7 +81,7 @@ do { \
 #define read_barrier_depends() do { } while(0)
 #define smp_read_barrier_depends() do { } while(0)
 
-#define set_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_mb__before_atomic() smp_mb()
 #define smp_mb__after_atomic() smp_mb()
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index ff7de78d01b8..0fa47c4275cb 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -114,7 +114,7 @@ do { \
 #define read_barrier_depends() do { } while(0)
 #define smp_read_barrier_depends() do { } while(0)
 
-#define set_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #define nop() asm volatile("nop");
 
 #define smp_mb__before_atomic() smp_mb()
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 03117e7b2ab8..843ba435e43b 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -77,12 +77,7 @@ do { \
 	___p1; \
 })
 
-/*
- * XXX check on this ---I suspect what Linus really wants here is
- * acquire vs release semantics but we can't discuss this stuff with
- * Linus just yet. Grrr...
- */
-#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
 
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index 97eb018a2933..5a696e507930 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -84,7 +84,7 @@ static inline void fence(void)
 #define read_barrier_depends() do { } while (0)
 #define smp_read_barrier_depends() do { } while (0)
 
-#define set_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_store_release(p, v) \
 do { \
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index cff1bbdaa74a..7ecba84656d4 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -112,7 +112,7 @@
 #define __WEAK_LLSC_MB " \n"
 #endif
 
-#define set_mb(var, value) \
+#define smp_store_mb(var, value) \
 	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 2a072e48780d..39505d660a70 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -34,7 +34,7 @@
 #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 
-#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
 
 #ifdef __SUBARCH_HAS_LWSYNC
 # define SMPWMB LWSYNC
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index b66cd53d35fc..e6f8615a11eb 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -36,7 +36,7 @@
 #define smp_mb__before_atomic() smp_mb()
 #define smp_mb__after_atomic() smp_mb()
 
-#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
 
 #define smp_store_release(p, v) \
 do { \
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
index 43715308b068..bf91037db4e0 100644
--- a/arch/sh/include/asm/barrier.h
+++ b/arch/sh/include/asm/barrier.h
@@ -32,7 +32,7 @@
 #define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
 #endif
 
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
 #include <asm-generic/barrier.h>
 
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 125fec7512f4..809941e33e12 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -40,7 +40,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
 #define dma_rmb() rmb()
 #define dma_wmb() wmb()
 
-#define set_mb(__var, __value) \
+#define smp_store_mb(__var, __value) \
 	do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0)
 
 #ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 9de5cde133a1..e51a8f803f55 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -35,12 +35,12 @@
 #define smp_mb() mb()
 #define smp_rmb() dma_rmb()
 #define smp_wmb() barrier()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 #else /* !SMP */
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
-#define set_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
 #endif /* SMP */
 
 #define read_barrier_depends() do { } while (0)
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index cc0cb01f346d..b9531d343134 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -39,7 +39,8 @@
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
-#define set_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
+
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
 
 #define read_barrier_depends() do { } while (0)
 #define smp_read_barrier_depends() do { } while (0)
diff --git a/fs/select.c b/fs/select.c
index f684c750e08a..015547330e88 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -189,7 +189,7 @@ static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
 	 * doesn't imply write barrier and the users expect write
 	 * barrier semantics on wakeup functions. The following
 	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
-	 * and is paired with set_mb() in poll_schedule_timeout.
+	 * and is paired with smp_store_mb() in poll_schedule_timeout.
 	 */
 	smp_wmb();
 	pwq->triggered = 1;
@@ -244,7 +244,7 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
 	/*
 	 * Prepare for the next iteration.
 	 *
-	 * The following set_mb() serves two purposes. First, it's
+	 * The following smp_store_mb() serves two purposes. First, it's
 	 * the counterpart rmb of the wmb in pollwake() such that data
 	 * written before wake up is always visible after wake up.
 	 * Second, the full barrier guarantees that triggered clearing
@@ -252,7 +252,7 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
 	 * this problem doesn't exist for the first iteration as
 	 * add_wait_queue() has full barrier semantics.
 	 */
-	set_mb(pwq->triggered, 0);
+	smp_store_mb(pwq->triggered, 0);
 
 	return rc;
 }
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 3938716b44d7..e6a83d712ef6 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -66,8 +66,8 @@
 #define smp_read_barrier_depends() do { } while (0)
 #endif
 
-#ifndef set_mb
-#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
+#ifndef smp_store_mb
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 26a2e6122734..18f197223ebd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -252,7 +252,7 @@ extern char ___assert_task_state[1 - 2*!!(
 #define set_task_state(tsk, state_value) \
 	do { \
 		(tsk)->task_state_change = _THIS_IP_; \
-		set_mb((tsk)->state, (state_value)); \
+		smp_store_mb((tsk)->state, (state_value)); \
 	} while (0)
 
 /*
@@ -274,7 +274,7 @@ extern char ___assert_task_state[1 - 2*!!(
 #define set_current_state(state_value) \
 	do { \
 		current->task_state_change = _THIS_IP_; \
-		set_mb(current->state, (state_value)); \
+		smp_store_mb(current->state, (state_value)); \
 	} while (0)
 
 #else
@@ -282,7 +282,7 @@ extern char ___assert_task_state[1 - 2*!!(
 #define __set_task_state(tsk, state_value) \
 	do { (tsk)->state = (state_value); } while (0)
 #define set_task_state(tsk, state_value) \
-	set_mb((tsk)->state, (state_value))
+	smp_store_mb((tsk)->state, (state_value))
 
 /*
  * set_current_state() includes a barrier so that the write of current->state
@@ -298,7 +298,7 @@ extern char ___assert_task_state[1 - 2*!!(
 #define __set_current_state(state_value) \
 	do { current->state = (state_value); } while (0)
 #define set_current_state(state_value) \
-	set_mb(current->state, (state_value))
+	smp_store_mb(current->state, (state_value))
 
 #endif
 
diff --git a/kernel/futex.c b/kernel/futex.c
index 2579e407ff67..55ca63ad9622 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2055,7 +2055,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
 {
 	/*
 	 * The task state is guaranteed to be set before another task can
-	 * wake it. set_current_state() is implemented using set_mb() and
+	 * wake it. set_current_state() is implemented using smp_store_mb() and
 	 * queue_me() calls spin_unlock() upon completion, both serializing
 	 * access to the hash list and forcing another memory barrier.
 	 */
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 27ab96dca68c..04ab18151cc8 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -175,7 +175,7 @@ static void pv_wait_node(struct mcs_spinlock *node)
 	 *
 	 * Matches the xchg() from pv_kick_node().
 	 */
-	set_mb(pn->state, vcpu_halted);
+	smp_store_mb(pn->state, vcpu_halted);
 
 	if (!READ_ONCE(node->locked))
 		pv_wait(&pn->state, vcpu_halted);
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 852143a79f36..9bc82329eaad 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -341,7 +341,7 @@ long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
 	 * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
 	 * an event.
 	 */
-	set_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */
+	smp_store_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */
 
 	return timeout;
 }
@@ -354,7 +354,7 @@ int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
 	 * doesn't imply write barrier and the users expects write
 	 * barrier semantics on wakeup functions. The following
 	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
-	 * and is paired with set_mb() in wait_woken().
+	 * and is paired with smp_store_mb() in wait_woken().
 	 */
 	smp_wmb(); /* C */
 	wait->flags |= WQ_FLAG_WOKEN;