author	Peter Zijlstra <peterz@infradead.org>	2016-05-24 07:17:12 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-06-14 05:55:14 -0400
commit	33ac279677dcc2441cb93d8cb9cf7a74df62814d (patch)
tree	bcf918587b2425d1294b780b31ab67761e068e26
parent	1f03e8d2919270bd6ef64f39a45ce8df8a9f012a (diff)
locking/barriers: Introduce smp_acquire__after_ctrl_dep()
Introduce smp_acquire__after_ctrl_dep(); this construct is not uncommon,
but the lack of this barrier is. Use it to better express the smp_rmb()
uses in WRITE_ONCE(), the IPC semaphore code and the qspinlock code.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	include/linux/compiler.h	17
-rw-r--r--	ipc/sem.c	14
-rw-r--r--	kernel/locking/qspinlock.c	2
3 files changed, 15 insertions(+), 18 deletions(-)
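For illustration, a minimal sketch of the pattern the new helper names (not from this patch; the flag/data variables are invented): a wait loop whose exit depends on a load gives a control dependency that orders later STOREs, and the barrier adds the LOAD->LOAD half.

	#include <linux/compiler.h>	/* READ_ONCE(), smp_acquire__after_ctrl_dep() */
	#include <asm/processor.h>	/* cpu_relax() */

	static int flag, data;		/* hypothetical; another CPU stores data, then flag */

	static int wait_for_data(void)
	{
		while (!READ_ONCE(flag))	/* ctrl dep: orders later STOREs */
			cpu_relax();
		smp_acquire__after_ctrl_dep();	/* adds LOAD->LOAD: full (load-)ACQUIRE */
		return READ_ONCE(data);		/* cannot be reordered before the flag load */
	}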
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 2bcaedc0f032..59a7004fc7dd 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -305,6 +305,17 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 })
 
 /**
+ * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
+ *
+ * A control dependency provides a LOAD->STORE order, the additional RMB
+ * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
+ * aka. (load)-ACQUIRE.
+ *
+ * Architectures that do not do load speculation can have this be barrier().
+ */
+#define smp_acquire__after_ctrl_dep()	smp_rmb()
+
+/**
  * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
  * @ptr: pointer to the variable to wait on
  * @cond: boolean expression to wait for
@@ -314,10 +325,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int size)
  *
  * Due to C lacking lambda expressions we load the value of *ptr into a
  * pre-named variable @VAL to be used in @cond.
- *
- * The control dependency provides a LOAD->STORE order, the additional RMB
- * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
- * aka. ACQUIRE.
  */
 #ifndef smp_cond_load_acquire
 #define smp_cond_load_acquire(ptr, cond_expr) ({	\
@@ -329,7 +336,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 			break;				\
 		cpu_relax();				\
 	}						\
-	smp_rmb(); /* ctrl + rmb := acquire */		\
+	smp_acquire__after_ctrl_dep();			\
 	VAL;						\
 })
 #endif
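For comparison, a hedged usage sketch of smp_cond_load_acquire(), which now expands to the same barrier; struct foo and FOO_READY below are invented for illustration, and VAL is the macro's name for the freshly loaded value:

	#include <linux/compiler.h>	/* smp_cond_load_acquire() */

	struct foo {
		int state;		/* hypothetical; producer sets FOO_READY */
	};
	#define FOO_READY	0x01	/* hypothetical flag */

	static int wait_ready(struct foo *f)
	{
		/*
		 * Spins until FOO_READY is observed in f->state; the returned
		 * value carries ACQUIRE semantics via ctrl dep + rmb.
		 */
		return smp_cond_load_acquire(&f->state, VAL & FOO_READY);
	}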
diff --git a/ipc/sem.c b/ipc/sem.c
index b3757ea0694b..84dff3df11a4 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -260,16 +260,6 @@ static void sem_rcu_free(struct rcu_head *head)
 }
 
 /*
- * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
- * are only control barriers.
- * The code must pair with spin_unlock(&sem->lock) or
- * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
- *
- * smp_rmb() is sufficient, as writes cannot pass the control barrier.
- */
-#define ipc_smp_acquire__after_spin_is_unlocked()	smp_rmb()
-
-/*
  * Wait until all currently ongoing simple ops have completed.
  * Caller must own sem_perm.lock.
  * New simple ops cannot start, because simple ops first check
@@ -292,7 +282,7 @@ static void sem_wait_array(struct sem_array *sma)
 		sem = sma->sem_base + i;
 		spin_unlock_wait(&sem->lock);
 	}
-	ipc_smp_acquire__after_spin_is_unlocked();
+	smp_acquire__after_ctrl_dep();
 }
 
 /*
@@ -350,7 +340,7 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 	 *	complex_count++;
 	 *	spin_unlock(sem_perm.lock);
 	 */
-	ipc_smp_acquire__after_spin_is_unlocked();
+	smp_acquire__after_ctrl_dep();
 
 	/*
 	 * Now repeat the test of complex_count:
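A hedged sketch of the pairing the ipc/sem.c code relies on; demo_lock and payload are invented names. spin_unlock_wait() is only a control barrier, so the helper supplies the missing LOAD->LOAD ordering against the owner's critical section:

	#include <linux/spinlock.h>
	#include <linux/compiler.h>

	static DEFINE_SPINLOCK(demo_lock);	/* hypothetical */
	static int payload;

	/* Owner: publish payload inside the critical section. */
	static void owner(void)
	{
		spin_lock(&demo_lock);
		payload = 1;
		spin_unlock(&demo_lock);
	}

	/* Waiter: if the owner's unlock was observed, its payload store
	 * must be visible too. */
	static int waiter(void)
	{
		spin_unlock_wait(&demo_lock);	/* ctrl dep only */
		smp_acquire__after_ctrl_dep();	/* upgrade to ACQUIRE */
		return READ_ONCE(payload);
	}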
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 1b8dda90ebfa..730655533440 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -379,7 +379,7 @@ void queued_spin_unlock_wait(struct qspinlock *lock)
 		cpu_relax();
 
 done:
-	smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+	smp_acquire__after_ctrl_dep();
 }
 EXPORT_SYMBOL(queued_spin_unlock_wait);
 #endif