author		Peter Zijlstra <peterz@infradead.org>	2016-04-04 04:57:12 -0400
committer	Ingo Molnar <mingo@kernel.org>		2016-06-14 05:54:27 -0400
commit		1f03e8d2919270bd6ef64f39a45ce8df8a9f012a (patch)
tree		0cdd5de370212a021d0a1a3439bbc4b0a77fea8b /include/linux/compiler.h
parent		245050c287a9176cee9f98109df101909c1eeef4 (diff)
locking/barriers: Replace smp_cond_acquire() with smp_cond_load_acquire()
This new form allows the use of hardware-assisted waiting.

Some hardware (ARM64 and x86) can monitor an address for changes, so by
providing a pointer we can replace the cpu_relax() with hardware-optimized
waiting methods in the future.
Requested-by: Will Deacon <will.deacon@arm.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
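
For illustration only (not part of the patch), a hypothetical call site shows how the old expression-only form maps onto the new pointer-taking form; VAL is the name the macro gives to the freshly loaded value, and the macro returns the value that satisfied the condition:

/* Hypothetical caller: wait until another CPU publishes a non-zero flag. */
static unsigned long wait_for_flag(unsigned long *flag)
{
	/* Old form: spin on a bare expression, no address is available. */
	/* smp_cond_acquire(READ_ONCE(*flag) != 0); */

	/* New form: pass the address; the loaded value is visible as VAL. */
	return smp_cond_load_acquire(flag, VAL != 0);
}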
Diffstat (limited to 'include/linux/compiler.h')
-rw-r--r--	include/linux/compiler.h	25
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 06f27fd9d760..2bcaedc0f032 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -305,21 +305,34 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 })
 
 /**
- * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering
+ * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
+ * @ptr: pointer to the variable to wait on
  * @cond: boolean expression to wait for
  *
  * Equivalent to using smp_load_acquire() on the condition variable but employs
  * the control dependency of the wait to reduce the barrier on many platforms.
  *
+ * Due to C lacking lambda expressions we load the value of *ptr into a
+ * pre-named variable @VAL to be used in @cond.
+ *
  * The control dependency provides a LOAD->STORE order, the additional RMB
  * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
  * aka. ACQUIRE.
  */
-#define smp_cond_acquire(cond)	do {		\
-	while (!(cond))				\
-		cpu_relax();			\
-	smp_rmb(); /* ctrl + rmb := acquire */	\
-} while (0)
+#ifndef smp_cond_load_acquire
+#define smp_cond_load_acquire(ptr, cond_expr) ({	\
+	typeof(ptr) __PTR = (ptr);			\
+	typeof(*ptr) VAL;				\
+	for (;;) {					\
+		VAL = READ_ONCE(*__PTR);		\
+		if (cond_expr)				\
+			break;				\
+		cpu_relax();				\
+	}						\
+	smp_rmb(); /* ctrl + rmb := acquire */		\
+	VAL;						\
+})
+#endif
 
 #endif /* __KERNEL__ */
 
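
The #ifndef guard leaves room for an architecture to provide its own definition, and because the macro now receives the address being watched, such an override can park the CPU until that location changes rather than busy-spinning. A minimal sketch of what an override could look like, assuming a hypothetical __monitor_wait(ptr, old) primitive that blocks until *ptr may differ from old (not defined by this patch):

/* Hypothetical arch-specific override (sketch only, not from this patch). */
#define smp_cond_load_acquire(ptr, cond_expr) ({	\
	typeof(ptr) __PTR = (ptr);			\
	typeof(*ptr) VAL;				\
	for (;;) {					\
		VAL = READ_ONCE(*__PTR);		\
		if (cond_expr)				\
			break;				\
		/* Hypothetical: wait for *__PTR to change from VAL. */	\
		__monitor_wait(__PTR, VAL);		\
	}						\
	smp_rmb(); /* ctrl + rmb := acquire */		\
	VAL;						\
})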