author		Linus Torvalds <torvalds@linux-foundation.org>	2016-01-11 17:18:38 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-11 17:18:38 -0500
commit		24af98c4cf5f5e69266e270c7f3fb34b82ff6656 (patch)
tree		70d71381c841c92b2d28397bf0c5d6a7d9bbbaac /include/linux/compiler.h
parent		9061cbe62adeccf8c986883bcd40f4aeee59ea75 (diff)
parent		337f13046ff03717a9e99675284a817527440a49 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"So we have a laundry list of locking subsystem changes:
- continuing barrier API and code improvements
- futex enhancements
- atomics API improvements
- pvqspinlock enhancements: in particular lock stealing and adaptive
spinning
- qspinlock micro-enhancements"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
futex: Allow FUTEX_CLOCK_REALTIME with FUTEX_WAIT op
futex: Cleanup the goto confusion in requeue_pi()
futex: Remove pointless put_pi_state calls in requeue()
futex: Document pi_state refcounting in requeue code
futex: Rename free_pi_state() to put_pi_state()
futex: Drop refcount if requeue_pi() acquired the rtmutex
locking/barriers, arch: Remove ambiguous statement in the smp_store_mb() documentation
lcoking/barriers, arch: Use smp barriers in smp_store_release()
locking/cmpxchg, arch: Remove tas() definitions
locking/pvqspinlock: Queue node adaptive spinning
locking/pvqspinlock: Allow limited lock stealing
locking/pvqspinlock: Collect slowpath lock statistics
sched/core, locking: Document Program-Order guarantees
locking, sched: Introduce smp_cond_acquire() and use it
locking/pvqspinlock, x86: Optimize the PV unlock code path
locking/qspinlock: Avoid redundant read of next pointer
locking/qspinlock: Prefetch the next node cacheline
locking/qspinlock: Use _acquire/_release() versions of cmpxchg() & xchg()
atomics: Add test for atomic operations with _relaxed variants
Diffstat (limited to 'include/linux/compiler.h')
-rw-r--r--	include/linux/compiler.h	17
1 file changed, 17 insertions, 0 deletions
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 4dac1036594f..00b042c49ccd 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -299,6 +299,23 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 	__u.__val;					\
 })
 
+/**
+ * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering
+ * @cond: boolean expression to wait for
+ *
+ * Equivalent to using smp_load_acquire() on the condition variable but employs
+ * the control dependency of the wait to reduce the barrier on many platforms.
+ *
+ * The control dependency provides a LOAD->STORE order, the additional RMB
+ * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
+ * aka. ACQUIRE.
+ */
+#define smp_cond_acquire(cond)	do {		\
+	while (!(cond))				\
+		cpu_relax();			\
+	smp_rmb(); /* ctrl + rmb := acquire */	\
+} while (0)
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
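For context, a minimal sketch of how a caller might pair the new macro with smp_store_release() for a flag/payload handoff. The producer()/consumer() functions and the 'ready'/'data' variables are hypothetical illustrations, not part of this commit:

/*
 * Hypothetical usage sketch; assumes kernel context with
 * <linux/compiler.h>, <asm/barrier.h> and <linux/bug.h> available.
 */
static int data;	/* payload (illustrative) */
static int ready;	/* handoff flag (illustrative) */

static void producer(void)
{
	data = 42;			/* plain store of the payload */
	smp_store_release(&ready, 1);	/* publish: orders the store above */
}

static void consumer(void)
{
	/*
	 * Spin until the producer publishes 'ready'. The control
	 * dependency of the loop orders later stores; the smp_rmb()
	 * inside smp_cond_acquire() orders later loads, so the read
	 * of 'data' below cannot return a stale value.
	 */
	smp_cond_acquire(READ_ONCE(ready));
	BUG_ON(data != 42);
}

This mirrors the smp_load_acquire()/smp_store_release() pairing that the kernel-doc above cites as the macro's equivalent, with the spin loop's control dependency standing in for part of the acquire barrier.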