Diffstat (limited to 'include')

 include/asm-generic/barrier.h   |  2 +-
 include/asm-generic/qspinlock.h |  9 +++++----
 include/linux/compiler.h        | 17 +++++++++++++++++
 3 files changed, 23 insertions(+), 5 deletions(-)
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index b42afada1280..0f45f93ef692 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -93,7 +93,7 @@
 #endif /* CONFIG_SMP */
 
 #ifndef smp_store_mb
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic
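
Note on the hunk above: smp_store_mb() stores a value and then issues a full barrier, so a later load in the same thread cannot be reordered before the store; the patch only swaps the unconditional mb() for the SMP-only smp_mb(). A minimal userspace sketch of the same store-then-full-barrier pattern, written with C11 atomics (store_mb(), flag0/flag1 and the thread names are hypothetical, not part of the patch), is:

/*
 * Illustrative store-buffering demo of the smp_store_mb() pattern.
 * Build with: cc -pthread store_mb_demo.c
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int flag0, flag1;

/* store a value, then a full barrier -- mirrors WRITE_ONCE(var, value); smp_mb(); */
static void store_mb(atomic_int *var, int value)
{
	atomic_store_explicit(var, value, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
}

static void *thread0(void *arg)
{
	(void)arg;
	store_mb(&flag0, 1);
	/* the fence keeps this load from moving before the store above */
	if (atomic_load_explicit(&flag1, memory_order_relaxed) == 0)
		puts("thread0: flag1 not yet set");
	return NULL;
}

static void *thread1(void *arg)
{
	(void)arg;
	store_mb(&flag1, 1);
	if (atomic_load_explicit(&flag0, memory_order_relaxed) == 0)
		puts("thread1: flag0 not yet set");
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, thread0, NULL);
	pthread_create(&t1, NULL, thread1, NULL);
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);
	return 0;
}

With the full barriers in place, at most one of the two messages can appear; dropping them to plain relaxed stores would allow both loads to see zero.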
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index e2aadbc7151f..39e1cb201b8e 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -12,8 +12,9 @@
  * GNU General Public License for more details.
  *
  * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
  *
- * Authors: Waiman Long <waiman.long@hp.com>
+ * Authors: Waiman Long <waiman.long@hpe.com>
  */
 #ifndef __ASM_GENERIC_QSPINLOCK_H
 #define __ASM_GENERIC_QSPINLOCK_H
@@ -62,7 +63,7 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 {
 	if (!atomic_read(&lock->val) &&
-	   (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
+	   (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
 		return 1;
 	return 0;
 }
@@ -77,7 +78,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
 	u32 val;
 
-	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
+	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
 	if (likely(val == 0))
 		return;
 	queued_spin_lock_slowpath(lock, val);
@@ -93,7 +94,7 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 	/*
 	 * smp_mb__before_atomic() in order to guarantee release semantics
 	 */
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 	atomic_sub(_Q_LOCKED_VAL, &lock->val);
 }
 #endif
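
Taken together, the qspinlock hunks make the uncontended lock fast paths ACQUIRE operations (cmpxchg → cmpxchg_acquire) while the unlock keeps RELEASE ordering via smp_mb__before_atomic() before the relaxed atomic_sub(). A rough userspace analogue with C11 <stdatomic.h> (the toy_ names and the simplified lock word are illustrative assumptions; the queued slowpath is not modeled) might look like this:

#include <stdatomic.h>
#include <stdbool.h>

#define TOY_LOCKED_VAL	1U	/* stand-in for _Q_LOCKED_VAL */

struct toy_qspinlock {
	atomic_uint val;
};

/* fast path of queued_spin_trylock()/queued_spin_lock(): cmpxchg with ACQUIRE on success */
static bool toy_spin_trylock(struct toy_qspinlock *lock)
{
	unsigned int expected = 0;

	if (atomic_load_explicit(&lock->val, memory_order_relaxed) != 0)
		return false;
	return atomic_compare_exchange_strong_explicit(&lock->val, &expected,
			TOY_LOCKED_VAL,
			memory_order_acquire,	/* success: take the lock with ACQUIRE */
			memory_order_relaxed);	/* failure: no ordering required */
}

/* queued_spin_unlock(): release-ordered clear of the locked value */
static void toy_spin_unlock(struct toy_qspinlock *lock)
{
	/*
	 * The kernel pairs smp_mb__before_atomic() with a relaxed atomic_sub();
	 * a release fence followed by a relaxed RMW is the closest C11 analogue.
	 */
	atomic_thread_fence(memory_order_release);
	atomic_fetch_sub_explicit(&lock->val, TOY_LOCKED_VAL, memory_order_relaxed);
}

int main(void)
{
	struct toy_qspinlock lock = { .val = 0 };

	if (toy_spin_trylock(&lock))
		toy_spin_unlock(&lock);
	return 0;
}

On strongly ordered architectures an acquire cmpxchg typically costs no more than a fully ordered one, but on weakly ordered ones it can drop a trailing barrier, which appears to be the point of the relaxation.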
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 4dac1036594f..00b042c49ccd 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -299,6 +299,23 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 	__u.__val;					\
 })
 
+/**
+ * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering
+ * @cond: boolean expression to wait for
+ *
+ * Equivalent to using smp_load_acquire() on the condition variable but employs
+ * the control dependency of the wait to reduce the barrier on many platforms.
+ *
+ * The control dependency provides a LOAD->STORE order, the additional RMB
+ * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
+ * aka. ACQUIRE.
+ */
+#define smp_cond_acquire(cond)	do {		\
+	while (!(cond))				\
+		cpu_relax();			\
+	smp_rmb(); /* ctrl + rmb := acquire */	\
+} while (0)
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
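
The new smp_cond_acquire() spins on the condition with plain loads and only imposes ordering once the condition is observed true. A userspace approximation with C11 atomics (cond_acquire(), ready, data and the thread names are illustrative; an acquire fence stands in for the control-dependency-plus-smp_rmb() combination) could be:

/*
 * Publish/consume demo built around a cond_acquire()-style spin wait.
 * Build with: cc -pthread cond_acquire_demo.c
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

#define cond_acquire(cond)	do {			\
	while (!(cond))					\
		;	/* cpu_relax() stand-in */	\
	atomic_thread_fence(memory_order_acquire);	\
} while (0)

static atomic_int ready;
static int data;

static void *producer(void *arg)
{
	(void)arg;
	data = 42;						/* publish the payload ... */
	atomic_store_explicit(&ready, 1, memory_order_release);/* ... then set the flag */
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	/* spin with relaxed loads; order only once the flag is seen set */
	cond_acquire(atomic_load_explicit(&ready, memory_order_relaxed));
	printf("data = %d\n", data);	/* the acquire fence guarantees 42 is seen */
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

In C11 terms the acquire fence after a relaxed load that observed the release store establishes the same LOAD->{LOAD,STORE} ordering the kernel comment describes, at the cost of a slightly stronger fence than the ctrl + rmb pairing.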