author     Ingo Molnar <mingo@kernel.org>    2016-06-08 08:35:29 -0400
committer  Ingo Molnar <mingo@kernel.org>    2016-06-08 08:35:29 -0400
commit     ae0b5c2f0334f35d2b2effb13aa418bc1e2039b7 (patch)
tree       cf2174ac4de949d2bb0a639bdc4cb10be8336137
parent     331b6d8c7afc2e5b900b9dcd850c265e1ba8d8e7 (diff)
parent     2c610022711675ee908b903d242f0b90e1db661f (diff)
Merge branch 'locking/urgent' into locking/core, to pick up dependency
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  include/asm-generic/qspinlock.h  53
-rw-r--r--  kernel/locking/qspinlock.c       60
2 files changed, 77 insertions(+), 36 deletions(-)
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 6bd05700d8c9..05f05f17a7c2 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -22,37 +22,33 @@
 #include <asm-generic/qspinlock_types.h>
 
 /**
+ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+#ifndef queued_spin_unlock_wait
+extern void queued_spin_unlock_wait(struct qspinlock *lock);
+#endif
+
+/**
  * queued_spin_is_locked - is the spinlock locked?
  * @lock: Pointer to queued spinlock structure
  * Return: 1 if it is locked, 0 otherwise
  */
+#ifndef queued_spin_is_locked
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
         /*
-         * queued_spin_lock_slowpath() can ACQUIRE the lock before
-         * issuing the unordered store that sets _Q_LOCKED_VAL.
-         *
-         * See both smp_cond_acquire() sites for more detail.
-         *
-         * This however means that in code like:
-         *
-         *   spin_lock(A)               spin_lock(B)
-         *   spin_unlock_wait(B)        spin_is_locked(A)
-         *   do_something()             do_something()
-         *
-         * Both CPUs can end up running do_something() because the store
-         * setting _Q_LOCKED_VAL will pass through the loads in
-         * spin_unlock_wait() and/or spin_is_locked().
+         * See queued_spin_unlock_wait().
          *
-         * Avoid this by issuing a full memory barrier between the spin_lock()
-         * and the loads in spin_unlock_wait() and spin_is_locked().
-         *
-         * Note that regular mutual exclusion doesn't care about this
-         * delayed store.
+         * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
+         * isn't immediately observable.
          */
-        smp_mb();
-        return atomic_read(&lock->val) & _Q_LOCKED_MASK;
+        return atomic_read(&lock->val);
 }
+#endif
 
 /**
  * queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -122,21 +118,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 }
 #endif
 
-/**
- * queued_spin_unlock_wait - wait until current lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
-{
-        /* See queued_spin_is_locked() */
-        smp_mb();
-        while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
-                cpu_relax();
-}
-
 #ifndef virt_spin_lock
 static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
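The comment removed above describes a two-CPU interleaving in which the unordered store that sets _Q_LOCKED_VAL has not yet become visible to the other CPU's loads. Spelled out as code, the problematic pattern looks roughly like the sketch below; lock_a, lock_b, the two *_path() helpers and do_something() are hypothetical names invented for illustration, not part of this patch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static void do_something(void) { }      /* stands in for the critical work */

static void cpu0_path(void)
{
        spin_lock(&lock_a);
        /*
         * Without extra ordering, the store marking lock_a locked can still
         * be in flight here, so this wait may see lock_b as never locked ...
         */
        spin_unlock_wait(&lock_b);
        do_something();
        spin_unlock(&lock_a);
}

static void cpu1_path(void)
{
        spin_lock(&lock_b);
        /* ... while this check can likewise miss CPU0's store to lock_a. */
        if (!spin_is_locked(&lock_a))
                do_something();
        spin_unlock(&lock_b);
}

Both CPUs can then run do_something() concurrently. The old header code closed the window with the smp_mb() deleted above; the new code relies on the argument spelled out in the qspinlock.c comment below.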
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index ce2f75e32ae1..5fc8c311b8fe 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -267,6 +267,66 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
 #define queued_spin_lock_slowpath      native_queued_spin_lock_slowpath
 #endif
 
+/*
+ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
+ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
+ *
+ * This means that the store can be delayed, but no later than the
+ * store-release from the unlock. This means that simply observing
+ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
+ *
+ * There are two paths that can issue the unordered store:
+ *
+ *  (1) clear_pending_set_locked():    *,1,0 -> *,0,1
+ *
+ *  (2) set_locked():                  t,0,0 -> t,0,1 ; t != 0
+ *      atomic_cmpxchg_relaxed():      t,0,0 -> 0,0,1
+ *
+ * However, in both cases we have other !0 state we've set before to queue
+ * ourselves:
+ *
+ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL; our
+ * load is constrained by that ACQUIRE to not pass before it, and thus must
+ * observe the store.
+ *
+ * For (2) we have a more interesting scenario. We enqueue ourselves using
+ * xchg_tail(), which ends up being a RELEASE. This in itself is not
+ * sufficient, however that is followed by an smp_cond_acquire() on the same
+ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
+ * guarantees we must observe that store.
+ *
+ * Therefore both cases have other !0 state that is observable before the
+ * unordered locked byte store comes through. This means we can use that to
+ * wait for the lock store, and then wait for an unlock.
+ */
+#ifndef queued_spin_unlock_wait
+void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+        u32 val;
+
+        for (;;) {
+                val = atomic_read(&lock->val);
+
+                if (!val) /* not locked, we're done */
+                        goto done;
+
+                if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
+                        break;
+
+                /* not locked, but pending, wait until we observe the lock */
+                cpu_relax();
+        }
+
+        /* any unlock is good */
+        while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+                cpu_relax();
+
+done:
+        smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+}
+EXPORT_SYMBOL(queued_spin_unlock_wait);
+#endif
+
 #endif /* _GEN_PV_LOCK_SLOWPATH */
 
 /**
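For a sense of why callers want this primitive at all: the usual spin_unlock_wait() idiom publishes some "going away" state and then waits out the current lock holder instead of taking the lock on the writer side. The sketch below is hypothetical (struct foo, its fields and both functions are invented here, and the precise barrier requirements are exactly what this patch series is tightening), so read it as the shape of the idiom rather than a verified ordering proof:

#include <linux/spinlock.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct foo {
        spinlock_t      lock;
        bool            dead;
};

static void foo_reader(struct foo *f)
{
        spin_lock(&f->lock);
        if (!f->dead) {
                /* use f; the writer below waits for this section to drain */
        }
        spin_unlock(&f->lock);
}

static void foo_shutdown(struct foo *f)
{
        WRITE_ONCE(f->dead, true);
        smp_mb();                       /* publish 'dead' before sampling the lock word */
        spin_unlock_wait(&f->lock);     /* wait out a holder that may have missed 'dead' */
        /* a critical section that began before 'dead' was visible has now finished */
}

The property the new queued_spin_unlock_wait() provides for this idiom is that a locker's presence is observable through some !0 state even while its _Q_LOCKED_VAL store is still in flight, so foo_shutdown() cannot sample the lock word too early and miss a holder.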