| author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2016-06-20 11:25:44 -0400 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2016-06-20 11:25:44 -0400 |
| commit | af52739b922f656eb1f39016fabaabe4baeda2e2 (patch) | |
| tree | 79a7aa810d0493cd0cf4adebac26d37f12e8b545 /kernel/locking/qspinlock.c | |
| parent | 25ed6a5e97809129a1bc852b6b5c7d03baa112c4 (diff) | |
| parent | 33688abb2802ff3a230bd2441f765477b94cc89e (diff) | |
Merge 4.7-rc4 into staging-next
We want the fixes in here, and we can resolve a merge issue in
drivers/iio/industrialio-trigger.c
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'kernel/locking/qspinlock.c')
-rw-r--r-- | kernel/locking/qspinlock.c | 60

1 file changed, 60 insertions, 0 deletions
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index ce2f75e32ae1..5fc8c311b8fe 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -267,6 +267,66 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
 #define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
 #endif
 
+/*
+ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
+ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
+ *
+ * This means that the store can be delayed, but no later than the
+ * store-release from the unlock. This means that simply observing
+ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
+ *
+ * There are two paths that can issue the unordered store:
+ *
+ *  (1) clear_pending_set_locked():	*,1,0 -> *,0,1
+ *
+ *  (2) set_locked():			t,0,0 -> t,0,1 ; t != 0
+ *      atomic_cmpxchg_relaxed():	t,0,0 -> 0,0,1
+ *
+ * However, in both cases we have other !0 state we've set before to queue
+ * ourselves:
+ *
+ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
+ * load is constrained by that ACQUIRE to not pass before that, and thus must
+ * observe the store.
+ *
+ * For (2) we have a more interesting scenario. We enqueue ourselves using
+ * xchg_tail(), which ends up being a RELEASE. This in itself is not
+ * sufficient, however that is followed by an smp_cond_acquire() on the same
+ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
+ * guarantees we must observe that store.
+ *
+ * Therefore both cases have other !0 state that is observable before the
+ * unordered locked byte store comes through. This means we can use that to
+ * wait for the lock store, and then wait for an unlock.
+ */
+#ifndef queued_spin_unlock_wait
+void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+	u32 val;
+
+	for (;;) {
+		val = atomic_read(&lock->val);
+
+		if (!val) /* not locked, we're done */
+			goto done;
+
+		if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
+			break;
+
+		/* not locked, but pending, wait until we observe the lock */
+		cpu_relax();
+	}
+
+	/* any unlock is good */
+	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+		cpu_relax();
+
+done:
+	smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+}
+EXPORT_SYMBOL(queued_spin_unlock_wait);
+#endif
+
 #endif /* _GEN_PV_LOCK_SLOWPATH */
 
 /**
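
For readers who want to experiment with the ordering argument above outside the kernel, here is a minimal userspace sketch of the same two-phase wait using C11 atomics. It is an illustration of the pattern only, not the kernel implementation: the names qspinlock_model, unlock_wait_model and cpu_relax_model, and the mask values, are assumptions made up for this example, standing in for the kernel's qspinlock word, cpu_relax() and smp_rmb().

```c
/* Userspace sketch only -- NOT kernel code. All names below are invented
 * for illustration; the mask values mirror the idea of a locked byte and
 * a pending byte in the lock word. */
#include <stdatomic.h>
#include <stdio.h>

#define Q_LOCKED_MASK   0x000000ffu     /* low byte: lock holder present */
#define Q_PENDING_MASK  0x0000ff00u     /* next byte: a waiter is pending */

struct qspinlock_model {
	_Atomic unsigned int val;       /* tail | pending | locked */
};

static void cpu_relax_model(void)
{
	/* stand-in for cpu_relax(); nothing to do in this sketch */
}

/* Mirror of the two-phase wait in queued_spin_unlock_wait(). */
static void unlock_wait_model(struct qspinlock_model *lock)
{
	unsigned int val;

	for (;;) {
		val = atomic_load_explicit(&lock->val, memory_order_relaxed);

		if (!val)                       /* not locked, we're done */
			goto done;

		if (val & Q_LOCKED_MASK)        /* locked, go wait for unlock */
			break;

		/* pending set but locked byte not yet visible: the unordered
		 * locked-byte store is still in flight, keep polling */
		cpu_relax_model();
	}

	/* any unlock is good */
	while (atomic_load_explicit(&lock->val, memory_order_relaxed) &
	       Q_LOCKED_MASK)
		cpu_relax_model();

done:
	/* plays the role of smp_rmb(): order later loads after the wait */
	atomic_thread_fence(memory_order_acquire);
}

int main(void)
{
	struct qspinlock_model lock = { .val = 0 };

	unlock_wait_model(&lock);       /* returns immediately: val == 0 */
	puts("observed unlocked");
	return 0;
}
```

The shape of the two loops matches the comment in the patch: first spin until the value is either zero or has the locked byte set (so the delayed, unordered locked-byte store has become visible), and only then spin until that byte clears; the trailing acquire fence plays the role of the smp_rmb() at the done: label.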