Diffstat (limited to 'kernel/locking/qspinlock.c')
-rw-r--r--  kernel/locking/qspinlock.c  29
1 file changed, 24 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 87e9ce6a63c5..7868418ea586 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -14,8 +14,9 @@
  * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
  * (C) Copyright 2013-2014 Red Hat, Inc.
  * (C) Copyright 2015 Intel Corp.
+ * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
  *
- * Authors: Waiman Long <waiman.long@hp.com>
+ * Authors: Waiman Long <waiman.long@hpe.com>
  *          Peter Zijlstra <peterz@infradead.org>
  */
 
@@ -176,7 +177,12 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
 {
         struct __qspinlock *l = (void *)lock;
 
-        return (u32)xchg(&l->tail, tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
+        /*
+         * Use release semantics to make sure that the MCS node is properly
+         * initialized before changing the tail code.
+         */
+        return (u32)xchg_release(&l->tail,
+                                 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
 }
 
 #else /* _Q_PENDING_BITS == 8 */
@@ -208,7 +214,11 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
 
         for (;;) {
                 new = (val & _Q_LOCKED_PENDING_MASK) | tail;
-                old = atomic_cmpxchg(&lock->val, val, new);
+                /*
+                 * Use release semantics to make sure that the MCS node is
+                 * properly initialized before changing the tail code.
+                 */
+                old = atomic_cmpxchg_release(&lock->val, val, new);
                 if (old == val)
                         break;
 
@@ -319,7 +329,11 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
                 if (val == new)
                         new |= _Q_PENDING_VAL;
 
-                old = atomic_cmpxchg(&lock->val, val, new);
+                /*
+                 * Acquire semantics are required here as the function may
+                 * return immediately if the lock was free.
+                 */
+                old = atomic_cmpxchg_acquire(&lock->val, val, new);
                 if (old == val)
                         break;
 
@@ -426,7 +440,12 @@ queue:
                         set_locked(lock);
                         break;
                 }
-                old = atomic_cmpxchg(&lock->val, val, _Q_LOCKED_VAL);
+                /*
+                 * The smp_load_acquire() call above has provided the necessary
+                 * acquire semantics required for locking. At most two
+                 * iterations of this loop may be run.
+                 */
+                old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
                 if (old == val)
                         goto release;        /* No contention */
 
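
For context, the ordering pattern this patch applies can be sketched outside the
kernel with C11 <stdatomic.h>: a release operation publishes a fully initialized
node, and the matching acquire operation makes that initialization visible to
whoever observes the publication. The sketch below is illustrative only;
mcs_node, publish_node, observe_tail and try_lock are invented names, not kernel
APIs, and the C11 builtins merely stand in for the kernel's xchg_release() and
atomic_cmpxchg_acquire() helpers.

        /*
         * Minimal sketch of the release/acquire pairing used by this patch,
         * written against C11 <stdatomic.h> rather than the kernel's atomic
         * helpers. All names here are illustrative, not kernel APIs.
         */
        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stddef.h>
        #include <stdio.h>

        struct mcs_node {
                int cpu;                        /* written before publication */
                _Atomic(struct mcs_node *) next;
        };

        static _Atomic(struct mcs_node *) tail; /* stand-in for the tail code */
        static atomic_int lock_val;             /* stand-in for lock->val */

        /*
         * Publisher side: every store to *node happens-before the release
         * exchange, mirroring xchg_release(&l->tail, ...) in the patch.
         */
        static struct mcs_node *publish_node(struct mcs_node *node, int cpu)
        {
                node->cpu = cpu;
                atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
                return atomic_exchange_explicit(&tail, node,
                                                memory_order_release);
        }

        /*
         * Observer side: the acquire load pairs with the release exchange
         * above, so *node is fully initialized by the time it is read here.
         */
        static void observe_tail(void)
        {
                struct mcs_node *node =
                        atomic_load_explicit(&tail, memory_order_acquire);

                if (node)
                        printf("tail published by cpu %d\n", node->cpu);
        }

        /*
         * Lock side: acquire semantics on a successful compare-and-swap keep
         * the critical section from being reordered before the lock is taken,
         * mirroring atomic_cmpxchg_acquire(&lock->val, val, new).
         */
        static bool try_lock(void)
        {
                int expected = 0;

                return atomic_compare_exchange_strong_explicit(&lock_val,
                                &expected, 1, memory_order_acquire,
                                memory_order_relaxed);
        }

        int main(void)
        {
                static struct mcs_node node;

                publish_node(&node, 0);
                observe_tail();
                printf("try_lock: %d\n", try_lock());
                return 0;
        }

The relaxed cmpxchg in the final hunk follows the same reasoning in reverse: the
earlier smp_load_acquire() already provided the required acquire ordering, so
repeating it on the cmpxchg would only add cost on weakly ordered architectures.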