path: root/kernel/locking
author    Waiman Long <Waiman.Long@hpe.com>    2015-12-10 15:17:44 -0500
committer Ingo Molnar <mingo@kernel.org>       2016-02-29 04:02:42 -0500
commit    cb037fdad6772df2d49fe61c97d7c0d8265bc918 (patch)
tree      699e9c982d337cbc3ae63869a3424a48eb1c36ad /kernel/locking
parent    eaff0e7003cca6c2748b67ead2d4b1a8ad858fc7 (diff)
locking/qspinlock: Use smp_cond_acquire() in pending code
The newly introduced smp_cond_acquire() was used to replace the slowpath
lock acquisition loop. Similarly, the new function can also be applied to
the pending bit locking loop. This patch uses the new function in that
loop.

Signed-off-by: Waiman Long <Waiman.Long@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Douglas Hatch <doug.hatch@hpe.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Scott J Norton <scott.norton@hpe.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1449778666-13593-1-git-send-email-Waiman.Long@hpe.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking')
 kernel/locking/qspinlock.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)
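For readers unfamiliar with the helper: smp_cond_acquire() is a macro that
spins until a condition becomes true and then issues a read barrier, so the
control dependency plus the barrier together provide ACQUIRE ordering. A
simplified sketch of the idea, not necessarily the exact kernel definition,
looks roughly like this:

    /*
     * Simplified sketch of smp_cond_acquire(): busy-wait until @cond is
     * true, then upgrade the control dependency to ACQUIRE ordering with
     * a read barrier.
     */
    #define smp_cond_acquire(cond) do {                     \
            while (!(cond))                                 \
                    cpu_relax();                            \
            smp_rmb();      /* ctrl + rmb := acquire */     \
    } while (0)

With this helper, the pending-bit wait below can state its wait condition
directly instead of open-coding an smp_load_acquire()/cpu_relax() loop.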
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 393d1874b9e0..ce2f75e32ae1 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -358,8 +358,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 * sequentiality; this is because not all clear_pending_set_locked()
 	 * implementations imply full barriers.
 	 */
-	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK)
-		cpu_relax();
+	smp_cond_acquire(!(atomic_read(&lock->val) & _Q_LOCKED_MASK));
 
 	/*
 	 * take ownership and clear the pending bit.
@@ -435,7 +434,7 @@ queue:
 	 *
 	 * The PV pv_wait_head_or_lock function, if active, will acquire
 	 * the lock and return a non-zero value. So we have to skip the
-	 * smp_load_acquire() call. As the next PV queue head hasn't been
+	 * smp_cond_acquire() call. As the next PV queue head hasn't been
 	 * designated yet, there is no way for the locked value to become
 	 * _Q_SLOW_VAL. So both the set_locked() and the
 	 * atomic_cmpxchg_relaxed() calls will be safe.
@@ -466,7 +465,7 @@ locked:
 		break;
 	}
 	/*
-	 * The smp_load_acquire() call above has provided the necessary
+	 * The smp_cond_acquire() call above has provided the necessary
 	 * acquire semantics required for locking. At most two
 	 * iterations of this loop may be ran.
 	 */