aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/locking
diff options
context:
space:
mode:
authorWaiman Long <Waiman.Long@hpe.com>2015-09-11 14:37:34 -0400
committerIngo Molnar <mingo@kernel.org>2015-09-18 03:27:29 -0400
commit93edc8bd7750ff3cae088bfca453ea73dc9004a4 (patch)
tree0a2b12864982e91c8a3b2e0cd8fa41c65d6ec3ee /kernel/locking
parentc55a6ffa6285e29f874ed403979472631ec70bff (diff)
locking/pvqspinlock: Kick the PV CPU unconditionally when _Q_SLOW_VAL
If _Q_SLOW_VAL has been set, the vCPU state must have been vcpu_hashed. The extra check at the end of __pv_queued_spin_unlock() is unnecessary and can be removed.

Signed-off-by: Waiman Long <Waiman.Long@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1441996658-62854-3-git-send-email-Waiman.Long@hpe.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/qspinlock_paravirt.h  6
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index c8e6e9a596f5..f0450ff4829b 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -267,7 +267,6 @@ static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
267 } 267 }
268 268
269 if (!lp) { /* ONCE */ 269 if (!lp) { /* ONCE */
270 WRITE_ONCE(pn->state, vcpu_hashed);
271 lp = pv_hash(lock, pn); 270 lp = pv_hash(lock, pn);
272 271
273 /* 272 /*
@@ -275,11 +274,9 @@ static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
275 * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock() 274 * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
276 * we'll be sure to be able to observe our hash entry. 275 * we'll be sure to be able to observe our hash entry.
277 * 276 *
278 * [S] pn->state
279 * [S] <hash> [Rmw] l->locked == _Q_SLOW_VAL 277 * [S] <hash> [Rmw] l->locked == _Q_SLOW_VAL
280 * MB RMB 278 * MB RMB
281 * [RmW] l->locked = _Q_SLOW_VAL [L] <unhash> 279 * [RmW] l->locked = _Q_SLOW_VAL [L] <unhash>
282 * [L] pn->state
283 * 280 *
284 * Matches the smp_rmb() in __pv_queued_spin_unlock(). 281 * Matches the smp_rmb() in __pv_queued_spin_unlock().
285 */ 282 */
@@ -364,8 +361,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
364 * vCPU is harmless other than the additional latency in completing 361 * vCPU is harmless other than the additional latency in completing
365 * the unlock. 362 * the unlock.
366 */ 363 */
367 if (READ_ONCE(node->state) == vcpu_hashed) 364 pv_kick(node->cpu);
368 pv_kick(node->cpu);
369} 365}
370/* 366/*
371 * Include the architecture specific callee-save thunk of the 367 * Include the architecture specific callee-save thunk of the