author		Waiman Long <Waiman.Long@hpe.com>	2016-05-31 12:53:47 -0400
committer	Ingo Molnar <mingo@kernel.org>		2016-08-10 08:16:02 -0400
commit		08be8f63c40c030b5cf95b4368e314e563a86301
tree		79a14559d31108b7e9ad0c559c0070a39f146223
parent		64a5e3cb308028dba0676daae0a7821d770036fa
locking/pvstat: Separate wait_again and spurious wakeup stats
Currently there is overlap between the pvqspinlock wait_again and spurious_wakeup stat counters. Because of lock stealing, it is no longer possible to accurately determine whether a spurious wakeup has happened at the queue head. As the counters track both queue node and queue head status, it is also hard to tell how many of those events come from the queue head and how many from the queue node.

This patch changes the accounting rules so that spurious wakeup is only tracked in the queue node. The wait_again count, however, is only tracked in the queue head when the vCPU fails to acquire the lock after a vCPU kick. This should give a much better indication of the wait-kick dynamics in the queue node and the queue head.

Signed-off-by: Waiman Long <Waiman.Long@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Douglas Hatch <doug.hatch@hpe.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Pan Xinhui <xinhui@linux.vnet.ibm.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Scott J Norton <scott.norton@hpe.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1464713631-1066-2-git-send-email-Waiman.Long@hpe.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
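For illustration, the revised accounting rule can be summarized in the minimal userspace sketch below. This is not the kernel implementation: the helper names (node_woke_up(), head_waits()) and the plain counters standing in for the qstat_* interface are hypothetical; only the rule itself, spurious wakeups counted at queue nodes and wait_again counted at the queue head after a failed post-kick acquisition, follows the commit message.

/*
 * Minimal sketch of the revised accounting rules (hypothetical helpers,
 * not kernel code): spurious wakeups are counted at queue nodes only,
 * wait_again is counted at the queue head only.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long stat_spurious_wakeup;	/* queue nodes only */
static unsigned long stat_wait_again;		/* queue head only  */

/* Queue-node side: waking up with the node still not locked is spurious. */
static void node_woke_up(bool node_locked)
{
	if (!node_locked)
		stat_spurious_wakeup++;
}

/*
 * Queue-head side: having to wait again after a vCPU kick means the lock
 * could not be acquired (e.g. it was stolen), so count a wait_again.
 */
static void head_waits(int waitcnt)
{
	if (waitcnt > 0)
		stat_wait_again++;
}

int main(void)
{
	node_woke_up(false);	/* spurious wakeup at a queue node */
	head_waits(1);		/* second wait at the queue head   */

	printf("pv_spurious_wakeup=%lu pv_wait_again=%lu\n",
	       stat_spurious_wakeup, stat_wait_again);
	return 0;
}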
-rw-r--r--	kernel/locking/qspinlock_paravirt.h	12
-rw-r--r--	kernel/locking/qspinlock_stat.h		4
2 files changed, 5 insertions(+), 11 deletions(-)
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 429c3dc2a5f3..3acf16d79cf4 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -288,12 +288,10 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 {
 	struct pv_node *pn = (struct pv_node *)node;
 	struct pv_node *pp = (struct pv_node *)prev;
-	int waitcnt = 0;
 	int loop;
 	bool wait_early;
 
-	/* waitcnt processing will be compiled out if !QUEUED_LOCK_STAT */
-	for (;; waitcnt++) {
+	for (;;) {
 		for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
 			if (READ_ONCE(node->locked))
 				return;
@@ -317,7 +315,6 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 
 	if (!READ_ONCE(node->locked)) {
 		qstat_inc(qstat_pv_wait_node, true);
-		qstat_inc(qstat_pv_wait_again, waitcnt);
 		qstat_inc(qstat_pv_wait_early, wait_early);
 		pv_wait(&pn->state, vcpu_halted);
 	}
@@ -458,12 +455,9 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 		pv_wait(&l->locked, _Q_SLOW_VAL);
 
 		/*
-		 * The unlocker should have freed the lock before kicking the
-		 * CPU. So if the lock is still not free, it is a spurious
-		 * wakeup or another vCPU has stolen the lock. The current
-		 * vCPU should spin again.
+		 * Because of lock stealing, the queue head vCPU may not be
+		 * able to acquire the lock before it has to wait again.
 		 */
-		qstat_inc(qstat_pv_spurious_wakeup, READ_ONCE(l->locked));
 	}
 
 	/*
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h
index b9d031516254..eb0a599fcf58 100644
--- a/kernel/locking/qspinlock_stat.h
+++ b/kernel/locking/qspinlock_stat.h
@@ -24,8 +24,8 @@
  * pv_latency_wake	- average latency (ns) from vCPU kick to wakeup
  * pv_lock_slowpath	- # of locking operations via the slowpath
  * pv_lock_stealing	- # of lock stealing operations
- * pv_spurious_wakeup	- # of spurious wakeups
- * pv_wait_again	- # of vCPU wait's that happened after a vCPU kick
+ * pv_spurious_wakeup	- # of spurious wakeups in non-head vCPUs
+ * pv_wait_again	- # of wait's after a queue head vCPU kick
  * pv_wait_early	- # of early vCPU wait's
  * pv_wait_head		- # of vCPU wait's at the queue head
  * pv_wait_node		- # of vCPU wait's at a non-head queue node