author    Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>    2012-07-19 05:47:52 -0400
committer Avi Kivity <avi@redhat.com>    2012-07-23 06:02:37 -0400
commit    06e48c510aa37f6e791602e6420422ea7071fe94 (patch)
tree      083e13a1a1b58d32369adc6eaa42c2a22d17d95d /virt/kvm/kvm_main.c
parent    4c088493c8d07e4e27bad53a99dcfdc14cdf45f8 (diff)
KVM: Choose better candidate for directed yield
Currently, on large vcpu guests, there is a high probability of yielding to the same vcpu that recently did a pause-loop exit or had cpu relax intercepted. Such a yield can lead to that vcpu spinning again and hence degrades performance.

This patch keeps track of pause-loop exits / cpu relax interceptions and gives a chance to a vcpu which:

(a) has not done a pause-loop exit or had cpu relax intercepted at all (it is probably a preempted lock holder), or

(b) was skipped in the last iteration because it did a pause-loop exit or had cpu relax intercepted, and has probably become eligible now (the next eligible lock holder).

Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com> # on s390x
Signed-off-by: Avi Kivity <avi@redhat.com>
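In isolation, the heuristic reads as below. This is a minimal standalone sketch: the mock struct stands in for the real struct kvm_vcpu spin_loop fields, and the plain function stands in for kvm_vcpu_eligible_for_directed_yield(); none of these names are the actual KVM code, which follows in the diff.

/* Standalone model of the directed-yield eligibility heuristic.
 * Mock names throughout; not the real KVM definitions. */
#include <stdbool.h>
#include <stdio.h>

struct spin_loop_state {
	bool in_spin_loop;	/* set while handling a PLE/cpu-relax exit */
	bool dy_eligible;	/* toggled on every eligibility check */
};

static bool eligible_for_directed_yield(struct spin_loop_state *s)
{
	/* Eligible if it never spun (likely a preempted lock holder),
	 * or if it spun but was skipped on the previous pass. */
	bool eligible = !s->in_spin_loop ||
			(s->in_spin_loop && s->dy_eligible);

	if (s->in_spin_loop)
		s->dy_eligible = !s->dy_eligible;

	return eligible;
}

int main(void)
{
	/* A VCPU caught mid-spin, not yet marked eligible. */
	struct spin_loop_state spinner = { true, false };

	for (int i = 1; i <= 4; i++)
		printf("check %d: %s\n", i,
		       eligible_for_directed_yield(&spinner) ?
		       "eligible" : "skipped");
	return 0;
}

Run as-is, this prints skipped/eligible alternately: a spinning VCPU that is passed over in one scan becomes eligible on the next, which is exactly case (b) above.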
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	42
1 file changed, 42 insertions(+), 0 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0892b75eeedd..1e10ebe1a370 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1579,6 +1579,43 @@ bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
 
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+/*
+ * Helper that checks whether a VCPU is eligible for directed yield.
+ * The most eligible candidate is decided by the following heuristics:
+ *
+ * (a) A VCPU which has not done a pl-exit or had cpu relax intercepted
+ * recently (probably a preempted lock holder), indicated by @in_spin_loop.
+ * Set at the beginning and cleared at the end of the interception/PLE handler.
+ *
+ * (b) A VCPU which has done a pl-exit/had cpu relax intercepted but did not
+ * get a chance last time (it has probably become eligible now, since we
+ * likely yielded to the lock holder in the last iteration). This is done
+ * by toggling @dy_eligible each time a VCPU is checked for eligibility.
+ *
+ * Yielding to a recently pl-exited/cpu-relax-intercepted VCPU before
+ * yielding to a preempted lock holder could result in wrong VCPU selection
+ * and CPU burning. Giving priority to a potential lock holder increases
+ * lock progress.
+ *
+ * Since the algorithm is based on heuristics, accessing another VCPU's data
+ * without locking does no harm. It may result in trying to yield to the
+ * same VCPU, failing, and continuing with the next VCPU, and so on.
+ */
+bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+{
+	bool eligible;
+
+	eligible = !vcpu->spin_loop.in_spin_loop ||
+		(vcpu->spin_loop.in_spin_loop &&
+		 vcpu->spin_loop.dy_eligible);
+
+	if (vcpu->spin_loop.in_spin_loop)
+		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
+
+	return eligible;
+}
+#endif
 void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
 	struct kvm *kvm = me->kvm;
@@ -1607,6 +1644,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 			continue;
 		if (waitqueue_active(&vcpu->wq))
 			continue;
+		if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
+			continue;
 		if (kvm_vcpu_yield_to(vcpu)) {
 			kvm->last_boosted_vcpu = i;
 			yielded = 1;
@@ -1615,6 +1654,9 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 		}
 	}
 	kvm_vcpu_set_in_spin_loop(me, false);
+
+	/* Ensure vcpu is not eligible during next spinloop */
+	kvm_vcpu_set_dy_eligible(me, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
 
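To show where the new filter sits relative to the other skip conditions, here is a simplified standalone model of the candidate scan. A plain array replaces kvm_for_each_vcpu(), a boolean replaces the waitqueue_active() check, and all names are illustrative, not the real API.

/* Mock model of the candidate scan in kvm_vcpu_on_spin(). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct mock_vcpu {
	bool runnable;		/* stands in for the waitqueue_active() check */
	bool in_spin_loop;
	bool dy_eligible;
};

static bool eligible(struct mock_vcpu *v)
{
	/* Logically equivalent to the patch's expression. */
	bool e = !v->in_spin_loop || v->dy_eligible;

	if (v->in_spin_loop)
		v->dy_eligible = !v->dy_eligible;
	return e;
}

/* Scan for a yield target, with the new eligibility filter ahead of the
 * yield attempt. Returns the chosen index, or -1 if none qualifies. */
static int pick_yield_target(struct mock_vcpu *vcpus, size_t n, size_t self)
{
	for (size_t i = 0; i < n; i++) {
		if (i == self)
			continue;		/* never boost ourselves */
		if (!vcpus[i].runnable)
			continue;		/* sleeping VCPUs are skipped */
		if (!eligible(&vcpus[i]))
			continue;		/* the filter this patch adds */
		return (int)i;
	}
	return -1;
}

int main(void)
{
	struct mock_vcpu vcpus[3] = {
		{ .runnable = true, .in_spin_loop = true, .dy_eligible = false },
		{ .runnable = false },
		{ .runnable = true, .in_spin_loop = false, .dy_eligible = false },
	};

	/* vcpu 1 is asleep and vcpu 2 never spun, so scanning on behalf
	 * of vcpu 0 settles on vcpu 2, the likely lock holder. */
	printf("target: %d\n", pick_yield_target(vcpus, 3, 0));
	return 0;
}

The real scan additionally resumes from kvm->last_boosted_vcpu and yields via kvm_vcpu_yield_to(), as the hunks above show; this sketch keeps only the ordering of the skip checks.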