author	Marc Zyngier <marc.zyngier@arm.com>	2013-08-02 06:41:13 -0400
committer	Marc Zyngier <marc.zyngier@arm.com>	2013-10-29 14:25:25 -0400
commit	d241aac798eb042e605f78c31a4122e583b2cd13 (patch)
tree	0b2f77cfbf32cfd1ff8b46a302d4480d9cb5c8ae /arch
parent	4a10c2ac2f368583138b774ca41fac4207911983 (diff)
arm64: KVM: Yield CPU when vcpu executes a WFE
On an (even slightly) oversubscribed system, spinlocks quickly become a
bottleneck: some vcpus are spinning, waiting for a lock to be released,
while the vcpu holding the lock may not be running at all.

The solution is to trap blocking WFEs and tell KVM that we're now
spinning. This ensures that other vcpus will get a scheduling boost,
allowing the lock to be released more quickly. Also, selecting
CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT slightly improves performance when
the VM is severely overcommitted.

Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
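To see why a blocking WFE is the right trap point, consider how a guest's
spin loop typically uses the instruction. The following is a minimal,
illustrative sketch of a WFE-based spinlock (not the kernel's actual
spinlock implementation; sketch_spin_lock/sketch_spin_unlock are made-up
names): the waiter parks in WFE while the lock is held, which is exactly
the moment HCR_EL2.TWE turns into a trap and kvm_vcpu_on_spin() can boost
the lock holder.

/* Illustrative sketch only -- not the kernel's spinlock. A guest vcpu
 * that loses the race parks in WFE; with HCR_EL2.TWE set, that WFE
 * traps to the hypervisor instead of burning the physical CPU. */
static inline void sketch_spin_lock(volatile int *lock)
{
	while (__atomic_exchange_n(lock, 1, __ATOMIC_ACQUIRE)) {
		/* Lock is held: wait for an event (or trap to the host). */
		asm volatile("wfe" ::: "memory");
	}
}

static inline void sketch_spin_unlock(volatile int *lock)
{
	__atomic_store_n(lock, 0, __ATOMIC_RELEASE);
	asm volatile("sev" ::: "memory");	/* wake any WFE waiters */
}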
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/include/asm/kvm_arm.h	8
-rw-r--r--	arch/arm64/kvm/Kconfig	1
-rw-r--r--	arch/arm64/kvm/handle_exit.c	18
3 files changed, 20 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index a5f28e2720c7..c98ef4771c73 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -63,6 +63,7 @@
  * TAC: Trap ACTLR
  * TSC: Trap SMC
  * TSW: Trap cache operations by set/way
+ * TWE: Trap WFE
  * TWI: Trap WFI
  * TIDCP: Trap L2CTLR/L2ECTLR
  * BSU_IS: Upgrade barriers to the inner shareable domain
@@ -72,8 +73,9 @@
  * FMO: Override CPSR.F and enable signaling with VF
  * SWIO: Turn set/way invalidates into set/way clean+invalidate
  */
-#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
-			 HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
+#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
+			 HCR_BSU_IS | HCR_FB | HCR_TAC | \
+			 HCR_AMO | HCR_IMO | HCR_FMO | \
 			 HCR_SWIO | HCR_TIDCP | HCR_RW)
 #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
 
@@ -242,4 +244,6 @@
 
 #define ESR_EL2_EC_xABT_xFSR_EXTABT	0x10
 
+#define ESR_EL2_EC_WFI_ISS_WFE	(1 << 0)
+
 #endif /* __ARM64_KVM_ARM_H__ */
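WFI and WFE trap with the same exception class (ESR_EL2_EC_WFI); bit 0 of
the ISS field of ESR_EL2 tells them apart (0 for WFI, 1 for WFE), which is
what the new ESR_EL2_EC_WFI_ISS_WFE constant masks. A minimal sketch of
the decode, where esr_is_wfe() is a hypothetical helper (not part of the
patch):

/* Hypothetical helper: true if the trapped WFx was a WFE rather than a
 * WFI, per ISS bit 0 of the syndrome value. */
static inline bool esr_is_wfe(u32 esr)
{
	return esr & ESR_EL2_EC_WFI_ISS_WFE;
}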
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 21e90820bd23..4480ab339a00 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
 	select MMU_NOTIFIER
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
+	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select KVM_MMIO
 	select KVM_ARM_HOST
 	select KVM_ARM_VGIC
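Selecting HAVE_KVM_CPU_RELAX_INTERCEPT lets generic KVM track which vcpus
trapped while relaxing in a spin loop, so kvm_vcpu_on_spin() can prefer
yield targets that are likely making progress. A rough sketch of that
heuristic, with illustrative field and function names (the real logic
lives in the generic code in virt/kvm/kvm_main.c):

/* Rough sketch of the directed-yield eligibility check enabled by
 * HAVE_KVM_CPU_RELAX_INTERCEPT; names are illustrative. */
static bool sketch_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
	/* A vcpu that is itself stuck spinning (and has already had its
	 * boost) is a poor yield target; prefer vcpus preempted while
	 * doing useful work. */
	return !vcpu->spin_loop.in_spin_loop || vcpu->spin_loop.dy_eligible;
}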
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 9beaca033437..8da56067c304 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -47,21 +47,29 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }
 
 /**
- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
+ *		    instruction executed by a guest
+ *
  * @vcpu:	the vcpu pointer
  *
- * Simply call kvm_vcpu_block(), which will halt execution of
+ * WFE: Yield the CPU and come back to this vcpu when the scheduler
+ * decides to.
+ * WFI: Simply call kvm_vcpu_block(), which will halt execution of
  * world-switches and schedule other host processes until there is an
  * incoming IRQ or FIQ to the VM.
  */
-static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	kvm_vcpu_block(vcpu);
+	if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE)
+		kvm_vcpu_on_spin(vcpu);
+	else
+		kvm_vcpu_block(vcpu);
+
 	return 1;
 }
 
 static exit_handle_fn arm_exit_handlers[] = {
-	[ESR_EL2_EC_WFI]	= kvm_handle_wfi,
+	[ESR_EL2_EC_WFI]	= kvm_handle_wfx,
 	[ESR_EL2_EC_CP15_32]	= kvm_handle_cp15_32,
 	[ESR_EL2_EC_CP15_64]	= kvm_handle_cp15_64,
 	[ESR_EL2_EC_CP14_MR]	= kvm_handle_cp14_access,
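The arm_exit_handlers[] table is indexed by the exception class (EC)
field of ESR_EL2, so kvm_handle_wfx now runs for every trapped WFx
instruction. A simplified sketch of that dispatch, using the constants
from this file (the in-tree kvm_get_exit_handler() additionally rejects
exception classes with no registered handler):

/* Simplified sketch of the exit dispatch; error handling for unknown
 * exception classes is omitted. */
static exit_handle_fn sketch_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u8 hsr_ec = kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT;

	return arm_exit_handlers[hsr_ec];
}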