diff options
author | Jan Kiszka <jan.kiszka@siemens.com> | 2013-03-13 07:42:34 -0400 |
---|---|---|
committer | Gleb Natapov <gleb@redhat.com> | 2013-03-13 10:08:10 -0400 |
commit | 66450a21f99636af4fafac2afd33f1a40631bc3a (patch) | |
tree | 81a71a5ad44edcb7317567b2a922e9a861bb2bb8 /arch/x86/kvm/vmx.c | |
parent | 5d218814328da91a27e982748443e7e375e11396 (diff) |
KVM: x86: Rework INIT and SIPI handling
A VCPU sending INIT or SIPI to some other VCPU races for setting the
remote VCPU's mp_state. When we were unlucky, KVM_MP_STATE_INIT_RECEIVED
was overwritten by kvm_emulate_halt and, thus, got lost.
This introduces APIC events for those two signals, keeping them in
kvm_apic until kvm_apic_accept_events is run over the target vcpu
context. kvm_apic_has_events reports to kvm_arch_vcpu_runnable if there
are pending events, thus if vcpu blocking should end.
The patch comes with the side effect of effectively obsoleting
KVM_MP_STATE_SIPI_RECEIVED. We still accept it from user space, but
immediately translate it to KVM_MP_STATE_INIT_RECEIVED + KVM_APIC_SIPI.
The vcpu itself will no longer enter the KVM_MP_STATE_SIPI_RECEIVED
state. That also means we no longer exit to user space after receiving a
SIPI event.
Furthermore, we already reset the VCPU on INIT, only fixing up the code
segment later on when SIPI arrives. Moreover, we fix INIT handling for
the BSP: it never enters wait-for-SIPI but directly starts over on INIT.
Tested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r-- | arch/x86/kvm/vmx.c | 12 |
1 file changed, 2 insertions, 10 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f588171be177..af1ffaf20892 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -4119,12 +4119,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu) | |||
4119 | vmx_segment_cache_clear(vmx); | 4119 | vmx_segment_cache_clear(vmx); |
4120 | 4120 | ||
4121 | seg_setup(VCPU_SREG_CS); | 4121 | seg_setup(VCPU_SREG_CS); |
4122 | if (kvm_vcpu_is_bsp(&vmx->vcpu)) | 4122 | vmcs_write16(GUEST_CS_SELECTOR, 0xf000); |
4123 | vmcs_write16(GUEST_CS_SELECTOR, 0xf000); | ||
4124 | else { | ||
4125 | vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8); | ||
4126 | vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12); | ||
4127 | } | ||
4128 | 4123 | ||
4129 | seg_setup(VCPU_SREG_DS); | 4124 | seg_setup(VCPU_SREG_DS); |
4130 | seg_setup(VCPU_SREG_ES); | 4125 | seg_setup(VCPU_SREG_ES); |
@@ -4147,10 +4142,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu) | |||
4147 | vmcs_writel(GUEST_SYSENTER_EIP, 0); | 4142 | vmcs_writel(GUEST_SYSENTER_EIP, 0); |
4148 | 4143 | ||
4149 | vmcs_writel(GUEST_RFLAGS, 0x02); | 4144 | vmcs_writel(GUEST_RFLAGS, 0x02); |
4150 | if (kvm_vcpu_is_bsp(&vmx->vcpu)) | 4145 | kvm_rip_write(vcpu, 0xfff0); |
4151 | kvm_rip_write(vcpu, 0xfff0); | ||
4152 | else | ||
4153 | kvm_rip_write(vcpu, 0); | ||
4154 | 4146 | ||
4155 | vmcs_writel(GUEST_GDTR_BASE, 0); | 4147 | vmcs_writel(GUEST_GDTR_BASE, 0); |
4156 | vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); | 4148 | vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); |