author    Jan Kiszka <jan.kiszka@siemens.com>    2013-04-14 06:12:48 -0400
committer Gleb Natapov <gleb@redhat.com>         2013-04-14 11:27:09 -0400
commit    e8457c67a4ec1268ec616bd8be1d9f1cc20f1493 (patch)
tree      31d8b6de0839c80fd31a0641fb60d6fdeacf74d7
parent    5f3d5799974b89100268ba813cec8db7bd0693fb (diff)
KVM: nVMX: Fix conditions for interrupt injection
If we are entering guest mode, we do not want L0 to interrupt this vmentry with all its side effects on the vmcs. Therefore, injection shall be disallowed during L1->L2 transitions, as in the previous version. However, this check is conceptually independent of nested_exit_on_intr, so decouple it.

If L1 traps external interrupts, we can kick the guest from L2 to L1, also just like the previous code worked. But we no longer need to consider L1's idt_vectoring_info_field. It will always be empty at this point. Instead, if L2 has pending events, those are now found in the architectural queues and will, thus, prevent vmx_interrupt_allowed from being called at all.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
-rw-r--r--    arch/x86/kvm/vmx.c    20
1 file changed, 12 insertions(+), 8 deletions(-)
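For illustration, here is a minimal standalone C sketch of the decision flow the patch establishes in vmx_interrupt_allowed(). It is not kernel code: the vcpu is reduced to a few booleans, the VMCS accesses and nested_vmx_vmexit() are stubbed out, and the names (vcpu_model, interrupt_allowed) are invented for this example. It only mirrors the ordering of checks visible in the hunk below.

/*
 * Simplified model of the reworked vmx_interrupt_allowed() logic:
 * nested_run_pending blocks injection unconditionally, and
 * nested_exit_on_intr() only decides whether the interrupt is
 * bounced from L2 to L1 via a nested vmexit.
 */
#include <stdbool.h>
#include <stdio.h>

struct vcpu_model {
	bool guest_mode;          /* vcpu is currently running as L2 */
	bool nested_run_pending;  /* L1->L2 vmentry not yet completed */
	bool exit_on_intr;        /* L1 traps external interrupts */
	bool rflags_if;           /* GUEST_RFLAGS.IF */
	bool intr_blocked;        /* STI/MOV-SS interruptibility shadow */
};

static int interrupt_allowed(struct vcpu_model *v)
{
	if (v->guest_mode) {
		/* never interrupt a pending L1->L2 transition */
		if (v->nested_run_pending)
			return 0;
		if (v->exit_on_intr) {
			/*
			 * here the kernel calls nested_vmx_vmexit() and sets
			 * vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
			 * afterwards we are logically back in L1
			 */
			v->guest_mode = false;
		}
	}
	return v->rflags_if && !v->intr_blocked;
}

int main(void)
{
	struct vcpu_model v = {
		.guest_mode = true,
		.nested_run_pending = true,
		.exit_on_intr = true,
		.rflags_if = true,
		.intr_blocked = false,
	};

	/* pending L1->L2 entry: injection must be refused */
	printf("run pending:   %d\n", interrupt_allowed(&v));

	/* entry completed: interrupt is bounced to L1 and allowed there */
	v.nested_run_pending = false;
	printf("run completed: %d\n", interrupt_allowed(&v));

	return 0;
}

Run as written, the first call prints 0 (the pending vmentry blocks injection) and the second prints 1 (the interrupt is delivered after the modeled switch back to L1), matching the ordering described in the commit message: the nested_run_pending check now comes first and no longer depends on nested_exit_on_intr.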
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4fb72a764dbd..5e6391112275 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4325,16 +4325,20 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
-	if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
+	if (is_guest_mode(vcpu)) {
 		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-		if (to_vmx(vcpu)->nested.nested_run_pending ||
-		    (vmcs12->idt_vectoring_info_field &
-		     VECTORING_INFO_VALID_MASK))
+
+		if (to_vmx(vcpu)->nested.nested_run_pending)
 			return 0;
-		nested_vmx_vmexit(vcpu);
-		vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
-		vmcs12->vm_exit_intr_info = 0;
-		/* fall through to normal code, but now in L1, not L2 */
+		if (nested_exit_on_intr(vcpu)) {
+			nested_vmx_vmexit(vcpu);
+			vmcs12->vm_exit_reason =
+				EXIT_REASON_EXTERNAL_INTERRUPT;
+			vmcs12->vm_exit_intr_info = 0;
+			/*
+			 * fall through to normal code, but now in L1, not L2
+			 */
+		}
 	}
 
 	return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&