Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--  arch/x86/kvm/vmx.c  46
1 file changed, 31 insertions, 15 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 84e62acf2dd8..29fd8af5c347 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -198,7 +198,8 @@ struct loaded_vmcs {
 	struct vmcs *vmcs;
 	struct vmcs *shadow_vmcs;
 	int cpu;
-	int launched;
+	bool launched;
+	bool nmi_known_unmasked;
 	struct list_head loaded_vmcss_on_cpu_link;
 };
 
@@ -2326,6 +2327,11 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 	__vmx_load_host_state(to_vmx(vcpu));
 }
 
+static bool emulation_required(struct kvm_vcpu *vcpu)
+{
+	return emulate_invalid_guest_state && !guest_state_valid(vcpu);
+}
+
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
 
 /*
@@ -2363,6 +2369,8 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
+	unsigned long old_rflags = vmx_get_rflags(vcpu);
+
 	__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
 	to_vmx(vcpu)->rflags = rflags;
 	if (to_vmx(vcpu)->rmode.vm86_active) {
@@ -2370,6 +2378,9 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
 	}
 	vmcs_writel(GUEST_RFLAGS, rflags);
+
+	if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
+		to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
 }
 
 static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
@@ -3857,11 +3868,6 @@ static __init int alloc_kvm_area(void)
 	return 0;
 }
 
-static bool emulation_required(struct kvm_vcpu *vcpu)
-{
-	return emulate_invalid_guest_state && !guest_state_valid(vcpu);
-}
-
 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
 			  struct kvm_segment *save)
 {
@@ -5510,10 +5516,8 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (!is_guest_mode(vcpu)) {
-		++vcpu->stat.nmi_injections;
-		vmx->nmi_known_unmasked = false;
-	}
+	++vcpu->stat.nmi_injections;
+	vmx->loaded_vmcs->nmi_known_unmasked = false;
 
 	if (vmx->rmode.vm86_active) {
 		if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
@@ -5527,16 +5531,21 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 
 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
 {
-	if (to_vmx(vcpu)->nmi_known_unmasked)
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	bool masked;
+
+	if (vmx->loaded_vmcs->nmi_known_unmasked)
 		return false;
-	return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
+	masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
+	vmx->loaded_vmcs->nmi_known_unmasked = !masked;
+	return masked;
 }
 
 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	vmx->nmi_known_unmasked = !masked;
+	vmx->loaded_vmcs->nmi_known_unmasked = !masked;
 	if (masked)
 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
 			      GUEST_INTR_STATE_NMI);
@@ -8736,7 +8745,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 
 	idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
-	if (vmx->nmi_known_unmasked)
+	if (vmx->loaded_vmcs->nmi_known_unmasked)
 		return;
 	/*
 	 * Can't use vmx->exit_intr_info since we're not sure what
@@ -8760,7 +8769,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
 			      GUEST_INTR_STATE_NMI);
 	else
-		vmx->nmi_known_unmasked =
+		vmx->loaded_vmcs->nmi_known_unmasked =
 			!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
 			  & GUEST_INTR_STATE_NMI);
 }
@@ -10488,6 +10497,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 {
 	struct vmcs12 *vmcs12;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
 	u32 exit_qual;
 	int ret;
 
@@ -10512,6 +10522,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 * for misconfigurations which will anyway be caught by the processor
 	 * when using the merged vmcs02.
 	 */
+	if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) {
+		nested_vmx_failValid(vcpu,
+				     VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
+		goto out;
+	}
+
 	if (vmcs12->launch_state == launch) {
 		nested_vmx_failValid(vcpu,
 				     launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
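
Taken together, the hunks above make four changes: struct loaded_vmcs gains a per-VMCS nmi_known_unmasked flag (and launched becomes a bool), so vmcs01 and vmcs02 no longer share one cached NMI-masking state; vmx_inject_nmi() now bumps the stat and clears the cache unconditionally rather than only outside guest mode; vmx_set_rflags() recomputes emulation_required when EFLAGS.VM flips; and nested VMLAUNCH/VMRESUME now fails with VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS while a MOV-SS interrupt shadow is active. As a reading aid, here is the post-patch vmx_get_nmi_mask() reassembled from the hunks above with comments added; a sketch for illustration, not a verbatim copy of the resulting file:

static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool masked;

	/* Fast path: this VMCS is already known to have NMIs unmasked. */
	if (vmx->loaded_vmcs->nmi_known_unmasked)
		return false;

	/*
	 * Slow path: read the interruptibility field from the hardware
	 * VMCS, then refill the per-VMCS cache so the next call can take
	 * the fast path.
	 */
	masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
	vmx->loaded_vmcs->nmi_known_unmasked = !masked;
	return masked;
}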