Diffstat (limited to 'arch/x86/kvm/vmx.c')
 arch/x86/kvm/vmx.c | 43 +++++++++++++++++++++++++++++++++++------------
 1 file changed, 31 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1d26f3c4985b..533a327372c8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6983,7 +6983,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
          * Cause the #SS fault with 0 error code in VM86 mode.
          */
         if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
-                if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
+                if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) {
                         if (vcpu->arch.halt_request) {
                                 vcpu->arch.halt_request = 0;
                                 return kvm_vcpu_halt(vcpu);
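This hunk and the similar ones below are a mechanical rename of emulate_instruction() to kvm_emulate_instruction() at each VMX call site; the helper itself is defined outside this file and is not part of this diff. A minimal sketch of its presumed shape, assuming the rename keeps the old wrapper semantics of forwarding to x86_emulate_instruction() with a zero CR2 and no pre-fetched instruction bytes:

int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
{
        /* Zero CR2 and a NULL insn buffer: the emulator fetches from guest RIP. */
        return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}
EXPORT_SYMBOL_GPL(kvm_emulate_instruction);

Since every converted call here already used the (vcpu, emulation_type) signature, behavior at these call sites is unchanged.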
@@ -7054,7 +7054,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
 
         if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
                 WARN_ON_ONCE(!enable_vmware_backdoor);
-                er = emulate_instruction(vcpu,
+                er = kvm_emulate_instruction(vcpu,
                         EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
                 if (er == EMULATE_USER_EXIT)
                         return 0;
@@ -7157,7 +7157,7 @@ static int handle_io(struct kvm_vcpu *vcpu)
         ++vcpu->stat.io_exits;
 
         if (string)
-                return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+                return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 
         port = exit_qualification >> 16;
         size = (exit_qualification & 7) + 1;
@@ -7231,7 +7231,7 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
 static int handle_desc(struct kvm_vcpu *vcpu)
 {
         WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
-        return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+        return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_cr(struct kvm_vcpu *vcpu)
@@ -7480,7 +7480,7 @@ static int handle_vmcall(struct kvm_vcpu *vcpu)
 
 static int handle_invd(struct kvm_vcpu *vcpu)
 {
-        return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+        return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_invlpg(struct kvm_vcpu *vcpu)
@@ -7547,7 +7547,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
                         return kvm_skip_emulated_instruction(vcpu);
                 }
         }
-        return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+        return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
@@ -7704,8 +7704,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
                 if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
                         return kvm_skip_emulated_instruction(vcpu);
                 else
-                        return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
-                                                       NULL, 0) == EMULATE_DONE;
+                        return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) ==
+                                                       EMULATE_DONE;
         }
 
         return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
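Note this was the one converted call that passed the faulting GPA to x86_emulate_instruction() directly, and the new two-argument form drops it. That is plausibly safe because EMULTYPE_SKIP only decodes the instruction at the current RIP in order to skip it, so this path should not need the GPA; this rationale is inferred from the visible change, not stated in the diff.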
@@ -7748,7 +7748,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
                 if (kvm_test_request(KVM_REQ_EVENT, vcpu))
                         return 1;
 
-                err = emulate_instruction(vcpu, 0);
+                err = kvm_emulate_instruction(vcpu, 0);
 
                 if (err == EMULATE_USER_EXIT) {
                         ++vcpu->stat.mmio_exits;
@@ -12537,8 +12537,11 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
         bool from_vmentry = !!exit_qual;
         u32 dummy_exit_qual;
+        u32 vmcs01_cpu_exec_ctrl;
         int r = 0;
 
+        vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+
         enter_guest_mode(vcpu);
 
         if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
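Reading the control this early matters: vmcs_read32() accesses whichever VMCS is currently loaded, so L1's CPU_BASED_VM_EXEC_CONTROL can only be snapshotted while vmcs01 is still active, before this function switches the CPU to vmcs02. The saved value feeds the pending-event check added in the next hunk.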
@@ -12575,6 +12578,25 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
         }
 
         /*
+         * If L1 had a pending IRQ/NMI when it executed
+         * VMLAUNCH/VMRESUME which wasn't delivered because it was
+         * disallowed (e.g. interrupts disabled), L0 needs to
+         * evaluate if this pending event should cause an exit from
+         * L2 to L1 or be delivered directly to L2 (e.g. in case L1
+         * doesn't intercept EXTERNAL_INTERRUPT).
+         *
+         * Usually this would be handled by L0 requesting an
+         * IRQ/NMI window by setting the VMCS accordingly. However,
+         * this setting was done on VMCS01 and now VMCS02 is active
+         * instead. Thus, we force L0 to perform pending event
+         * evaluation by requesting a KVM_REQ_EVENT.
+         */
+        if (vmcs01_cpu_exec_ctrl &
+            (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) {
+                kvm_make_request(KVM_REQ_EVENT, vcpu);
+        }
+
+        /*
          * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
          * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
          * returned as far as L1 is concerned. It will only return (and set
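For reference, the consumer of KVM_REQ_EVENT is the generic entry path in arch/x86/kvm/x86.c, which is not part of this diff. A simplified sketch of the flow, with the caveat that inject_pending_event() is a static helper there and the wrapper function name below is illustrative only:

/*
 * Sketch: if KVM_REQ_EVENT is pending, the next guest entry
 * re-evaluates pending events. For a vCPU in guest mode, injection
 * first consults the nested hypervisor (->check_nested_events()),
 * which decides whether a pending IRQ/NMI must cause a VM-exit from
 * L2 to L1 or may be delivered directly to L2.
 */
static void sketch_reevaluate_pending_events(struct kvm_vcpu *vcpu)
{
        if (kvm_check_request(KVM_REQ_EVENT, vcpu))
                inject_pending_event(vcpu, false);      /* may exit to L1 */
}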
@@ -13988,9 +14010,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
             check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
                 return -EINVAL;
 
-        if (kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING)
-                vmx->nested.nested_run_pending = 1;
-
         vmx->nested.dirty_vmcs12 = true;
         ret = enter_vmx_non_root_mode(vcpu, NULL);
         if (ret)
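Where the KVM_STATE_NESTED_RUN_PENDING flag is handled after this deletion is not visible in these hunks; presumably the assignment moves earlier in vmx_set_nested_state() so that nested_run_pending is already set before enter_vmx_non_root_mode() runs, but that cannot be confirmed from this diff alone.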