about summary refs log tree commit diff stats
path: root/arch/x86/kvm/vmx.c
diff options
context:
space:
mode:
authorLadi Prosek <lprosek@redhat.com>2016-11-30 10:03:09 -0500
committerPaolo Bonzini <pbonzini@redhat.com>2016-12-08 09:31:09 -0500
commitee146c1c100dbe9ca92252be2e901b957476b253 (patch)
treee43f2b2d96b96a45d8c612c7a778c5c96c453b15 /arch/x86/kvm/vmx.c
parent7ca29de21362de242025fbc1c22436e19e39dddc (diff)
KVM: nVMX: propagate errors from prepare_vmcs02
It is possible that prepare_vmcs02 fails to load the guest state. This patch adds the proper error handling for such a case. L1 will receive an INVALID_STATE vmexit with the appropriate exit qualification if it happens. A failure to set guest CR3 is the only error propagated from prepare_vmcs02 at the moment. Signed-off-by: Ladi Prosek <lprosek@redhat.com> Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--arch/x86/kvm/vmx.c23
1 file changed, 19 insertions, 4 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bcad2eb11404..39a389f17f4a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9976,8 +9976,11 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
9976 * needs. In addition to modifying the active vmcs (which is vmcs02), this 9976 * needs. In addition to modifying the active vmcs (which is vmcs02), this
9977 * function also has additional necessary side-effects, like setting various 9977 * function also has additional necessary side-effects, like setting various
9978 * vcpu->arch fields. 9978 * vcpu->arch fields.
9979 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
9980 * is assigned to entry_failure_code on failure.
9979 */ 9981 */
9980static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 9982static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
9983 unsigned long *entry_failure_code)
9981{ 9984{
9982 struct vcpu_vmx *vmx = to_vmx(vcpu); 9985 struct vcpu_vmx *vmx = to_vmx(vcpu);
9983 u32 exec_control; 9986 u32 exec_control;
@@ -10306,8 +10309,12 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
10306 nested_ept_enabled) { 10309 nested_ept_enabled) {
10307 vcpu->arch.cr3 = vmcs12->guest_cr3; 10310 vcpu->arch.cr3 = vmcs12->guest_cr3;
10308 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); 10311 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
10309 } else 10312 } else {
10310 kvm_set_cr3(vcpu, vmcs12->guest_cr3); 10313 if (kvm_set_cr3(vcpu, vmcs12->guest_cr3)) {
10314 *entry_failure_code = ENTRY_FAIL_DEFAULT;
10315 return 1;
10316 }
10317 }
10311 10318
10312 kvm_mmu_reset_context(vcpu); 10319 kvm_mmu_reset_context(vcpu);
10313 10320
@@ -10326,6 +10333,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
10326 10333
10327 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); 10334 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
10328 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip); 10335 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
10336 return 0;
10329} 10337}
10330 10338
10331/* 10339/*
@@ -10340,6 +10348,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
10340 struct loaded_vmcs *vmcs02; 10348 struct loaded_vmcs *vmcs02;
10341 bool ia32e; 10349 bool ia32e;
10342 u32 msr_entry_idx; 10350 u32 msr_entry_idx;
10351 unsigned long exit_qualification;
10343 10352
10344 if (!nested_vmx_check_permission(vcpu)) 10353 if (!nested_vmx_check_permission(vcpu))
10345 return 1; 10354 return 1;
@@ -10502,7 +10511,13 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
10502 10511
10503 vmx_segment_cache_clear(vmx); 10512 vmx_segment_cache_clear(vmx);
10504 10513
10505 prepare_vmcs02(vcpu, vmcs12); 10514 if (prepare_vmcs02(vcpu, vmcs12, &exit_qualification)) {
10515 leave_guest_mode(vcpu);
10516 vmx_load_vmcs01(vcpu);
10517 nested_vmx_entry_failure(vcpu, vmcs12,
10518 EXIT_REASON_INVALID_STATE, exit_qualification);
10519 return 1;
10520 }
10506 10521
10507 msr_entry_idx = nested_vmx_load_msr(vcpu, 10522 msr_entry_idx = nested_vmx_load_msr(vcpu,
10508 vmcs12->vm_entry_msr_load_addr, 10523 vmcs12->vm_entry_msr_load_addr,