 arch/x86/kvm/vmx.c | 30 ++++++++----------------------
 1 file changed, 8 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 283aa8601833..98e82ee1e699 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7258,9 +7258,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
 static int handle_vmclear(struct kvm_vcpu *vcpu)
 {
         struct vcpu_vmx *vmx = to_vmx(vcpu);
+        u32 zero = 0;
         gpa_t vmptr;
-        struct vmcs12 *vmcs12;
-        struct page *page;
 
         if (!nested_vmx_check_permission(vcpu))
                 return 1;
@@ -7271,22 +7270,9 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
         if (vmptr == vmx->nested.current_vmptr)
                 nested_release_vmcs12(vmx);
 
-        page = nested_get_page(vcpu, vmptr);
-        if (page == NULL) {
-                /*
-                 * For accurate processor emulation, VMCLEAR beyond available
-                 * physical memory should do nothing at all. However, it is
-                 * possible that a nested vmx bug, not a guest hypervisor bug,
-                 * resulted in this case, so let's shut down before doing any
-                 * more damage:
-                 */
-                kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
-                return 1;
-        }
-        vmcs12 = kmap(page);
-        vmcs12->launch_state = 0;
-        kunmap(page);
-        nested_release_page(page);
+        kvm_vcpu_write_guest(vcpu,
+                        vmptr + offsetof(struct vmcs12, launch_state),
+                        &zero, sizeof(zero));
 
         nested_free_vmcs02(vmx, vmptr);
 
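For reference, this is roughly how handle_vmclear() reads with the two hunks above applied. The code between the hunks (reading and validating vmptr) is elided, and the block comment is a summary of the change rather than text taken from the patch:

static int handle_vmclear(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 zero = 0;
        gpa_t vmptr;

        if (!nested_vmx_check_permission(vcpu))
                return 1;

        /* ... vmptr is read and validated here (context not shown) ... */

        if (vmptr == vmx->nested.current_vmptr)
                nested_release_vmcs12(vmx);

        /*
         * Clear launch_state by writing a zero directly into guest memory
         * at the field's offset, instead of mapping the page in the host.
         * If the guest physical address is not backed by memory, the failed
         * write is simply ignored, which matches the behaviour the removed
         * comment describes for real hardware, rather than requesting
         * KVM_REQ_TRIPLE_FAULT.
         */
        kvm_vcpu_write_guest(vcpu,
                        vmptr + offsetof(struct vmcs12, launch_state),
                        &zero, sizeof(zero));

        nested_free_vmcs02(vmx, vmptr);

        /* ... remainder of the function unchanged (context not shown) ... */
}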
@@ -9694,10 +9680,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
                 return false;
 
         page = nested_get_page(vcpu, vmcs12->msr_bitmap);
-        if (!page) {
-                WARN_ON(1);
+        if (!page)
                 return false;
-        }
         msr_bitmap_l1 = (unsigned long *)kmap(page);
 
         memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
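The resulting error path is shown below; the comment gives one reading of why the WARN_ON is dropped (the bitmap address is supplied by the L1 guest, so an unbacked page is not a host bug), which the hunk itself does not state:

        /*
         * vmcs12->msr_bitmap is guest-controlled, so an address that is not
         * backed by memory is not a KVM bug; fall back to intercepting all
         * MSRs instead of warning.
         */
        page = nested_get_page(vcpu, vmcs12->msr_bitmap);
        if (!page)
                return false;
        msr_bitmap_l1 = (unsigned long *)kmap(page);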
@@ -11121,8 +11105,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
  */
 static void vmx_leave_nested(struct kvm_vcpu *vcpu)
 {
-        if (is_guest_mode(vcpu))
+        if (is_guest_mode(vcpu)) {
+                to_vmx(vcpu)->nested.nested_run_pending = 0;
                 nested_vmx_vmexit(vcpu, -1, 0, 0);
+        }
         free_nested(to_vmx(vcpu));
 }
 
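The function after this hunk is small enough to show whole; the comment states the presumed rationale for clearing nested_run_pending before the forced exit, which the patch does not spell out:

static void vmx_leave_nested(struct kvm_vcpu *vcpu)
{
        if (is_guest_mode(vcpu)) {
                /*
                 * This is a forced exit from L2: there is no longer a
                 * VMLAUNCH/VMRESUME to complete, so do not leave it
                 * marked as pending across the synthetic vmexit.
                 */
                to_vmx(vcpu)->nested.nested_run_pending = 0;
                nested_vmx_vmexit(vcpu, -1, 0, 0);
        }
        free_nested(to_vmx(vcpu));
}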
