 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/vmx.c              | 57 ++++++++++++++++++++++++---------------
 arch/x86/kvm/x86.c              |  3 ++-
 3 files changed, 45 insertions(+), 16 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8d1587092851..7892530cbacf 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1071,6 +1071,7 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
+bool pdptrs_changed(struct kvm_vcpu *vcpu);
 
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			const void *val, int bytes);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 39a389f17f4a..a0d6e59f4f34 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9969,6 +9969,44 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 }
 
 /*
+ * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
+ * emulating VM entry into a guest with EPT enabled.
+ * Returns 0 on success, 1 on failure. Invalid state exit qualification code
+ * is assigned to entry_failure_code on failure.
+ */
+static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
+			       unsigned long *entry_failure_code)
+{
+	unsigned long invalid_mask;
+
+	if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
+		invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
+		if (cr3 & invalid_mask) {
+			*entry_failure_code = ENTRY_FAIL_DEFAULT;
+			return 1;
+		}
+
+		/*
+		 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
+		 * must not be dereferenced.
+		 */
+		if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
+		    !nested_ept) {
+			if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
+				*entry_failure_code = ENTRY_FAIL_PDPTE;
+				return 1;
+			}
+		}
+
+		vcpu->arch.cr3 = cr3;
+		__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+	}
+
+	kvm_mmu_reset_context(vcpu);
+	return 0;
+}
+
+/*
  * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
  * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
  * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
@@ -10300,21 +10338,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	/* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
 	vmx_set_efer(vcpu, vcpu->arch.efer);
 
-	/*
-	 * Shadow page tables on either EPT or shadow page tables.
-	 * If PAE and EPT are both on, CR3 is not used by the CPU and must not
-	 * be dereferenced.
-	 */
-	if (is_pae(vcpu) && is_paging(vcpu) && !is_long_mode(vcpu) &&
-	    nested_ept_enabled) {
-		vcpu->arch.cr3 = vmcs12->guest_cr3;
-		__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
-	} else {
-		if (kvm_set_cr3(vcpu, vmcs12->guest_cr3)) {
-			*entry_failure_code = ENTRY_FAIL_DEFAULT;
-			return 1;
-		}
-	}
+	/* Shadow page tables on either EPT or shadow page tables. */
+	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled,
+				entry_failure_code))
+		return 1;
 
 	kvm_mmu_reset_context(vcpu);
 
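
As an aside (not part of the patch), a minimal standalone C sketch of the MAXPHYADDR
check that the new nested_vmx_load_cr3() applies before accepting a CR3 value: any set
bit at or above the guest's physical address width makes the value invalid. The helper
name cr3_within_maxphyaddr and the literal width 36 are made up for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Reject a CR3 whose address bits exceed the guest's MAXPHYADDR. */
static bool cr3_within_maxphyaddr(uint64_t cr3, unsigned int maxphyaddr)
{
	/* Same idea as invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu). */
	uint64_t invalid_mask = ~0ULL << maxphyaddr;

	return (cr3 & invalid_mask) == 0;
}

int main(void)
{
	/* With a hypothetical 36-bit guest physical address width: */
	printf("%d\n", cr3_within_maxphyaddr(0x12345000ULL, 36)); /* 1: accepted */
	printf("%d\n", cr3_within_maxphyaddr(1ULL << 40, 36));    /* 0: rejected (ENTRY_FAIL_DEFAULT in the patch) */
	return 0;
}
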
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index dd6b41ea61b6..f0aee98e7492 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -566,7 +566,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(load_pdptrs);
 
-static bool pdptrs_changed(struct kvm_vcpu *vcpu)
+bool pdptrs_changed(struct kvm_vcpu *vcpu)
 {
 	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
 	bool changed = true;
@@ -592,6 +592,7 @@ out:
 
 	return changed;
 }
+EXPORT_SYMBOL_GPL(pdptrs_changed);
 
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {