author		Joerg Roedel <joerg.roedel@amd.com>	2010-09-10 11:31:03 -0400
committer	Avi Kivity <avi@redhat.com>	2010-10-24 04:52:46 -0400
commit		4b16184c1ccafa4b0c188c622ea532fb90e6f5b0 (patch)
tree		27bac1b008fc41f2b27a70faecf3e1ca1b7473ee /arch/x86/kvm
parent		5bd2edc341d11af175e759a546e4335ba3e0584f (diff)
KVM: SVM: Initialize Nested Nested MMU context on VMRUN
This patch adds code to initialize the Nested Nested Paging MMU context
when the L1 guest executes a VMRUN instruction and has nested paging
enabled in its VMCB.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
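For context, the new code path below triggers only when the L1 guest's VMCB
requests nested paging. A minimal sketch of the L1 side, assuming the
vmcb_control_area layout from arch/x86/include/asm/svm.h; the helper name
and its caller are hypothetical, not part of this patch:

/* Hypothetical L1 hypervisor helper: request nested paging for the L2
 * guest it is about to VMRUN.  Needs <asm/svm.h> for struct vmcb.
 * l2_npt_root is the physical address of the NPT root L1 built for L2.
 */
static void l1_enable_nested_paging(struct vmcb *vmcb, u64 l2_npt_root)
{
	vmcb->control.nested_ctl = 1;           /* NP_ENABLE (bit 0) */
	vmcb->control.nested_cr3 = l2_npt_root; /* captured below into
						 * svm->nested.nested_cr3 */
}

With nested_ctl set, nested_vmcb_checks() additionally requires npt_enabled
on the host, and nested_svm_vmrun() unloads the current MMU and installs the
nested NPT context.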
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/mmu.c	 1
-rw-r--r--	arch/x86/kvm/svm.c	50
2 files changed, 42 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 95cbeed74cf9..6e248d80e350 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2962,6 +2962,7 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu)
 {
 	mmu_free_roots(vcpu);
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_unload);
 
 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 				  struct kvm_mmu_page *sp,
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9a9a4405b571..3184772dedfe 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -294,6 +294,15 @@ static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
 	force_new_asid(vcpu);
 }
 
+static int get_npt_level(void)
+{
+#ifdef CONFIG_X86_64
+	return PT64_ROOT_LEVEL;
+#else
+	return PT32E_ROOT_LEVEL;
+#endif
+}
+
 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	vcpu->arch.efer = efer;
@@ -1630,6 +1639,26 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu)
 	nested_svm_vmexit(svm);
 }
 
+static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
+{
+	int r;
+
+	r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
+
+	vcpu->arch.mmu.set_cr3           = nested_svm_set_tdp_cr3;
+	vcpu->arch.mmu.get_cr3           = nested_svm_get_tdp_cr3;
+	vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
+	vcpu->arch.mmu.shadow_root_level = get_npt_level();
+	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
+
+	return r;
+}
+
+static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
+}
+
 static int nested_svm_check_permissions(struct vcpu_svm *svm)
 {
 	if (!(svm->vcpu.arch.efer & EFER_SVME)
@@ -1998,6 +2027,8 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 	kvm_clear_exception_queue(&svm->vcpu);
 	kvm_clear_interrupt_queue(&svm->vcpu);
 
+	svm->nested.nested_cr3 = 0;
+
 	/* Restore selected save entries */
 	svm->vmcb->save.es = hsave->save.es;
 	svm->vmcb->save.cs = hsave->save.cs;
@@ -2024,6 +2055,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 
 	nested_svm_unmap(page);
 
+	nested_svm_uninit_mmu_context(&svm->vcpu);
 	kvm_mmu_reset_context(&svm->vcpu);
 	kvm_mmu_load(&svm->vcpu);
 
@@ -2071,6 +2103,9 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
 	if (vmcb->control.asid == 0)
 		return false;
 
+	if (vmcb->control.nested_ctl && !npt_enabled)
+		return false;
+
 	return true;
 }
 
@@ -2143,6 +2178,12 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	else
 		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
 
+	if (nested_vmcb->control.nested_ctl) {
+		kvm_mmu_unload(&svm->vcpu);
+		svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
+		nested_svm_init_mmu_context(&svm->vcpu);
+	}
+
 	/* Load the nested guest state */
 	svm->vmcb->save.es = nested_vmcb->save.es;
 	svm->vmcb->save.cs = nested_vmcb->save.cs;
@@ -3415,15 +3456,6 @@ static bool svm_cpu_has_accelerated_tpr(void)
 	return false;
 }
 
-static int get_npt_level(void)
-{
-#ifdef CONFIG_X86_64
-	return PT64_ROOT_LEVEL;
-#else
-	return PT32E_ROOT_LEVEL;
-#endif
-}
-
 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 {
 	return 0;
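For readers without the parent commit at hand: the three callbacks installed
by nested_svm_init_mmu_context() were introduced in parent commit
5bd2edc341d1. After VMRUN, vcpu->arch.mmu shadows L1's nested page table
rooted at svm->nested.nested_cr3, vcpu->arch.walk_mmu points at the nested
MMU used for L2 virtual-to-guest-physical translation, and faults that L1's
NPT cannot resolve are reflected back to L1 as #NPF vmexits via
nested_svm_inject_npf_exit(). A rough sketch of the get_cr3 callback, as it
plausibly reads given that VMRUN captures nested_cr3 above (paraphrased, not
the verbatim parent-commit code; svm.c context, so to_svm() and struct
vcpu_svm are available):

/* Sketch: the shadow MMU walks L1's nested page table, so its "guest
 * CR3" is the nested_cr3 value captured from the nested VMCB at VMRUN.
 */
static u64 nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.nested_cr3;
}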