 arch/x86/kvm/mmu.c | 27 +++++++++++++++++++++++++++
 arch/x86/kvm/mmu.h |  2 ++
 arch/x86/kvm/vmx.c | 41 ++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 69 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a215c41b5176..992fde984e25 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3795,6 +3795,33 @@ int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
+int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
+		bool execonly)
+{
+	ASSERT(vcpu);
+	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+
+	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
+
+	context->nx = true;
+	context->new_cr3 = paging_new_cr3;
+	context->page_fault = ept_page_fault;
+	context->gva_to_gpa = ept_gva_to_gpa;
+	context->sync_page = ept_sync_page;
+	context->invlpg = ept_invlpg;
+	context->update_pte = ept_update_pte;
+	context->free = paging_free;
+	context->root_level = context->shadow_root_level;
+	context->root_hpa = INVALID_PAGE;
+	context->direct_map = false;
+
+	update_permission_bitmask(vcpu, context, true);
+	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
+
 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
 	int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
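
The ept_* callbacks installed by kvm_init_shadow_ept_mmu() are not defined in this hunk. A minimal sketch of where they come from, assuming the PTTYPE_EPT support added elsewhere in the nEPT series (the macro names below are that series', not this hunk's): mmu.c compiles paging_tmpl.h one extra time with the EPT entry format, stamping out ept_page_fault(), ept_gva_to_gpa(), ept_sync_page(), ept_invlpg() and ept_update_pte() alongside the existing 32-bit and 64-bit walkers.

    /* Sketch, assuming the PTTYPE_EPT instantiation from the same series:
     * each inclusion of paging_tmpl.h generates a walker for one
     * page-table format; this one emits the ept_* functions wired up
     * in kvm_init_shadow_ept_mmu() above. */
    #define PTTYPE PTTYPE_EPT
    #include "paging_tmpl.h"
    #undef PTTYPE
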
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 5b59c573aba7..77e044a0f5f7 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -71,6 +71,8 @@ enum {
 
 int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
+int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
+		bool execonly);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0d18ed31671c..2ae0aa4461e8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1046,6 +1046,11 @@ static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12,
 	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
 }
 
+static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
+{
+	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
+}
+
 static inline bool is_exception(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
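
nested_cpu_has_ept() follows the existing nested_cpu_has2() pattern for secondary processor-based execution controls. For context, nested_cpu_has2() (already present in vmx.c; quoted from memory as a sketch, not part of this patch) only honors a secondary control bit if L1 also enabled secondary controls in the primary controls field:

    /* Sketch of the pre-existing helper used above, quoted from memory. */
    static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
    {
            return (vmcs12->cpu_based_vm_exec_control &
                            CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
                    (vmcs12->secondary_vm_exec_control & bit);
    }
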
@@ -7367,6 +7372,33 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
 	vmcs12->guest_physical_address = fault->address;
 }
 
+/* Callbacks for nested_ept_init_mmu_context: */
+
+static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
+{
+	/* return the page table to be shadowed - in our case, EPT12 */
+	return get_vmcs12(vcpu)->ept_pointer;
+}
+
+static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
+{
+	int r = kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
+			nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT);
+
+	vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
+	vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
+	vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
+
+	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
+
+	return r;
+}
+
+static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
+}
+
 /*
  * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
  * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
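
The effect of nested_ept_init_mmu_context() is a two-part MMU: vcpu->arch.mmu now shadows EPT12 (its get_cr3() hands back vmcs12->ept_pointer instead of a CR3), while vcpu->arch.walk_mmu is redirected to nested_mmu so that L2 virtual addresses are still walked through L2's own page tables. A sketch of the resulting translation chain, with terminology assumed from the nEPT design rather than spelled out in this hunk:

    /*
     * Sketch of the translation chain while L2 runs:
     *
     *   L2 gva --(L2 page tables, walk_mmu == nested_mmu)--> L2 gpa
     *   L2 gpa --(EPT12, shadowed by arch.mmu)------------> host pa
     *
     * Faults that L1 is responsible for are reflected back through
     * nested_ept_inject_page_fault(), which stores the faulting address
     * in vmcs12->guest_physical_address (see the hunk above).
     */
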
@@ -7587,6 +7619,11 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 		vmx_flush_tlb(vcpu);
 	}
 
+	if (nested_cpu_has_ept(vmcs12)) {
+		kvm_mmu_unload(vcpu);
+		nested_ept_init_mmu_context(vcpu);
+	}
+
 	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
 		vcpu->arch.efer = vmcs12->guest_ia32_efer;
 	else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
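
The ordering in that hunk matters: kvm_mmu_unload() drops the root built under the previous MMU context before nested_ept_init_mmu_context() swaps in the EPT12-shadowing callbacks, so a stale root cannot be carried into the new context. The rationale below is an assumption about intent, not stated in the patch:

    /*
     * Assumed rationale for the ordering in prepare_vmcs02():
     *
     *   kvm_mmu_unload(vcpu);              - free the current root_hpa
     *   nested_ept_init_mmu_context(vcpu); - install the EPT12 shadow MMU
     *
     * A fresh root for the new context is then built lazily by the
     * next kvm_mmu_load() before re-entering the guest.
     */
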
@@ -8059,7 +8096,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
 	kvm_set_cr4(vcpu, vmcs12->host_cr4);
 
-	/* shadow page tables on either EPT or shadow page tables */
+	if (nested_cpu_has_ept(vmcs12))
+		nested_ept_uninit_mmu_context(vcpu);
+
 	kvm_set_cr3(vcpu, vmcs12->host_cr3);
 	kvm_mmu_reset_context(vcpu);
 
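
On the return path to L1, nested_ept_uninit_mmu_context() simply points walk_mmu back at arch.mmu; the kvm_set_cr3()/kvm_mmu_reset_context() calls that follow rebuild L1's ordinary MMU. For a sense of what enables these paths in the first place, here is a hypothetical L1-side vmcs12 setup under which nested_cpu_has_ept() becomes true; the raw EPTP bits and the ept12_root_hpa name are illustrative assumptions, not taken from this patch:

    /* Hypothetical vmcs12 state that enables the nested-EPT paths above.
     * EPTP low bits per the SDM: bits 2:0 = 6 (write-back memory type),
     * bits 5:3 = 3 (4-level walk). ept12_root_hpa is a made-up name for
     * the root of L1's EPT12 table. */
    vmcs12->cpu_based_vm_exec_control |= CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
    vmcs12->secondary_vm_exec_control |= SECONDARY_EXEC_ENABLE_EPT;
    vmcs12->ept_pointer = ept12_root_hpa | (6 << 0) | (3 << 3);
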