Diffstat (limited to 'arch/x86/kvm')
 arch/x86/kvm/mmu.c | 37 ++++++++++++++++++++++++++++++++++++-
 arch/x86/kvm/mmu.h |  1 +
 arch/x86/kvm/x86.c | 17 +++++++++++++++++
 3 files changed, 54 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1e215e8b9377..a26f13bd34e0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2784,11 +2784,46 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
 	return r;
 }
 
+static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
+
+	g_context->get_cr3           = get_cr3;
+	g_context->inject_page_fault = kvm_inject_page_fault;
+
+	/*
+	 * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa. The
+	 * translation of l2_gpa to l1_gpa addresses is done using the
+	 * arch.nested_mmu.gva_to_gpa function. Basically the gva_to_gpa
+	 * functions between mmu and nested_mmu are swapped.
+	 */
+	if (!is_paging(vcpu)) {
+		g_context->root_level = 0;
+		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
+	} else if (is_long_mode(vcpu)) {
+		reset_rsvds_bits_mask(vcpu, g_context, PT64_ROOT_LEVEL);
+		g_context->root_level = PT64_ROOT_LEVEL;
+		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
+	} else if (is_pae(vcpu)) {
+		reset_rsvds_bits_mask(vcpu, g_context, PT32E_ROOT_LEVEL);
+		g_context->root_level = PT32E_ROOT_LEVEL;
+		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
+	} else {
+		reset_rsvds_bits_mask(vcpu, g_context, PT32_ROOT_LEVEL);
+		g_context->root_level = PT32_ROOT_LEVEL;
+		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
+	}
+
+	return 0;
+}
+
 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.update_pte.pfn = bad_pfn;
 
-	if (tdp_enabled)
+	if (mmu_is_nested(vcpu))
+		return init_kvm_nested_mmu(vcpu);
+	else if (tdp_enabled)
 		return init_kvm_tdp_mmu(vcpu);
 	else
 		return init_kvm_softmmu(vcpu);
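
Note: the dispatch added to init_kvm_mmu() above relies on mmu_is_nested(),
which is introduced elsewhere in this series and is not part of this diff.
A minimal sketch of the check it performs, assuming walk_mmu is pointed at
nested_mmu while an L2 guest runs; treat this as an assumption rather than
the verbatim helper:

	/* Sketch, not verbatim kernel code: a vcpu is considered to be
	 * running with a nested MMU when its page-table walker has been
	 * pointed at the nested_mmu context instead of the regular mmu. */
	static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
	}
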
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 7086ca85d3e7..513abbb5ff46 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -47,6 +47,7 @@
 #define PFERR_USER_MASK (1U << 2)
 #define PFERR_RSVD_MASK (1U << 3)
 #define PFERR_FETCH_MASK (1U << 4)
+#define PFERR_NESTED_MASK (1U << 31)
 
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
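
Note: PFERR_NESTED_MASK uses bit 31, far above the error-code bits the CPU
itself defines (present, write, user, reserved, fetch in bits 0-4), so it can
never collide with a hardware-reported page-fault error code. It is a
software-only tag marking faults raised while translating a nested (L2) guest
physical address. An illustrative consumer, with the function name invented
for this sketch:

	/* Hypothetical helper, for illustration only. */
	static bool fault_is_nested(u32 error_code)
	{
		/* set by translate_nested_gpa() when the L1 NPT walk failed */
		return error_code & PFERR_NESTED_MASK;
	}
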
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 46843ed36dc1..e4c76bf86081 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3489,6 +3489,22 @@ static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 	return gpa;
 }
 
+static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+{
+	gpa_t t_gpa;
+	u32 error;
+
+	BUG_ON(!mmu_is_nested(vcpu));
+
+	/* NPT walks are always user-walks */
+	access |= PFERR_USER_MASK;
+	t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error);
+	if (t_gpa == UNMAPPED_GVA)
+		vcpu->arch.fault.error_code |= PFERR_NESTED_MASK;
+
+	return t_gpa;
+}
+
 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
@@ -5704,6 +5720,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
 	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 	vcpu->arch.mmu.translate_gpa = translate_gpa;
+	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
 	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 	else
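
Note: translate_gpa is the hook the page-table walker uses whenever a gpa read
from guest page tables must be converted before KVM can access it. For an
ordinary vcpu the translate_gpa() shown in context above is a passthrough
(it just returns gpa), while a nested vcpu gets translate_nested_gpa(), which
pushes each L2 gpa through L1's NPT. A hedged sketch of how a walker step
might invoke the hook; walk_one_level() and its surrounding logic are
illustrative, not the committed code:

	/* Illustrative walker step, assuming the hook signature shown above. */
	static int walk_one_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  gpa_t table_gpa, u32 access)
	{
		gpa_t real_gpa;

		/* For nested_mmu this dispatches to translate_nested_gpa(). */
		real_gpa = mmu->translate_gpa(vcpu, table_gpa, access);
		if (real_gpa == UNMAPPED_GVA)
			return -EFAULT; /* fault.error_code now carries
					 * PFERR_NESTED_MASK */

		/* ...fetch the page-table entry at real_gpa... */
		return 0;
	}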