 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/kvm_cache_regs.h   |  7 -------
 arch/x86/kvm/mmu.c              |  5 +++--
 arch/x86/kvm/paging_tmpl.h      |  2 +-
 arch/x86/kvm/svm.c              | 15 +++++++++++++++
 5 files changed, 21 insertions(+), 9 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 307e3cfa28ad..b31a3417a405 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -265,6 +265,7 @@ struct kvm_mmu {
 	void (*new_cr3)(struct kvm_vcpu *vcpu);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
+	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
 	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
 			  bool prefault);
 	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
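
The new hook lets each MMU context supply its own source of PDPTEs instead of every caller reading the cached walk_mmu->pdptrs[] array directly. A minimal sketch of the call pattern, assuming a struct kvm_mmu *mmu for the active context (illustrative only, not part of the patch):

	/* PAE paging: bits 31:30 of the address select one of the four PDPTEs. */
	u64 pdpte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
	if (!is_present_gpte(pdpte))
		return 0;	/* no mapping at the PDPT level */
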
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 3377d53fcd36..544076c4f44b 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -45,13 +45,6 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 	return vcpu->arch.walk_mmu->pdptrs[index];
 }
 
-static inline u64 kvm_pdptr_read_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, int index)
-{
-	load_pdptrs(vcpu, mmu, mmu->get_cr3(vcpu));
-
-	return mmu->pdptrs[index];
-}
-
 static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
 	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8e8da7960dbe..f1b36cf3e3d0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2770,7 +2770,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 
 		ASSERT(!VALID_PAGE(root));
 		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
-			pdptr = kvm_pdptr_read_mmu(vcpu, &vcpu->arch.mmu, i);
+			pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
 			if (!is_present_gpte(pdptr)) {
 				vcpu->arch.mmu.pae_root[i] = 0;
 				continue;
@@ -3318,6 +3318,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->direct_map = true;
 	context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
 	context->get_cr3 = get_cr3;
+	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
 	context->nx = is_nx(vcpu);
 
@@ -3376,6 +3377,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
 	vcpu->arch.walk_mmu->get_cr3 = get_cr3;
+	vcpu->arch.walk_mmu->get_pdptr = kvm_pdptr_read;
 	vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
 
 	return r;
@@ -3386,6 +3388,7 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
 
 	g_context->get_cr3 = get_cr3;
+	g_context->get_pdptr = kvm_pdptr_read;
 	g_context->inject_page_fault = kvm_inject_page_fault;
 
 	/*
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 507e2b844cfa..f6dd9feb201b 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -163,7 +163,7 @@ retry_walk:
 
 #if PTTYPE == 64
 	if (walker->level == PT32E_ROOT_LEVEL) {
-		pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
+		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
 		trace_kvm_mmu_paging_element(pte, walker->level);
 		if (!is_present_gpte(pte))
 			goto error;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2b24a88f2c67..f043168a5ab1 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1844,6 +1844,20 @@ static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
 	return svm->nested.nested_cr3;
 }
 
+static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	u64 cr3 = svm->nested.nested_cr3;
+	u64 pdpte;
+	int ret;
+
+	ret = kvm_read_guest_page(vcpu->kvm, gpa_to_gfn(cr3), &pdpte,
+				  offset_in_page(cr3) + index * 8, 8);
+	if (ret)
+		return 0;
+	return pdpte;
+}
+
 static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
 				   unsigned long root)
 {
@@ -1875,6 +1889,7 @@ static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
 	vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
+	vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
 	vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
 	vcpu->arch.mmu.shadow_root_level = get_npt_level();
 	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
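
For nested NPT the PDPTEs come from the L1-provided nested CR3 rather than from the cached array, so nested_svm_get_tdp_pdptr re-reads the entry from guest memory on each call. A rough sketch of the address arithmetic behind the kvm_read_guest_page() call above; pdpte_gpa() is a hypothetical helper shown only for illustration:

	/*
	 * A PAE PDPT holds four 8-byte entries, so entry 'index' of the
	 * table addressed by cr3 lives at guest-physical address
	 * cr3 + index * 8, i.e.:
	 */
	static u64 pdpte_gpa(u64 cr3, int index)
	{
		return (gpa_to_gfn(cr3) << PAGE_SHIFT) +
		       offset_in_page(cr3) + index * 8;
	}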