 arch/x86/include/asm/kvm_host.h | 9 ++++++---
 arch/x86/kvm/mmu.c              | 2 +-
 arch/x86/kvm/paging_tmpl.h      | 7 ++++---
 arch/x86/kvm/x86.c              | 9 +++++----
 4 files changed, 16 insertions(+), 11 deletions(-)
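
Taken together, the hunks below thread a struct x86_exception * out-parameter through the translate_gpa callback (and the translate_nested_gpa() helper), so a nested guest-physical translation that fails can report the fault it generated back to its caller instead of dropping it in a throwaway local. A minimal standalone model of the pattern follows; it is not kernel code, and the toy types, the 48-bit cutoff, and the identity mapping are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for gpa_t, UNMAPPED_GVA and struct x86_exception. */
typedef uint64_t gpa_model_t;
#define UNMAPPED_GVA_MODEL ((gpa_model_t)-1)

struct exception_model {
	unsigned int vector;		/* e.g. 14 for #PF */
	unsigned int error_code;
};

/*
 * After the patch: the translator fills *exception at the point of
 * failure and returns the unmapped marker, so the caller does not
 * have to synthesize a fault of its own.
 */
static gpa_model_t translate_gpa_model(gpa_model_t gpa, unsigned int access,
				       struct exception_model *exception)
{
	if (gpa >= (1ull << 48)) {	/* pretend this range is unmapped */
		if (exception) {
			exception->vector = 14;
			exception->error_code = access;
		}
		return UNMAPPED_GVA_MODEL;
	}
	return gpa;	/* identity map, like the non-nested stub */
}

int main(void)
{
	struct exception_model fault = { 0, 0 };
	gpa_model_t out = translate_gpa_model(1ull << 50, 0x3, &fault);

	if (out == UNMAPPED_GVA_MODEL)
		printf("fault: vector=%u error_code=%#x\n",
		       fault.vector, fault.error_code);
	return 0;
}
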
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c9896518e54d..2dbde3b7446c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -262,7 +262,8 @@ struct kvm_mmu {
 				  struct x86_exception *fault);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
 			    struct x86_exception *exception);
-	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
+	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+			       struct x86_exception *exception);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
 			 struct kvm_mmu_page *sp);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
@@ -923,7 +924,8 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+			   struct x86_exception *exception);
 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
 			      struct x86_exception *exception);
 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
@@ -943,7 +945,8 @@ void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
 void kvm_enable_tdp(void);
 void kvm_disable_tdp(void);
 
-static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+				  struct x86_exception *exception)
 {
 	return gpa;
 }
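
Note that the non-nested inline stub picks up the same exception parameter even though it can never fault: every implementation reachable through the translate_gpa function pointer has to share one signature, so the stub accepts the pointer, ignores it, and returns gpa unchanged.
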
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5b93a597e0c8..76398fe15df2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3200,7 +3200,7 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
 {
 	if (exception)
 		exception->error_code = 0;
-	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
+	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
 }
 
 static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
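
With the pointer forwarded, nonpaging_gva_to_gpa_nested() pre-clears error_code for the nonpaging case and then lets the nested MMU's translate_gpa record the real fault on failure, rather than the fault details being lost inside the callee.
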
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index df1a044d92de..0ab6c65a2821 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -321,7 +321,8 @@ retry_walk:
 		walker->pte_gpa[walker->level - 1] = pte_gpa;
 
 		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
-					      PFERR_USER_MASK|PFERR_WRITE_MASK);
+					      PFERR_USER_MASK|PFERR_WRITE_MASK,
+					      &walker->fault);
 
 		/*
 		 * FIXME: This can happen if emulation (for of an INS/OUTS
@@ -334,7 +335,7 @@ retry_walk:
 		 * fields.
 		 */
 		if (unlikely(real_gfn == UNMAPPED_GVA))
-			goto error;
+			return 0;
 
 		real_gfn = gpa_to_gfn(real_gfn);
 
@@ -376,7 +377,7 @@ retry_walk:
 	if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
 		gfn += pse36_gfn_delta(pte);
 
-	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access);
+	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
 	if (real_gpa == UNMAPPED_GVA)
 		return 0;
 
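
Both walker call sites now hand &walker->fault to translate_gpa, so when the translation of a page-table frame fails, the fault is already recorded and the walk can simply return 0; the replaced goto error path would have rebuilt walker->fault from the walk's own state, overwriting what the nested translation reported. A small standalone sketch of that control-flow change, with invented names (walker_model, inner_translate) standing in for the real structures:

#include <stdint.h>
#include <stdio.h>

#define UNMAPPED ((uint64_t)-1)

struct fault_model { unsigned int error_code; };

struct walker_model {
	struct fault_model fault;	/* filled at the point of failure */
};

/* Inner translation: records its fault through the out-parameter. */
static uint64_t inner_translate(uint64_t gpa, unsigned int access,
				struct fault_model *fault)
{
	fault->error_code = access;	/* pretend every lookup misses */
	return UNMAPPED;
}

/* Outer walk: on failure it returns 0 without touching the recorded
 * fault, mirroring the goto-error -> return-0 change in the hunk above. */
static int walk_model(struct walker_model *w, uint64_t table_gfn)
{
	if (inner_translate(table_gfn, 0x6, &w->fault) == UNMAPPED)
		return 0;	/* fault already recorded; don't rebuild it */
	return 1;
}

int main(void)
{
	struct walker_model w = { { 0 } };

	if (!walk_model(&w, 0x1000))
		printf("walk failed, error_code=%#x\n", w.fault.error_code);
	return 0;
}
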
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 354194671902..7b25aa2725f8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -459,11 +459,12 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			       gfn_t ngfn, void *data, int offset, int len,
 			       u32 access)
 {
+	struct x86_exception exception;
 	gfn_t real_gfn;
 	gpa_t ngpa;
 
 	ngpa     = gfn_to_gpa(ngfn);
-	real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
+	real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
 	if (real_gfn == UNMAPPED_GVA)
 		return -EFAULT;
 
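
kvm_read_guest_page_mmu() only needs a yes/no answer, so it supplies a local struct x86_exception that it never inspects and keeps returning -EFAULT on an unmapped translation; the new out-parameter pays off in the page-walk paths above, which do propagate the fault.
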
@@ -4065,16 +4066,16 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
 	kvm_x86_ops->get_segment(vcpu, var, seg);
 }
 
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+			   struct x86_exception *exception)
 {
 	gpa_t t_gpa;
-	struct x86_exception exception;
 
 	BUG_ON(!mmu_is_nested(vcpu));
 
 	/* NPT walks are always user-walks */
 	access |= PFERR_USER_MASK;
-	t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
+	t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);
 
 	return t_gpa;
 }
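
The matching change at the other end removes the throwaway local from translate_nested_gpa(): the caller's exception pointer is now forwarded into gva_to_gpa, so a fault raised while walking the nested page tables finally reaches whoever requested the translation.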