 arch/x86/include/asm/kvm_host.h | 13 +++++++++++++
 arch/x86/kvm/mmu.c              | 10 +++++-----
 arch/x86/kvm/x86.c              | 17 ++++++++++-------
 3 files changed, 28 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4915b7c8f2ec..1b3eb8a0a1bc 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -286,9 +286,22 @@ struct kvm_vcpu_arch {
 	u64 ia32_misc_enable_msr;
 	bool tpr_access_reporting;
 
+	/*
+	 * Paging state of the vcpu
+	 *
+	 * If the vcpu runs in guest mode with two level paging this still saves
+	 * the paging mode of the l1 guest. This context is always used to
+	 * handle faults.
+	 */
 	struct kvm_mmu mmu;
 
 	/*
+	 * Pointer to the mmu context currently used for
+	 * gva_to_gpa translations.
+	 */
+	struct kvm_mmu *walk_mmu;
+
+	/*
 	 * This struct is filled with the necessary information to propagate a
 	 * page fault into the guest
 	 */
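
The hunk above splits the vcpu's paging state into a fixed fault-handling context (mmu) and a retargetable translation pointer (walk_mmu). A minimal stand-alone sketch of that relationship follows; the struct contents are reduced to a name string, and the second "nested" context is a hypothetical illustration of where the indirection could lead, not part of this patch:

#include <stdio.h>

struct kvm_mmu {
	const char *name;	/* stand-in for the real context state */
};

struct vcpu_arch {
	struct kvm_mmu mmu;		/* always used to handle faults */
	struct kvm_mmu nested_mmu;	/* hypothetical second context */
	struct kvm_mmu *walk_mmu;	/* context used for gva_to_gpa */
};

int main(void)
{
	struct vcpu_arch arch = {
		.mmu        = { .name = "l1-context" },
		.nested_mmu = { .name = "l2-context" },
	};

	/* Mirrors kvm_arch_vcpu_init() below: by default, translations
	 * walk the same context that handles faults. */
	arch.walk_mmu = &arch.mmu;
	printf("walk_mmu -> %s\n", arch.walk_mmu->name);

	/* A later nested-paging change could repoint the walk context
	 * without touching any gva_to_gpa call site. */
	arch.walk_mmu = &arch.nested_mmu;
	printf("walk_mmu -> %s\n", arch.walk_mmu->name);
	return 0;
}

In this patch walk_mmu only ever aliases &vcpu->arch.mmu, so behavior is unchanged; the pointer is preparation for the case where translations for a nested guest need a different context than the one handling faults.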
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 99367274b97c..cb06adac92b1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2708,7 +2708,7 @@ static int paging32E_init_context(struct kvm_vcpu *vcpu,
 
 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu *context = &vcpu->arch.mmu;
+	struct kvm_mmu *context = vcpu->arch.walk_mmu;
 
 	context->new_cr3 = nonpaging_new_cr3;
 	context->page_fault = tdp_page_fault;
@@ -2767,11 +2767,11 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
-	int r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
+	int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
 
-	vcpu->arch.mmu.set_cr3 = kvm_x86_ops->set_cr3;
-	vcpu->arch.mmu.get_cr3 = get_cr3;
-	vcpu->arch.mmu.inject_page_fault = kvm_inject_page_fault;
+	vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
+	vcpu->arch.walk_mmu->get_cr3 = get_cr3;
+	vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
 
 	return r;
 }
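
Both init paths in mmu.c now configure the context through the walk_mmu pointer instead of naming vcpu->arch.mmu directly, so every later gva_to_gpa call dispatches through one level of indirection. A compilable sketch of that dispatch pattern, with gva_t/gpa_t stand-ins and an identity walker as assumptions (the real walker traverses guest page tables):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gva_t;
typedef uint64_t gpa_t;

struct kvm_mmu {
	/* function pointer filled in by the init path, as in the patch */
	gpa_t (*gva_to_gpa)(gva_t gva, uint32_t access, uint32_t *error);
};

/* Identity translation, standing in for the real page-table walk. */
static gpa_t identity_gva_to_gpa(gva_t gva, uint32_t access, uint32_t *error)
{
	(void)access;
	*error = 0;
	return (gpa_t)gva;
}

int main(void)
{
	struct kvm_mmu mmu = { .gva_to_gpa = identity_gva_to_gpa };
	struct kvm_mmu *walk_mmu = &mmu;	/* the indirection this patch adds */
	uint32_t error;

	/* Call sites only ever see walk_mmu, as in kvm_mmu_gva_to_gpa_read(). */
	gpa_t gpa = walk_mmu->gva_to_gpa(0x1000, 0, &error);
	printf("gpa = 0x%llx, error = %u\n", (unsigned long long)gpa, error);
	return 0;
}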
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2364c2cad891..4196fc719142 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3456,27 +3456,27 @@ static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
-	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
 }
 
 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_FETCH_MASK;
-	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
 }
 
 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_WRITE_MASK;
-	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
 }
 
 /* uses this to access any guest's mapped memory without checking CPL */
 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
 {
-	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error);
 }
 
 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
@@ -3487,7 +3487,8 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 	int r = X86EMUL_CONTINUE;
 
 	while (bytes) {
-		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
+		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
+							    error);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
@@ -3542,8 +3543,9 @@ static int kvm_write_guest_virt_system(gva_t addr, void *val,
 	int r = X86EMUL_CONTINUE;
 
 	while (bytes) {
-		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr,
-						      PFERR_WRITE_MASK, error);
+		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
+							    PFERR_WRITE_MASK,
+							    error);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
@@ -5663,6 +5665,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	kvm = vcpu->kvm;
 
 	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
+	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
 	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 	vcpu->arch.mmu.translate_gpa = translate_gpa;
 	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
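
For reference, the gva_to_gpa wrappers changed in the first x86.c hunk differ only in the access mask they build before the walk_mmu call. A small worked sketch of that composition; the PFERR_* values mirror the x86 hardware page-fault error-code bits, and the get_cpl stub is a placeholder for the real kvm_x86_ops->get_cpl(vcpu):

#include <stdint.h>
#include <stdio.h>

#define PFERR_WRITE_MASK (1U << 1)	/* W/R bit of the PF error code */
#define PFERR_USER_MASK  (1U << 2)	/* U/S bit of the PF error code */
#define PFERR_FETCH_MASK (1U << 4)	/* I/D bit of the PF error code */

/* Stub: pretend the guest is running in user mode (CPL 3). */
static int get_cpl(void) { return 3; }

int main(void)
{
	uint32_t access = (get_cpl() == 3) ? PFERR_USER_MASK : 0;

	printf("read : 0x%x\n", access);			/* 0x04 */
	printf("fetch: 0x%x\n", access | PFERR_FETCH_MASK);	/* 0x14 */
	printf("write: 0x%x\n", access | PFERR_WRITE_MASK);	/* 0x06 */
	/* kvm_mmu_gva_to_gpa_system() passes 0: no CPL check at all. */
	return 0;
}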