author	Joerg Roedel <joerg.roedel@amd.com>	2010-09-10 11:30:49 -0400
committer	Avi Kivity <avi@redhat.com>	2010-10-24 04:52:35 -0400
commit	14dfe855f978181cd611ec018e5ceba860a98545
tree	e81ec5a9162a2588f12c21de415ab8778c655c1f /arch/x86/kvm/x86.c
parent	c30a358d33e0e111f06e54a4a4125371e6b6693c
KVM: X86: Introduce pointer to mmu context used for gva_to_gpa
This patch introduces the walk_mmu pointer which points to the
mmu-context currently used for gva_to_gpa translations.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
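The walk_mmu field itself is added to struct kvm_vcpu_arch in
arch/x86/include/asm/kvm_host.h, which is outside this diffstat. Below is
a minimal standalone sketch of the indirection pattern; the simplified
types and the identity_gva_to_gpa() placeholder are illustrative
assumptions, not the kernel's definitions:

	/*
	 * Minimal sketch of the walk_mmu indirection (illustrative only;
	 * the real struct kvm_vcpu_arch and struct kvm_mmu carry many
	 * more fields).
	 */
	#include <stdio.h>

	typedef unsigned long gva_t;
	typedef unsigned long gpa_t;
	typedef unsigned int u32;

	struct kvm_mmu {
		/* translation callback, as used by the patched helpers */
		gpa_t (*gva_to_gpa)(gva_t gva, u32 access, u32 *error);
	};

	struct kvm_vcpu_arch {
		struct kvm_mmu mmu;       /* the vcpu's primary mmu context */
		struct kvm_mmu *walk_mmu; /* context used for gva_to_gpa walks */
	};

	/* Hypothetical placeholder translation, not the kernel's page walker. */
	static gpa_t identity_gva_to_gpa(gva_t gva, u32 access, u32 *error)
	{
		(void)access;
		*error = 0;
		return (gpa_t)gva;
	}

	int main(void)
	{
		struct kvm_vcpu_arch arch = {
			.mmu = { .gva_to_gpa = identity_gva_to_gpa },
		};
		u32 error;
		gpa_t gpa;

		/* Mirrors the kvm_arch_vcpu_init() hunk below: by default
		 * the walk context is simply the vcpu's own mmu context. */
		arch.walk_mmu = &arch.mmu;

		/* Call sites dereference walk_mmu, as the patched
		 * gva_to_gpa helpers do, so the context can be swapped
		 * later without touching them. */
		gpa = arch.walk_mmu->gva_to_gpa(0x1000, 0, &error);
		printf("gpa = 0x%lx, error = %u\n", (unsigned long)gpa, error);
		return 0;
	}

The extra level of indirection lets later patches repoint walk_mmu at a
different mmu context while the gva_to_gpa call sites stay unchanged.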
Diffstat (limited to 'arch/x86/kvm/x86.c')
 arch/x86/kvm/x86.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2364c2cad891..4196fc719142 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3456,27 +3456,27 @@ static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
-	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
 }
 
 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_FETCH_MASK;
-	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
 }
 
 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_WRITE_MASK;
-	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
 }
 
 /* uses this to access any guest's mapped memory without checking CPL */
 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
 {
-	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error);
 }
 
 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
@@ -3487,7 +3487,8 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 	int r = X86EMUL_CONTINUE;
 
 	while (bytes) {
-		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
+		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
+							    error);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
@@ -3542,8 +3543,9 @@ static int kvm_write_guest_virt_system(gva_t addr, void *val,
 	int r = X86EMUL_CONTINUE;
 
 	while (bytes) {
-		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr,
-						      PFERR_WRITE_MASK, error);
+		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
+							    PFERR_WRITE_MASK,
+							    error);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
@@ -5663,6 +5665,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	kvm = vcpu->kvm;
 
 	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
+	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
 	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 	vcpu->arch.mmu.translate_gpa = translate_gpa;
 	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))