aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorAvi Kivity <avi@redhat.com>2010-11-22 10:53:26 -0500
committerAvi Kivity <avi@redhat.com>2011-01-12 04:29:59 -0500
commitab9ae3138789afacd133a9c4b3d7a3f1578e25c7 (patch)
tree27c81b48c395dffd2049b37bafef940573ae6841 /arch
parent35d3d4a1dd2c1ffd6f2481f6d8ad6c358bb22f07 (diff)
KVM: Push struct x86_exception into the various gva_to_gpa variants
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/kvm_host.h14
-rw-r--r--arch/x86/kvm/mmu.c13
-rw-r--r--arch/x86/kvm/paging_tmpl.h19
-rw-r--r--arch/x86/kvm/x86.c52
4 files changed, 51 insertions(+), 47 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 14524781de13..9980a2484624 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -245,7 +245,7 @@ struct kvm_mmu {
245 void (*inject_page_fault)(struct kvm_vcpu *vcpu); 245 void (*inject_page_fault)(struct kvm_vcpu *vcpu);
246 void (*free)(struct kvm_vcpu *vcpu); 246 void (*free)(struct kvm_vcpu *vcpu);
247 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access, 247 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
248 u32 *error); 248 struct x86_exception *exception);
249 gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access); 249 gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
250 void (*prefetch_page)(struct kvm_vcpu *vcpu, 250 void (*prefetch_page)(struct kvm_vcpu *vcpu,
251 struct kvm_mmu_page *page); 251 struct kvm_mmu_page *page);
@@ -708,10 +708,14 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
708int kvm_mmu_load(struct kvm_vcpu *vcpu); 708int kvm_mmu_load(struct kvm_vcpu *vcpu);
709void kvm_mmu_unload(struct kvm_vcpu *vcpu); 709void kvm_mmu_unload(struct kvm_vcpu *vcpu);
710void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); 710void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
711gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); 711gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
712gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); 712 struct x86_exception *exception);
713gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); 713gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
714gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); 714 struct x86_exception *exception);
715gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
716 struct x86_exception *exception);
717gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
718 struct x86_exception *exception);
715 719
716int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); 720int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
717 721
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 53ff31f3dc6f..9ce041469a8e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2567,18 +2567,19 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
2567} 2567}
2568 2568
2569static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, 2569static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
2570 u32 access, u32 *error) 2570 u32 access, struct x86_exception *exception)
2571{ 2571{
2572 if (error) 2572 if (exception)
2573 *error = 0; 2573 exception->error_code = 0;
2574 return vaddr; 2574 return vaddr;
2575} 2575}
2576 2576
2577static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr, 2577static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
2578 u32 access, u32 *error) 2578 u32 access,
2579 struct x86_exception *exception)
2579{ 2580{
2580 if (error) 2581 if (exception)
2581 *error = 0; 2582 exception->error_code = 0;
2582 return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access); 2583 return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
2583} 2584}
2584 2585
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 2b3d66c7b68d..3ac39de444ec 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -677,7 +677,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
677} 677}
678 678
679static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access, 679static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
680 u32 *error) 680 struct x86_exception *exception)
681{ 681{
682 struct guest_walker walker; 682 struct guest_walker walker;
683 gpa_t gpa = UNMAPPED_GVA; 683 gpa_t gpa = UNMAPPED_GVA;
@@ -688,14 +688,18 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
688 if (r) { 688 if (r) {
689 gpa = gfn_to_gpa(walker.gfn); 689 gpa = gfn_to_gpa(walker.gfn);
690 gpa |= vaddr & ~PAGE_MASK; 690 gpa |= vaddr & ~PAGE_MASK;
691 } else if (error) 691 } else if (exception) {
692 *error = walker.error_code; 692 exception->vector = PF_VECTOR;
693 exception->error_code_valid = true;
694 exception->error_code = walker.error_code;
695 }
693 696
694 return gpa; 697 return gpa;
695} 698}
696 699
697static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr, 700static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
698 u32 access, u32 *error) 701 u32 access,
702 struct x86_exception *exception)
699{ 703{
700 struct guest_walker walker; 704 struct guest_walker walker;
701 gpa_t gpa = UNMAPPED_GVA; 705 gpa_t gpa = UNMAPPED_GVA;
@@ -706,8 +710,11 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
706 if (r) { 710 if (r) {
707 gpa = gfn_to_gpa(walker.gfn); 711 gpa = gfn_to_gpa(walker.gfn);
708 gpa |= vaddr & ~PAGE_MASK; 712 gpa |= vaddr & ~PAGE_MASK;
709 } else if (error) 713 } else if (exception) {
710 *error = walker.error_code; 714 exception->vector = PF_VECTOR;
715 exception->error_code_valid = true;
716 exception->error_code = walker.error_code;
717 }
711 718
712 return gpa; 719 return gpa;
713} 720}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8311ed909c49..a7a7decba43f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3603,51 +3603,47 @@ static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
3603static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access) 3603static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
3604{ 3604{
3605 gpa_t t_gpa; 3605 gpa_t t_gpa;
3606 u32 error; 3606 struct x86_exception exception;
3607 3607
3608 BUG_ON(!mmu_is_nested(vcpu)); 3608 BUG_ON(!mmu_is_nested(vcpu));
3609 3609
3610 /* NPT walks are always user-walks */ 3610 /* NPT walks are always user-walks */
3611 access |= PFERR_USER_MASK; 3611 access |= PFERR_USER_MASK;
3612 t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error); 3612 t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
3613 if (t_gpa == UNMAPPED_GVA) 3613 if (t_gpa == UNMAPPED_GVA)
3614 vcpu->arch.fault.nested = true; 3614 vcpu->arch.fault.nested = true;
3615 3615
3616 return t_gpa; 3616 return t_gpa;
3617} 3617}
3618 3618
3619gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) 3619gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
3620 struct x86_exception *exception)
3620{ 3621{
3621 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 3622 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3622 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error); 3623 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3623} 3624}
3624 3625
3625 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) 3626 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
3627 struct x86_exception *exception)
3626{ 3628{
3627 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 3629 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3628 access |= PFERR_FETCH_MASK; 3630 access |= PFERR_FETCH_MASK;
3629 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error); 3631 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3630} 3632}
3631 3633
3632gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) 3634gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
3635 struct x86_exception *exception)
3633{ 3636{
3634 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 3637 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3635 access |= PFERR_WRITE_MASK; 3638 access |= PFERR_WRITE_MASK;
3636 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error); 3639 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3637} 3640}
3638 3641
3639/* uses this to access any guest's mapped memory without checking CPL */ 3642/* uses this to access any guest's mapped memory without checking CPL */
3640gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) 3643gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
3641{ 3644 struct x86_exception *exception)
3642 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error);
3643}
3644
3645static int make_page_fault(struct x86_exception *exception, u32 error)
3646{ 3645{
3647 exception->vector = PF_VECTOR; 3646 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
3648 exception->error_code_valid = true;
3649 exception->error_code = error;
3650 return X86EMUL_PROPAGATE_FAULT;
3651} 3647}
3652 3648
3653static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 3649static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
@@ -3656,17 +3652,16 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
3656{ 3652{
3657 void *data = val; 3653 void *data = val;
3658 int r = X86EMUL_CONTINUE; 3654 int r = X86EMUL_CONTINUE;
3659 u32 error;
3660 3655
3661 while (bytes) { 3656 while (bytes) {
3662 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, 3657 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
3663 &error); 3658 exception);
3664 unsigned offset = addr & (PAGE_SIZE-1); 3659 unsigned offset = addr & (PAGE_SIZE-1);
3665 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); 3660 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
3666 int ret; 3661 int ret;
3667 3662
3668 if (gpa == UNMAPPED_GVA) 3663 if (gpa == UNMAPPED_GVA)
3669 return make_page_fault(exception, error); 3664 return X86EMUL_PROPAGATE_FAULT;
3670 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread); 3665 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
3671 if (ret < 0) { 3666 if (ret < 0) {
3672 r = X86EMUL_IO_NEEDED; 3667 r = X86EMUL_IO_NEEDED;
@@ -3715,18 +3710,17 @@ static int kvm_write_guest_virt_system(gva_t addr, void *val,
3715{ 3710{
3716 void *data = val; 3711 void *data = val;
3717 int r = X86EMUL_CONTINUE; 3712 int r = X86EMUL_CONTINUE;
3718 u32 error;
3719 3713
3720 while (bytes) { 3714 while (bytes) {
3721 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, 3715 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
3722 PFERR_WRITE_MASK, 3716 PFERR_WRITE_MASK,
3723 &error); 3717 exception);
3724 unsigned offset = addr & (PAGE_SIZE-1); 3718 unsigned offset = addr & (PAGE_SIZE-1);
3725 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); 3719 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
3726 int ret; 3720 int ret;
3727 3721
3728 if (gpa == UNMAPPED_GVA) 3722 if (gpa == UNMAPPED_GVA)
3729 return make_page_fault(exception, error); 3723 return X86EMUL_PROPAGATE_FAULT;
3730 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite); 3724 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
3731 if (ret < 0) { 3725 if (ret < 0) {
3732 r = X86EMUL_IO_NEEDED; 3726 r = X86EMUL_IO_NEEDED;
@@ -3748,7 +3742,6 @@ static int emulator_read_emulated(unsigned long addr,
3748 struct kvm_vcpu *vcpu) 3742 struct kvm_vcpu *vcpu)
3749{ 3743{
3750 gpa_t gpa; 3744 gpa_t gpa;
3751 u32 error_code;
3752 3745
3753 if (vcpu->mmio_read_completed) { 3746 if (vcpu->mmio_read_completed) {
3754 memcpy(val, vcpu->mmio_data, bytes); 3747 memcpy(val, vcpu->mmio_data, bytes);
@@ -3758,10 +3751,10 @@ static int emulator_read_emulated(unsigned long addr,
3758 return X86EMUL_CONTINUE; 3751 return X86EMUL_CONTINUE;
3759 } 3752 }
3760 3753
3761 gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code); 3754 gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, exception);
3762 3755
3763 if (gpa == UNMAPPED_GVA) 3756 if (gpa == UNMAPPED_GVA)
3764 return make_page_fault(exception, error_code); 3757 return X86EMUL_PROPAGATE_FAULT;
3765 3758
3766 /* For APIC access vmexit */ 3759 /* For APIC access vmexit */
3767 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 3760 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -3810,12 +3803,11 @@ static int emulator_write_emulated_onepage(unsigned long addr,
3810 struct kvm_vcpu *vcpu) 3803 struct kvm_vcpu *vcpu)
3811{ 3804{
3812 gpa_t gpa; 3805 gpa_t gpa;
3813 u32 error_code;
3814 3806
3815 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code); 3807 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, exception);
3816 3808
3817 if (gpa == UNMAPPED_GVA) 3809 if (gpa == UNMAPPED_GVA)
3818 return make_page_fault(exception, error_code); 3810 return X86EMUL_PROPAGATE_FAULT;
3819 3811
3820 /* For APIC access vmexit */ 3812 /* For APIC access vmexit */
3821 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 3813 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)