author		Joerg Roedel <joerg.roedel@amd.com>	2010-09-10 11:30:55 -0400
committer	Avi Kivity <avi@redhat.com>	2010-10-24 04:52:40 -0400
commit		d4f8cf664e4c1fd579df6b6e6378335c9f79d790
tree		e5f5ce71daa78ebf2bae27d355f1e9a11d59ef77
parent		02f59dc9f1f51d2148d87d48f84adb455a4fd697
KVM: MMU: Propagate the right fault back to the guest after gva_to_gpa
This patch implements the logic needed to make sure that the right fault
is propagated back to the guest: either a regular page fault (or
page-fault vmexit) or a nested-page-fault vmexit.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
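
As an aside, the routing decision this patch introduces can be sketched as a
small standalone C program. Everything below (the struct layout, the mask
value, the handler names) is a simplified stand-in for the kernel's
PFERR_NESTED_MASK, mmu_is_nested() and the inject_page_fault callbacks; it
illustrates the decision only, not the actual KVM interfaces:

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's PFERR_NESTED_MASK: tags an error code as
 * having been raised while walking the nested page tables. */
#define PFERR_NESTED_MASK	(1u << 31)

struct fault_info {
	uint32_t error_code;	/* may carry PFERR_NESTED_MASK */
	bool	 nested_mmu;	/* stand-in for mmu_is_nested(vcpu) */
};

/* Mirrors the decision in kvm_propagate_fault(): remember whether the
 * nested tag was set, strip it from the error code the guest will see,
 * then pick the injection path. */
static const char *propagate_fault(struct fault_info *f)
{
	uint32_t nested = f->error_code & PFERR_NESTED_MASK;

	f->error_code &= ~PFERR_NESTED_MASK;

	if (f->nested_mmu && !nested)
		return "nested_mmu.inject_page_fault";	/* guest's own #PF */
	else
		return "mmu.inject_page_fault";		/* nested-page-fault path */
}

int main(void)
{
	/* The guest's own page fault while running nested ... */
	struct fault_info own = { .error_code = 0x0f, .nested_mmu = true };
	/* ... versus a fault raised during the nested walk itself. */
	struct fault_info npt = { .error_code = 0x0f | PFERR_NESTED_MASK,
				  .nested_mmu = true };

	printf("%s, error_code=%#" PRIx32 "\n", propagate_fault(&own), own.error_code);
	printf("%s, error_code=%#" PRIx32 "\n", propagate_fault(&npt), npt.error_code);

	return 0;
}

Running it shows the tagged fault taking the ordinary MMU path and the
untagged one taking the nested-MMU path, with the internal
PFERR_NESTED_MASK bit stripped from the error code in both cases.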
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/x86.c              | 18 +++++++++++++++++-
 2 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 08bc383083ff..574db6d1532a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -660,6 +660,7 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu);
 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gfn_t gfn, void *data, int offset, int len,
 			    u32 access);
+void kvm_propagate_fault(struct kvm_vcpu *vcpu);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 
 int kvm_pic_set_irq(void *opaque, int irq, int level);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e4c76bf86081..0281d920e9ed 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -338,6 +338,22 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
 	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
 }
 
+void kvm_propagate_fault(struct kvm_vcpu *vcpu)
+{
+	u32 nested, error;
+
+	error = vcpu->arch.fault.error_code;
+	nested = error & PFERR_NESTED_MASK;
+	error = error & ~PFERR_NESTED_MASK;
+
+	vcpu->arch.fault.error_code = error;
+
+	if (mmu_is_nested(vcpu) && !nested)
+		vcpu->arch.nested_mmu.inject_page_fault(vcpu);
+	else
+		vcpu->arch.mmu.inject_page_fault(vcpu);
+}
+
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.nmi_pending = 1;
@@ -4140,7 +4156,7 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu)
 {
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
 	if (ctxt->exception == PF_VECTOR)
-		kvm_inject_page_fault(vcpu);
+		kvm_propagate_fault(vcpu);
 	else if (ctxt->error_code_valid)
 		kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code);
 	else
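
The one-line change in inject_emulated_exception() is where the new helper
takes effect: emulated page faults were previously queued unconditionally as
a plain #PF through kvm_inject_page_fault(), whereas kvm_propagate_fault()
first inspects the PFERR_NESTED_MASK tag left by the gva_to_gpa walk and can
therefore deliver the fault to the MMU level it actually belongs to.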