aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/include
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2010-04-22 06:33:13 -0400
committerAvi Kivity <avi@redhat.com>2010-05-17 05:19:26 -0400
commitce7ddec4bbbc08f0c2901cc103773aed864b09fd (patch)
treed1cc4b3d0cd472ff30c3c796e6537dd41ec4e268 /arch/x86/include
parentc2c63a493924e09a1984d1374a0e60dfd54fc0b0 (diff)
KVM: x86: Allow marking an exception as reinjected
This patch adds logic to kvm/x86 which allows marking an injected exception as reinjected. This allows removal of an ugly hack from svm_complete_interrupts that prevented exceptions from being reinjected at all in the nested case. The hack was necessary because a reinjected exception into the nested guest could cause a nested vmexit emulation. But reinjected exceptions must not intercept. The downside of the hack is that an exception that is injected could get lost. This patch fixes the problem and puts the code for it into generic x86 files because Nested-VMX will likely have the same problem and could reuse the code. Signed-off-by: Joerg Roedel <joerg.roedel@amd.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/include')
-rw-r--r--arch/x86/include/asm/kvm_host.h6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 357573af974f..3f0007b076da 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -312,6 +312,7 @@ struct kvm_vcpu_arch {
 	struct kvm_queued_exception {
 		bool pending;
 		bool has_error_code;
+		bool reinject;
 		u8 nr;
 		u32 error_code;
 	} exception;
@@ -514,7 +515,8 @@ struct kvm_x86_ops {
 	void (*set_irq)(struct kvm_vcpu *vcpu);
 	void (*set_nmi)(struct kvm_vcpu *vcpu);
 	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
-				bool has_error_code, u32 error_code);
+				bool has_error_code, u32 error_code,
+				bool reinject);
 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
 	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
@@ -617,6 +619,8 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
+void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
 			   u32 error_code);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);