author     Avi Kivity <avi@redhat.com>  2010-07-20 08:06:17 -0400
committer  Avi Kivity <avi@redhat.com>  2010-10-24 04:52:54 -0400
commit     b463a6f744a263fccd7da14db1afdc880371a280 (patch)
tree       30dbb8d47f4a3a6b2036dd890d03cb53081eadef /arch/x86
parent     83422e17c19d61399cab7dbf9bf40ff9af2a7dd2 (diff)
KVM: Non-atomic interrupt injection
Change the interrupt injection code to work from a preemptible, interrupts-enabled context. This works by adding a ->cancel_injection() operation that undoes an injection in case we were not able to actually enter the guest (this condition could never happen with atomic injection).

Signed-off-by: Avi Kivity <avi@redhat.com>
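The reordering is easier to see outside the diff. Below is a condensed sketch of the new vcpu_enter_guest() flow; it uses the names from the patch (inject_pending_event(), kvm_x86_ops, req_int_win) but elides request processing, srcu handling, and the error paths, so it is illustrative rather than compilable on its own:

	static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
	{
		/*
		 * Inject while still preemptible, with interrupts enabled;
		 * injection no longer requires atomic context.
		 */
		if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win)
			inject_pending_event(vcpu);

		preempt_disable();
		local_irq_disable();

		if (!atomic_read(&vcpu->guest_mode) || vcpu->requests ||
		    need_resched() || signal_pending(current)) {
			local_irq_enable();
			preempt_enable();
			/*
			 * Entry was aborted after the event was already
			 * committed to the VMCS/VMCB: undo it so it is
			 * re-queued and delivered on a later entry.
			 */
			kvm_x86_ops->cancel_injection(vcpu);
			return 1;
		}

		/* ... actually enter the guest ... */
	}

Both backends implement the undo the same way: copy the pending event back out of the entry fields (event_inj on SVM, VM_ENTRY_INTR_INFO_FIELD on VMX), clear the entry field, and run the existing exit-completion path so the event is re-queued as if injection had failed.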
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  1
-rw-r--r--  arch/x86/kvm/svm.c              | 12
-rw-r--r--  arch/x86/kvm/vmx.c              | 11
-rw-r--r--  arch/x86/kvm/x86.c              | 36
4 files changed, 40 insertions(+), 20 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b43686a44877..80224bf5d4f8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -552,6 +552,7 @@ struct kvm_x86_ops {
 	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code,
 				bool reinject);
+	void (*cancel_injection)(struct kvm_vcpu *vcpu);
 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
 	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1d2ea65d3537..1a85fc507cf7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3261,6 +3261,17 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 	}
 }
 
+static void svm_cancel_injection(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb_control_area *control = &svm->vmcb->control;
+
+	control->exit_int_info = control->event_inj;
+	control->exit_int_info_err = control->event_inj_err;
+	control->event_inj = 0;
+	svm_complete_interrupts(svm);
+}
+
 #ifdef CONFIG_X86_64
 #define R "r"
 #else
@@ -3631,6 +3642,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_irq = svm_set_irq,
 	.set_nmi = svm_inject_nmi,
 	.queue_exception = svm_queue_exception,
+	.cancel_injection = svm_cancel_injection,
 	.interrupt_allowed = svm_interrupt_allowed,
 	.nmi_allowed = svm_nmi_allowed,
 	.get_nmi_mask = svm_get_nmi_mask,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3237f6cc930d..70af3db372d7 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3895,6 +3895,16 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 				  IDT_VECTORING_ERROR_CODE);
 }
 
+static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
+{
+	__vmx_complete_interrupts(to_vmx(vcpu),
+				  vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
+				  VM_ENTRY_INSTRUCTION_LEN,
+				  VM_ENTRY_EXCEPTION_ERROR_CODE);
+
+	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
+}
+
 /*
  * Failure to inject an interrupt should give us the information
  * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
@@ -4348,6 +4358,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_irq = vmx_inject_irq,
 	.set_nmi = vmx_inject_nmi,
 	.queue_exception = vmx_queue_exception,
+	.cancel_injection = vmx_cancel_injection,
 	.interrupt_allowed = vmx_interrupt_allowed,
 	.nmi_allowed = vmx_nmi_allowed,
 	.get_nmi_mask = vmx_get_nmi_mask,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e7198036db61..a465bd29f381 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5005,7 +5005,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	int r;
 	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
 		vcpu->run->request_interrupt_window;
-	bool req_event;
 
 	if (vcpu->requests) {
 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5041,6 +5040,21 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (unlikely(r))
 		goto out;
 
+	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
+		inject_pending_event(vcpu);
+
+		/* enable NMI/IRQ window open exits if needed */
+		if (vcpu->arch.nmi_pending)
+			kvm_x86_ops->enable_nmi_window(vcpu);
+		else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+			kvm_x86_ops->enable_irq_window(vcpu);
+
+		if (kvm_lapic_enabled(vcpu)) {
+			update_cr8_intercept(vcpu);
+			kvm_lapic_sync_to_vapic(vcpu);
+		}
+	}
+
 	preempt_disable();
 
 	kvm_x86_ops->prepare_guest_switch(vcpu);
@@ -5053,35 +5067,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	local_irq_disable();
 
-	req_event = kvm_check_request(KVM_REQ_EVENT, vcpu);
-
 	if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
 	    || need_resched() || signal_pending(current)) {
-		if (req_event)
-			kvm_make_request(KVM_REQ_EVENT, vcpu);
 		atomic_set(&vcpu->guest_mode, 0);
 		smp_wmb();
 		local_irq_enable();
 		preempt_enable();
+		kvm_x86_ops->cancel_injection(vcpu);
 		r = 1;
 		goto out;
 	}
 
-	if (req_event || req_int_win) {
-		inject_pending_event(vcpu);
-
-		/* enable NMI/IRQ window open exits if needed */
-		if (vcpu->arch.nmi_pending)
-			kvm_x86_ops->enable_nmi_window(vcpu);
-		else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
-			kvm_x86_ops->enable_irq_window(vcpu);
-
-		if (kvm_lapic_enabled(vcpu)) {
-			update_cr8_intercept(vcpu);
-			kvm_lapic_sync_to_vapic(vcpu);
-		}
-	}
-
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
 	kvm_guest_enter();