about summary refs log tree commit diff stats
path: root/arch/x86/kvm/x86.c
diff options
context:
space:
mode:
authorAvi Kivity <avi@redhat.com>2010-07-20 08:06:17 -0400
committerAvi Kivity <avi@redhat.com>2010-10-24 04:52:54 -0400
commitb463a6f744a263fccd7da14db1afdc880371a280 (patch)
tree30dbb8d47f4a3a6b2036dd890d03cb53081eadef /arch/x86/kvm/x86.c
parent83422e17c19d61399cab7dbf9bf40ff9af2a7dd2 (diff)
KVM: Non-atomic interrupt injection
Change the interrupt injection code to work from preemptible, interrupts enabled context. This works by adding a ->cancel_injection() operation that undoes an injection in case we were not able to actually enter the guest (this condition could never happen with atomic injection). Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--arch/x86/kvm/x86.c36
1 file changed, 16 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e7198036db61..a465bd29f381 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5005,7 +5005,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
5005 int r; 5005 int r;
5006 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && 5006 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
5007 vcpu->run->request_interrupt_window; 5007 vcpu->run->request_interrupt_window;
5008 bool req_event;
5009 5008
5010 if (vcpu->requests) { 5009 if (vcpu->requests) {
5011 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) 5010 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5041,6 +5040,21 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
5041 if (unlikely(r)) 5040 if (unlikely(r))
5042 goto out; 5041 goto out;
5043 5042
5043 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
5044 inject_pending_event(vcpu);
5045
5046 /* enable NMI/IRQ window open exits if needed */
5047 if (vcpu->arch.nmi_pending)
5048 kvm_x86_ops->enable_nmi_window(vcpu);
5049 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
5050 kvm_x86_ops->enable_irq_window(vcpu);
5051
5052 if (kvm_lapic_enabled(vcpu)) {
5053 update_cr8_intercept(vcpu);
5054 kvm_lapic_sync_to_vapic(vcpu);
5055 }
5056 }
5057
5044 preempt_disable(); 5058 preempt_disable();
5045 5059
5046 kvm_x86_ops->prepare_guest_switch(vcpu); 5060 kvm_x86_ops->prepare_guest_switch(vcpu);
@@ -5053,35 +5067,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
5053 5067
5054 local_irq_disable(); 5068 local_irq_disable();
5055 5069
5056 req_event = kvm_check_request(KVM_REQ_EVENT, vcpu);
5057
5058 if (!atomic_read(&vcpu->guest_mode) || vcpu->requests 5070 if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
5059 || need_resched() || signal_pending(current)) { 5071 || need_resched() || signal_pending(current)) {
5060 if (req_event)
5061 kvm_make_request(KVM_REQ_EVENT, vcpu);
5062 atomic_set(&vcpu->guest_mode, 0); 5072 atomic_set(&vcpu->guest_mode, 0);
5063 smp_wmb(); 5073 smp_wmb();
5064 local_irq_enable(); 5074 local_irq_enable();
5065 preempt_enable(); 5075 preempt_enable();
5076 kvm_x86_ops->cancel_injection(vcpu);
5066 r = 1; 5077 r = 1;
5067 goto out; 5078 goto out;
5068 } 5079 }
5069 5080
5070 if (req_event || req_int_win) {
5071 inject_pending_event(vcpu);
5072
5073 /* enable NMI/IRQ window open exits if needed */
5074 if (vcpu->arch.nmi_pending)
5075 kvm_x86_ops->enable_nmi_window(vcpu);
5076 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
5077 kvm_x86_ops->enable_irq_window(vcpu);
5078
5079 if (kvm_lapic_enabled(vcpu)) {
5080 update_cr8_intercept(vcpu);
5081 kvm_lapic_sync_to_vapic(vcpu);
5082 }
5083 }
5084
5085 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 5081 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
5086 5082
5087 kvm_guest_enter(); 5083 kvm_guest_enter();