aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/x86.c
diff options
context:
space:
mode:
authorAvi Kivity <avi@redhat.com>2010-07-27 05:30:24 -0400
committerAvi Kivity <avi@redhat.com>2010-10-24 04:52:50 -0400
commit3842d135ff246b6543f1df77f5600e12094a6845 (patch)
tree7b65456a0527fc3ea753a49c528643fd3b52a7d6 /arch/x86/kvm/x86.c
parentb0bc3ee2b54fcea0df42cc9aa05103b1ccd89db0 (diff)
KVM: Check for pending events before attempting injection
Instead of blindly attempting to inject an event before each guest entry, check for a possible event first in vcpu->requests. Sites that can trigger event injection are modified to set KVM_REQ_EVENT:

- interrupt, nmi window opening
- ppr updates
- i8259 output changes
- local apic irr changes
- rflags updates
- gif flag set
- event set on exit

This improves non-injecting entry performance, and sets the stage for non-atomic injection.

Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--arch/x86/kvm/x86.c41
1 files changed, 32 insertions, 9 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3ff0a8ff275c..e7198036db61 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -284,6 +284,8 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
284 u32 prev_nr; 284 u32 prev_nr;
285 int class1, class2; 285 int class1, class2;
286 286
287 kvm_make_request(KVM_REQ_EVENT, vcpu);
288
287 if (!vcpu->arch.exception.pending) { 289 if (!vcpu->arch.exception.pending) {
288 queue: 290 queue:
289 vcpu->arch.exception.pending = true; 291 vcpu->arch.exception.pending = true;
@@ -356,6 +358,7 @@ void kvm_propagate_fault(struct kvm_vcpu *vcpu)
356 358
357void kvm_inject_nmi(struct kvm_vcpu *vcpu) 359void kvm_inject_nmi(struct kvm_vcpu *vcpu)
358{ 360{
361 kvm_make_request(KVM_REQ_EVENT, vcpu);
359 vcpu->arch.nmi_pending = 1; 362 vcpu->arch.nmi_pending = 1;
360} 363}
361EXPORT_SYMBOL_GPL(kvm_inject_nmi); 364EXPORT_SYMBOL_GPL(kvm_inject_nmi);
@@ -2418,6 +2421,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2418 return -ENXIO; 2421 return -ENXIO;
2419 2422
2420 kvm_queue_interrupt(vcpu, irq->irq, false); 2423 kvm_queue_interrupt(vcpu, irq->irq, false);
2424 kvm_make_request(KVM_REQ_EVENT, vcpu);
2421 2425
2422 return 0; 2426 return 0;
2423} 2427}
@@ -2571,6 +2575,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2571 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR) 2575 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
2572 vcpu->arch.sipi_vector = events->sipi_vector; 2576 vcpu->arch.sipi_vector = events->sipi_vector;
2573 2577
2578 kvm_make_request(KVM_REQ_EVENT, vcpu);
2579
2574 return 0; 2580 return 0;
2575} 2581}
2576 2582
@@ -4329,6 +4335,7 @@ done:
4329 4335
4330 toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility); 4336 toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility);
4331 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); 4337 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
4338 kvm_make_request(KVM_REQ_EVENT, vcpu);
4332 memcpy(vcpu->arch.regs, c->regs, sizeof c->regs); 4339 memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
4333 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip); 4340 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
4334 4341
@@ -4998,6 +5005,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
4998 int r; 5005 int r;
4999 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && 5006 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
5000 vcpu->run->request_interrupt_window; 5007 vcpu->run->request_interrupt_window;
5008 bool req_event;
5001 5009
5002 if (vcpu->requests) { 5010 if (vcpu->requests) {
5003 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) 5011 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5045,8 +5053,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
5045 5053
5046 local_irq_disable(); 5054 local_irq_disable();
5047 5055
5056 req_event = kvm_check_request(KVM_REQ_EVENT, vcpu);
5057
5048 if (!atomic_read(&vcpu->guest_mode) || vcpu->requests 5058 if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
5049 || need_resched() || signal_pending(current)) { 5059 || need_resched() || signal_pending(current)) {
5060 if (req_event)
5061 kvm_make_request(KVM_REQ_EVENT, vcpu);
5050 atomic_set(&vcpu->guest_mode, 0); 5062 atomic_set(&vcpu->guest_mode, 0);
5051 smp_wmb(); 5063 smp_wmb();
5052 local_irq_enable(); 5064 local_irq_enable();
@@ -5055,17 +5067,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
5055 goto out; 5067 goto out;
5056 } 5068 }
5057 5069
5058 inject_pending_event(vcpu); 5070 if (req_event || req_int_win) {
5071 inject_pending_event(vcpu);
5059 5072
5060 /* enable NMI/IRQ window open exits if needed */ 5073 /* enable NMI/IRQ window open exits if needed */
5061 if (vcpu->arch.nmi_pending) 5074 if (vcpu->arch.nmi_pending)
5062 kvm_x86_ops->enable_nmi_window(vcpu); 5075 kvm_x86_ops->enable_nmi_window(vcpu);
5063 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win) 5076 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
5064 kvm_x86_ops->enable_irq_window(vcpu); 5077 kvm_x86_ops->enable_irq_window(vcpu);
5065 5078
5066 if (kvm_lapic_enabled(vcpu)) { 5079 if (kvm_lapic_enabled(vcpu)) {
5067 update_cr8_intercept(vcpu); 5080 update_cr8_intercept(vcpu);
5068 kvm_lapic_sync_to_vapic(vcpu); 5081 kvm_lapic_sync_to_vapic(vcpu);
5082 }
5069 } 5083 }
5070 5084
5071 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 5085 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -5305,6 +5319,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
5305 5319
5306 vcpu->arch.exception.pending = false; 5320 vcpu->arch.exception.pending = false;
5307 5321
5322 kvm_make_request(KVM_REQ_EVENT, vcpu);
5323
5308 return 0; 5324 return 0;
5309} 5325}
5310 5326
@@ -5368,6 +5384,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
5368 struct kvm_mp_state *mp_state) 5384 struct kvm_mp_state *mp_state)
5369{ 5385{
5370 vcpu->arch.mp_state = mp_state->mp_state; 5386 vcpu->arch.mp_state = mp_state->mp_state;
5387 kvm_make_request(KVM_REQ_EVENT, vcpu);
5371 return 0; 5388 return 0;
5372} 5389}
5373 5390
@@ -5389,6 +5406,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
5389 memcpy(vcpu->arch.regs, c->regs, sizeof c->regs); 5406 memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
5390 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip); 5407 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
5391 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); 5408 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
5409 kvm_make_request(KVM_REQ_EVENT, vcpu);
5392 return EMULATE_DONE; 5410 return EMULATE_DONE;
5393} 5411}
5394EXPORT_SYMBOL_GPL(kvm_task_switch); 5412EXPORT_SYMBOL_GPL(kvm_task_switch);
@@ -5459,6 +5477,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
5459 !is_protmode(vcpu)) 5477 !is_protmode(vcpu))
5460 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 5478 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5461 5479
5480 kvm_make_request(KVM_REQ_EVENT, vcpu);
5481
5462 return 0; 5482 return 0;
5463} 5483}
5464 5484
@@ -5691,6 +5711,8 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
5691 vcpu->arch.dr6 = DR6_FIXED_1; 5711 vcpu->arch.dr6 = DR6_FIXED_1;
5692 vcpu->arch.dr7 = DR7_FIXED_1; 5712 vcpu->arch.dr7 = DR7_FIXED_1;
5693 5713
5714 kvm_make_request(KVM_REQ_EVENT, vcpu);
5715
5694 return kvm_x86_ops->vcpu_reset(vcpu); 5716 return kvm_x86_ops->vcpu_reset(vcpu);
5695} 5717}
5696 5718
@@ -6001,6 +6023,7 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
6001 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) 6023 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
6002 rflags |= X86_EFLAGS_TF; 6024 rflags |= X86_EFLAGS_TF;
6003 kvm_x86_ops->set_rflags(vcpu, rflags); 6025 kvm_x86_ops->set_rflags(vcpu, rflags);
6026 kvm_make_request(KVM_REQ_EVENT, vcpu);
6004} 6027}
6005EXPORT_SYMBOL_GPL(kvm_set_rflags); 6028EXPORT_SYMBOL_GPL(kvm_set_rflags);
6006 6029