about summary refs log tree commit diff stats
path: root/arch/x86/kvm/svm.c
diff options
context:
space:
mode:
authorAvi Kivity <avi@redhat.com>2010-07-27 05:30:24 -0400
committerAvi Kivity <avi@redhat.com>2010-10-24 04:52:50 -0400
commit3842d135ff246b6543f1df77f5600e12094a6845 (patch)
tree7b65456a0527fc3ea753a49c528643fd3b52a7d6 /arch/x86/kvm/svm.c
parentb0bc3ee2b54fcea0df42cc9aa05103b1ccd89db0 (diff)
KVM: Check for pending events before attempting injection
Instead of blindly attempting to inject an event before each guest entry, check for a possible event first in vcpu->requests. Sites that can trigger event injection are modified to set KVM_REQ_EVENT: - interrupt, nmi window opening - ppr updates - i8259 output changes - local apic irr changes - rflags updates - gif flag set - event set on exit This improves non-injecting entry performance, and sets the stage for non-atomic injection. Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--arch/x86/kvm/svm.c8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e0f4da07f987..1d2ea65d3537 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2371,6 +2371,7 @@ static int stgi_interception(struct vcpu_svm *svm)
2371 2371
2372 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; 2372 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2373 skip_emulated_instruction(&svm->vcpu); 2373 skip_emulated_instruction(&svm->vcpu);
2374 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2374 2375
2375 enable_gif(svm); 2376 enable_gif(svm);
2376 2377
@@ -2763,6 +2764,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
2763{ 2764{
2764 struct kvm_run *kvm_run = svm->vcpu.run; 2765 struct kvm_run *kvm_run = svm->vcpu.run;
2765 2766
2767 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2766 svm_clear_vintr(svm); 2768 svm_clear_vintr(svm);
2767 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; 2769 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
2768 /* 2770 /*
@@ -3209,8 +3211,10 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
3209 3211
3210 svm->int3_injected = 0; 3212 svm->int3_injected = 0;
3211 3213
3212 if (svm->vcpu.arch.hflags & HF_IRET_MASK) 3214 if (svm->vcpu.arch.hflags & HF_IRET_MASK) {
3213 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); 3215 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
3216 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3217 }
3214 3218
3215 svm->vcpu.arch.nmi_injected = false; 3219 svm->vcpu.arch.nmi_injected = false;
3216 kvm_clear_exception_queue(&svm->vcpu); 3220 kvm_clear_exception_queue(&svm->vcpu);
@@ -3219,6 +3223,8 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
3219 if (!(exitintinfo & SVM_EXITINTINFO_VALID)) 3223 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
3220 return; 3224 return;
3221 3225
3226 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3227
3222 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK; 3228 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
3223 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK; 3229 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
3224 3230