author    Yang Zhang <yang.z.zhang@Intel.com>  2013-04-11 07:25:15 -0400
committer Marcelo Tosatti <mtosatti@redhat.com>  2013-04-16 15:32:40 -0400
commit    a20ed54d6e470bf0d28921b7aadb6ca0da0ff0c3
tree      14b3e8c9b4e686ec321967f5b517571e417b83a1
parent    cf9e65b773394c5ad8fa7455c43268bc8ec2109f
KVM: VMX: Add the deliver posted interrupt algorithm
Only deliver the posted interrupt when target vcpu is running and there
is no previous interrupt pending in pir.

Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
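The PIR/ON handshake the patch implements can be modeled outside the kernel. The sketch below is illustrative only: C11 atomics stand in for the kernel's test_and_set_bit()/xchg() and for the hardware's atomic PIR-to-vIRR sync, and every name here (post_interrupt, sync_pir_to_irr, the main() driver) is invented for the model, not taken from the patch.

/* Userspace model of the posted-interrupt handshake (illustrative only). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned int pir[8];     /* models the 256-bit PIR bitmap */
static atomic_bool on;                  /* models the ON (notification) bit */

/* Sender: post a vector; returns true if a notification must be sent. */
static bool post_interrupt(int vector)
{
        unsigned int mask = 1u << (vector & 31);

        /* test-and-set the vector's PIR bit */
        if (atomic_fetch_or(&pir[vector >> 5], mask) & mask)
                return false;   /* already pending: a notification is in flight */

        /* only the first setter of ON sends the one notification */
        return !atomic_exchange(&on, true);
}

/* Receiver (vmentry side): drain PIR into a local IRR image. */
static void sync_pir_to_irr(unsigned int irr[8])
{
        if (!atomic_exchange(&on, false))
                return;         /* nothing posted since the last sync */
        for (int i = 0; i < 8; i++)
                irr[i] |= atomic_exchange(&pir[i], 0);
}

int main(void)
{
        unsigned int irr[8] = { 0 };

        if (post_interrupt(0x31))       /* vector 49 */
                sync_pir_to_irr(irr);   /* stands in for the IPI/kick */
        printf("irr[1] = %#x\n", irr[1]);       /* prints 0x20000: bit 17 */
        return 0;
}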
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/lapic.c | 13
-rw-r--r--  arch/x86/kvm/lapic.h |  1
-rw-r--r--  arch/x86/kvm/svm.c   |  6
-rw-r--r--  arch/x86/kvm/vmx.c   | 64
4 files changed, 83 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index d197579435d0..dbf74c922aa1 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -318,6 +318,19 @@ static u8 count_vectors(void *bitmap)
 	return count;
 }
 
+void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
+{
+	u32 i, pir_val;
+	struct kvm_lapic *apic = vcpu->arch.apic;
+
+	for (i = 0; i <= 7; i++) {
+		pir_val = xchg(&pir[i], 0);
+		if (pir_val)
+			*((u32 *)(apic->regs + APIC_IRR + i * 0x10)) |= pir_val;
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
+
 static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
 {
 	apic->irr_pending = true;
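The loop above mirrors the APIC register layout: the 256-bit IRR is exposed as eight 32-bit registers at a 0x10-byte stride starting at APIC_IRR (0x200), so PIR dword i folds into IRR register i. A small illustrative helper (the function names are invented for this example, not part of the patch):

/* Illustrative only: where a given vector lives in the IRR array. */
static inline unsigned int irr_reg_offset(int vector)
{
        return APIC_IRR + (vector / 32) * 0x10; /* APIC_IRR == 0x200 */
}

static inline unsigned int irr_bit(int vector)
{
        return vector % 32;     /* e.g. vector 0x31 -> offset 0x210, bit 17 */
}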
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 7fe0c9180ea1..c730ac9fe801 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -54,6 +54,7 @@ u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
 void kvm_apic_set_version(struct kvm_vcpu *vcpu);
 
 void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr);
+void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2f8fe3f06837..d6713e18bbc1 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3577,6 +3577,11 @@ static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
 	return;
 }
 
+static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
+	return;
+}
+
 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -4305,6 +4310,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.vm_has_apicv = svm_vm_has_apicv,
 	.load_eoi_exitmap = svm_load_eoi_exitmap,
 	.hwapic_isr_update = svm_hwapic_isr_update,
+	.sync_pir_to_irr = svm_sync_pir_to_irr,
 
 	.set_tss_addr = svm_set_tss_addr,
 	.get_tdp_level = get_npt_level,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8535519d0352..3a14d8a0ee46 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -375,6 +375,23 @@ struct pi_desc {
 	u32 rsvd[7];
 } __aligned(64);
 
+static bool pi_test_and_set_on(struct pi_desc *pi_desc)
+{
+	return test_and_set_bit(POSTED_INTR_ON,
+			(unsigned long *)&pi_desc->control);
+}
+
+static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
+{
+	return test_and_clear_bit(POSTED_INTR_ON,
+			(unsigned long *)&pi_desc->control);
+}
+
+static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
+{
+	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
+}
+
 struct vcpu_vmx {
 	struct kvm_vcpu vcpu;
 	unsigned long host_rsp;
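These helpers operate on the posted-interrupt descriptor whose tail is visible at the top of this hunk. For the reader's convenience, its layout as defined earlier in vmx.c (quoted from surrounding context, not part of this patch) is:

struct pi_desc {
        u32 pir[8];     /* 256-bit Posted Interrupt Request bitmap */
        u32 control;    /* bit 0 is POSTED_INTR_ON: notification outstanding */
        u32 rsvd[7];
} __aligned(64);        /* hardware requires a 64-byte-aligned descriptor */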
@@ -639,6 +656,7 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
 static bool guest_state_valid(struct kvm_vcpu *vcpu);
 static u32 vmx_segment_access_rights(struct kvm_segment *var);
+static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -2846,8 +2864,11 @@ static __init int hardware_setup(void)
 
 	if (enable_apicv)
 		kvm_x86_ops->update_cr8_intercept = NULL;
-	else
+	else {
 		kvm_x86_ops->hwapic_irr_update = NULL;
+		kvm_x86_ops->deliver_posted_interrupt = NULL;
+		kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
+	}
 
 	if (nested)
 		nested_vmx_setup_ctls_msrs();
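Note the asymmetry in the non-apicv fallback: deliver_posted_interrupt is cleared to NULL, so callers can test the pointer to choose a delivery path, while sync_pir_to_irr is pointed at a no-op dummy, which suggests its call sites invoke it unconditionally. A caller-side sketch of that reading (an assumption; the helper name is invented, not patch code):

/* Sketch: why the two fallbacks differ (assumption, not patch code). */
if (kvm_x86_ops->deliver_posted_interrupt)
        kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);    /* apicv on */
else
        set_irr_and_request_event(vcpu, vector);        /* hypothetical fallback */

kvm_x86_ops->sync_pir_to_irr(vcpu);     /* called unconditionally: must be non-NULL */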
@@ -3909,6 +3930,45 @@ static int vmx_vm_has_apicv(struct kvm *kvm)
 }
 
 /*
+ * Send interrupt to vcpu via posted interrupt way.
+ * 1. If target vcpu is running(non-root mode), send posted interrupt
+ * notification to vcpu and hardware will sync PIR to vIRR atomically.
+ * 2. If target vcpu isn't running(root mode), kick it to pick up the
+ * interrupt from PIR in next vmentry.
+ */
+static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	int r;
+
+	if (pi_test_and_set_pir(vector, &vmx->pi_desc))
+		return;
+
+	r = pi_test_and_set_on(&vmx->pi_desc);
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	if (!r && (vcpu->mode == IN_GUEST_MODE))
+		apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
+				POSTED_INTR_VECTOR);
+	else
+		kvm_vcpu_kick(vcpu);
+}
+
+static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!pi_test_and_clear_on(&vmx->pi_desc))
+		return;
+
+	kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
+}
+
+static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu)
+{
+	return;
+}
+
+/*
  * Set up the vmcs's constant host-state fields, i.e., host-state fields that
  * will not change in the lifetime of the guest.
  * Note that host-state that does change is set elsewhere. E.g., host-state
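This patch only adds the two callbacks; for the kick path to work, something must call sync_pir_to_irr before the vIRR is scanned, so a vcpu woken by kvm_vcpu_kick() actually observes the posted vector. A hedged sketch of such a consumer (the real call site is wired up elsewhere in the series; the wrapper name here is invented):

/* Hypothetical consumer: fold posted vectors into the vIRR before
 * scanning for the highest pending interrupt. */
static int highest_pending_irq(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->sync_pir_to_irr(vcpu);     /* drains PIR if ON was set */
        return kvm_lapic_find_highest_irr(vcpu);
}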
@@ -7784,6 +7844,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.load_eoi_exitmap = vmx_load_eoi_exitmap,
 	.hwapic_irr_update = vmx_hwapic_irr_update,
 	.hwapic_isr_update = vmx_hwapic_isr_update,
+	.sync_pir_to_irr = vmx_sync_pir_to_irr,
+	.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
 
 	.set_tss_addr = vmx_set_tss_addr,
 	.get_tdp_level = get_ept_level,