author     Yang Zhang <yang.z.zhang@Intel.com>    2013-01-24 21:18:51 -0500
committer  Gleb Natapov <gleb@redhat.com>         2013-01-29 03:48:19 -0500
commit     c7c9c56ca26f7b9458711b2d78b60b60e0d38ba7 (patch)
tree       feab61b2f3a5587dd502a9d2bf4b27a8b2ebe507
parent     8d14695f9542e9e0195d6e41ddaa52c32322adf5 (diff)
x86, apicv: add virtual interrupt delivery support
Virtual interrupt delivery avoids the need for KVM to inject vAPIC interrupts manually; delivery is fully taken care of by the hardware. This requires some special awareness in the existing interrupt injection path:

- For a pending interrupt, instead of injecting it directly, we may need to update architecture-specific indicators before resuming the guest.

- A pending interrupt that is masked by the ISR should also be considered in the update action above, since the hardware will decide when to inject it at the right time. The current has_interrupt and get_interrupt only return a valid vector from the injection point of view.

Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
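The decision the patch adds to the injection path can be summarized in a small, self-contained C model. This is illustrative only: vcpu_model and its fields are stand-ins invented for this sketch, not KVM structures; the real checks live in arch/x86/kvm/irq.c below.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the guest's interrupt sources (hypothetical values). */
struct vcpu_model {
	bool pic_output;   /* ExtINT pending at the PIC */
	int  lapic_vector; /* highest pending LAPIC vector, or -1 */
	bool vid_enabled;  /* virtual interrupt delivery active */
};

/*
 * Mirrors kvm_cpu_has_injectable_intr() from the patch: a PIC ExtINT
 * must still be injected by software, but with vid enabled a pending
 * LAPIC interrupt is delivered by hardware, so it no longer counts
 * as "injectable" here.
 */
static bool has_injectable_intr(const struct vcpu_model *v)
{
	if (v->pic_output)
		return true;
	if (v->vid_enabled)
		return false;
	return v->lapic_vector != -1;
}

int main(void)
{
	struct vcpu_model v = { .pic_output = false,
				.lapic_vector = 0x31,
				.vid_enabled = true };

	printf("injectable: %d\n", has_injectable_intr(&v)); /* 0: hardware's job */
	v.vid_enabled = false;
	printf("injectable: %d\n", has_injectable_intr(&v)); /* 1: KVM must inject */
	return 0;
}

The point is the middle check: with vid enabled, a pending LAPIC vector is no longer KVM's job to inject, while a PIC ExtINT still is.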
-rw-r--r--  arch/ia64/kvm/lapic.h            |   6
-rw-r--r--  arch/x86/include/asm/kvm_host.h  |   5
-rw-r--r--  arch/x86/include/asm/vmx.h       |  11
-rw-r--r--  arch/x86/kvm/irq.c               |  56
-rw-r--r--  arch/x86/kvm/lapic.c             | 106
-rw-r--r--  arch/x86/kvm/lapic.h             |  27
-rw-r--r--  arch/x86/kvm/svm.c               |  18
-rw-r--r--  arch/x86/kvm/vmx.c               | 119
-rw-r--r--  arch/x86/kvm/x86.c               |  23
-rw-r--r--  include/linux/kvm_host.h         |   3
-rw-r--r--  virt/kvm/ioapic.c                |  39
-rw-r--r--  virt/kvm/ioapic.h                |   4
-rw-r--r--  virt/kvm/irq_comm.c              |  25
-rw-r--r--  virt/kvm/kvm_main.c              |   5
14 files changed, 407 insertions(+), 40 deletions(-)
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
index c5f92a926a9a..c3e2935b6db4 100644
--- a/arch/ia64/kvm/lapic.h
+++ b/arch/ia64/kvm/lapic.h
@@ -27,4 +27,10 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
 #define kvm_apic_present(x) (true)
 #define kvm_lapic_enabled(x) (true)
 
+static inline bool kvm_apic_vid_enabled(void)
+{
+	/* IA64 has no apicv support, so do nothing here */
+	return false;
+}
+
 #endif
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d42c2839be98..635a74d22409 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -699,6 +699,10 @@ struct kvm_x86_ops {
 	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
 	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
 	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
+	int (*vm_has_apicv)(struct kvm *kvm);
+	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
+	void (*hwapic_isr_update)(struct kvm *kvm, int isr);
+	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
 	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*get_tdp_level)(void);
@@ -994,6 +998,7 @@ int kvm_age_hva(struct kvm *kvm, unsigned long hva);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
+int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 0a54df0b36fc..694586ca6456 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -62,6 +62,7 @@
 #define EXIT_REASON_MCE_DURING_VMENTRY  41
 #define EXIT_REASON_TPR_BELOW_THRESHOLD 43
 #define EXIT_REASON_APIC_ACCESS         44
+#define EXIT_REASON_EOI_INDUCED         45
 #define EXIT_REASON_EPT_VIOLATION       48
 #define EXIT_REASON_EPT_MISCONFIG       49
 #define EXIT_REASON_WBINVD              54
@@ -144,6 +145,7 @@
 #define SECONDARY_EXEC_WBINVD_EXITING           0x00000040
 #define SECONDARY_EXEC_UNRESTRICTED_GUEST       0x00000080
 #define SECONDARY_EXEC_APIC_REGISTER_VIRT       0x00000100
+#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY    0x00000200
 #define SECONDARY_EXEC_PAUSE_LOOP_EXITING       0x00000400
 #define SECONDARY_EXEC_ENABLE_INVPCID           0x00001000
 
@@ -181,6 +183,7 @@ enum vmcs_field {
 	GUEST_GS_SELECTOR               = 0x0000080a,
 	GUEST_LDTR_SELECTOR             = 0x0000080c,
 	GUEST_TR_SELECTOR               = 0x0000080e,
+	GUEST_INTR_STATUS               = 0x00000810,
 	HOST_ES_SELECTOR                = 0x00000c00,
 	HOST_CS_SELECTOR                = 0x00000c02,
 	HOST_SS_SELECTOR                = 0x00000c04,
@@ -208,6 +211,14 @@ enum vmcs_field {
 	APIC_ACCESS_ADDR_HIGH           = 0x00002015,
 	EPT_POINTER                     = 0x0000201a,
 	EPT_POINTER_HIGH                = 0x0000201b,
+	EOI_EXIT_BITMAP0                = 0x0000201c,
+	EOI_EXIT_BITMAP0_HIGH           = 0x0000201d,
+	EOI_EXIT_BITMAP1                = 0x0000201e,
+	EOI_EXIT_BITMAP1_HIGH           = 0x0000201f,
+	EOI_EXIT_BITMAP2                = 0x00002020,
+	EOI_EXIT_BITMAP2_HIGH           = 0x00002021,
+	EOI_EXIT_BITMAP3                = 0x00002022,
+	EOI_EXIT_BITMAP3_HIGH           = 0x00002023,
 	GUEST_PHYSICAL_ADDRESS          = 0x00002400,
 	GUEST_PHYSICAL_ADDRESS_HIGH     = 0x00002401,
 	VMCS_LINK_POINTER               = 0x00002800,
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index b111aee815f8..484bc874688b 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -38,6 +38,38 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
 
 /*
+ * check if there is a pending interrupt from a
+ * non-APIC source, without intack.
+ */
+static int kvm_cpu_has_extint(struct kvm_vcpu *v)
+{
+	if (kvm_apic_accept_pic_intr(v))
+		return pic_irqchip(v->kvm)->output;	/* PIC */
+	else
+		return 0;
+}
+
+/*
+ * check if there is an injectable interrupt:
+ * when virtual interrupt delivery is enabled,
+ * an interrupt from the APIC will be handled by hardware,
+ * so we don't need to check for it here.
+ */
+int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+{
+	if (!irqchip_in_kernel(v->kvm))
+		return v->arch.interrupt.pending;
+
+	if (kvm_cpu_has_extint(v))
+		return 1;
+
+	if (kvm_apic_vid_enabled(v->kvm))
+		return 0;
+
+	return kvm_apic_has_interrupt(v) != -1;	/* LAPIC */
+}
+
+/*
  * check if there is pending interrupt without
  * intack.
  */
@@ -46,27 +78,41 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
 	if (!irqchip_in_kernel(v->kvm))
 		return v->arch.interrupt.pending;
 
-	if (kvm_apic_accept_pic_intr(v) && pic_irqchip(v->kvm)->output)
-		return pic_irqchip(v->kvm)->output;	/* PIC */
+	if (kvm_cpu_has_extint(v))
+		return 1;
 
 	return kvm_apic_has_interrupt(v) != -1;	/* LAPIC */
 }
 EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
 
 /*
+ * Read the pending interrupt vector (from a non-APIC
+ * source) and intack.
+ */
+static int kvm_cpu_get_extint(struct kvm_vcpu *v)
+{
+	if (kvm_cpu_has_extint(v))
+		return kvm_pic_read_irq(v->kvm);	/* PIC */
+	return -1;
+}
+
+/*
  * Read pending interrupt vector and intack.
  */
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
 {
+	int vector;
+
 	if (!irqchip_in_kernel(v->kvm))
 		return v->arch.interrupt.nr;
 
-	if (kvm_apic_accept_pic_intr(v) && pic_irqchip(v->kvm)->output)
-		return kvm_pic_read_irq(v->kvm);	/* PIC */
+	vector = kvm_cpu_get_extint(v);
+
+	if (kvm_apic_vid_enabled(v->kvm) || vector != -1)
+		return vector;	/* PIC */
 
 	return kvm_get_apic_interrupt(v);	/* APIC */
 }
-EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
 
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index f69fc5077a89..02b51dd4e4ad 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -145,21 +145,51 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
 	return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
 }
 
-static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
+void kvm_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
+		struct kvm_lapic_irq *irq,
+		u64 *eoi_exit_bitmap)
 {
-	u16 cid;
-	ldr >>= 32 - map->ldr_bits;
-	cid = (ldr >> map->cid_shift) & map->cid_mask;
+	struct kvm_lapic **dst;
+	struct kvm_apic_map *map;
+	unsigned long bitmap = 1;
+	int i;
 
-	BUG_ON(cid >= ARRAY_SIZE(map->logical_map));
+	rcu_read_lock();
+	map = rcu_dereference(vcpu->kvm->arch.apic_map);
 
-	return cid;
-}
+	if (unlikely(!map)) {
+		__set_bit(irq->vector, (unsigned long *)eoi_exit_bitmap);
+		goto out;
+	}
 
-static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
-{
-	ldr >>= (32 - map->ldr_bits);
-	return ldr & map->lid_mask;
+	if (irq->dest_mode == 0) {	/* physical mode */
+		if (irq->delivery_mode == APIC_DM_LOWEST ||
+				irq->dest_id == 0xff) {
+			__set_bit(irq->vector,
+				  (unsigned long *)eoi_exit_bitmap);
+			goto out;
+		}
+		dst = &map->phys_map[irq->dest_id & 0xff];
+	} else {
+		u32 mda = irq->dest_id << (32 - map->ldr_bits);
+
+		dst = map->logical_map[apic_cluster_id(map, mda)];
+
+		bitmap = apic_logical_id(map, mda);
+	}
+
+	for_each_set_bit(i, &bitmap, 16) {
+		if (!dst[i])
+			continue;
+		if (dst[i]->vcpu == vcpu) {
+			__set_bit(irq->vector,
+				  (unsigned long *)eoi_exit_bitmap);
+			break;
+		}
+	}
+
+out:
+	rcu_read_unlock();
 }
 
 static void recalculate_apic_map(struct kvm *kvm)
@@ -225,6 +255,8 @@ out:
 
 	if (old)
 		kfree_rcu(old, rcu);
+
+	kvm_ioapic_make_eoibitmap_request(kvm);
 }
 
 static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
@@ -340,6 +372,10 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 {
 	int result;
 
+	/*
+	 * Note that irr_pending is just a hint. It will always be
+	 * true with virtual interrupt delivery enabled.
+	 */
 	if (!apic->irr_pending)
 		return -1;
 
@@ -456,6 +492,8 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
 static inline int apic_find_highest_isr(struct kvm_lapic *apic)
 {
 	int result;
+
+	/* Note that isr_count is always 1 with vid enabled */
 	if (!apic->isr_count)
 		return -1;
 	if (likely(apic->highest_isr_cache != -1))
@@ -735,6 +773,19 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
 	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
 }
 
+static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
+{
+	if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
+	    kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
+		int trigger_mode;
+		if (apic_test_vector(vector, apic->regs + APIC_TMR))
+			trigger_mode = IOAPIC_LEVEL_TRIG;
+		else
+			trigger_mode = IOAPIC_EDGE_TRIG;
+		kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
+	}
+}
+
 static int apic_set_eoi(struct kvm_lapic *apic)
 {
 	int vector = apic_find_highest_isr(apic);
@@ -751,19 +802,26 @@ static int apic_set_eoi(struct kvm_lapic *apic)
 	apic_clear_isr(vector, apic);
 	apic_update_ppr(apic);
 
-	if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
-	    kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
-		int trigger_mode;
-		if (apic_test_vector(vector, apic->regs + APIC_TMR))
-			trigger_mode = IOAPIC_LEVEL_TRIG;
-		else
-			trigger_mode = IOAPIC_EDGE_TRIG;
-		kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
-	}
+	kvm_ioapic_send_eoi(apic, vector);
 	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
 	return vector;
 }
 
+/*
+ * this interface assumes a trap-like exit, which has already finished
+ * the desired side effects, including vISR and vPPR update.
+ */
+void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+
+	trace_kvm_eoi(apic, vector);
+
+	kvm_ioapic_send_eoi(apic, vector);
+	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
+
 static void apic_send_ipi(struct kvm_lapic *apic)
 {
 	u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR);
@@ -1375,8 +1433,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 		apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
 		apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
 	}
-	apic->irr_pending = false;
-	apic->isr_count = 0;
+	apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
+	apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm);
 	apic->highest_isr_cache = -1;
 	update_divide_count(apic);
 	atomic_set(&apic->lapic_timer.pending, 0);
@@ -1591,8 +1649,10 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
 	update_divide_count(apic);
 	start_apic_timer(apic);
 	apic->irr_pending = true;
-	apic->isr_count = count_vectors(apic->regs + APIC_ISR);
+	apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
+				1 : count_vectors(apic->regs + APIC_ISR);
 	apic->highest_isr_cache = -1;
+	kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic));
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 
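For reference, the eoi_exit_bitmap that kvm_calculate_eoi_exitmap() fills above holds 256 bits, one per interrupt vector, stored as four u64 words (matching the four EOI_EXIT_BITMAPn VMCS fields); __set_bit(vector, ...) lands in word vector/64, bit vector%64. A minimal user-space rendition of that mapping, where the example vector 0x93 is arbitrary:

#include <stdint.h>
#include <stdio.h>

/* Set the per-vector bit in a 256-bit EOI exit bitmap (4 x u64). */
static void set_eoi_exit_bit(uint64_t bitmap[4], unsigned vector)
{
	bitmap[(vector & 0xff) / 64] |= 1ULL << (vector % 64);
}

int main(void)
{
	uint64_t bitmap[4] = { 0 };

	set_eoi_exit_bit(bitmap, 0x93);

	/* 0x93 = 147 -> word 2, bit 19 -> 0x80000 */
	printf("word 2 = %#llx\n", (unsigned long long)bitmap[2]);
	return 0;
}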
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 22a5397b638c..1676d34ddb4e 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -65,6 +65,7 @@ u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
 
 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
+void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);
 
 void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
@@ -131,4 +132,30 @@ static inline int apic_x2apic_mode(struct kvm_lapic *apic)
 	return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
 }
 
+static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
+{
+	return kvm_x86_ops->vm_has_apicv(kvm);
+}
+
+static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
+{
+	u16 cid;
+	ldr >>= 32 - map->ldr_bits;
+	cid = (ldr >> map->cid_shift) & map->cid_mask;
+
+	BUG_ON(cid >= ARRAY_SIZE(map->logical_map));
+
+	return cid;
+}
+
+static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
+{
+	ldr >>= (32 - map->ldr_bits);
+	return ldr & map->lid_mask;
+}
+
+void kvm_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
+		struct kvm_lapic_irq *irq,
+		u64 *eoi_bitmap);
+
 #endif
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 38407e9fd1bd..e1b1ce21bc00 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3576,6 +3576,21 @@ static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 	return;
 }
 
+static int svm_vm_has_apicv(struct kvm *kvm)
+{
+	return 0;
+}
+
+static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+{
+	return;
+}
+
+static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
+{
+	return;
+}
+
 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -4296,6 +4311,9 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.enable_irq_window = enable_irq_window,
 	.update_cr8_intercept = update_cr8_intercept,
 	.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
+	.vm_has_apicv = svm_vm_has_apicv,
+	.load_eoi_exitmap = svm_load_eoi_exitmap,
+	.hwapic_isr_update = svm_hwapic_isr_update,
 
 	.set_tss_addr = svm_set_tss_addr,
 	.get_tdp_level = get_npt_level,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3ce8a1629330..0cf74a641dec 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -84,8 +84,8 @@ module_param(vmm_exclusive, bool, S_IRUGO);
 static bool __read_mostly fasteoi = 1;
 module_param(fasteoi, bool, S_IRUGO);
 
-static bool __read_mostly enable_apicv_reg = 1;
-module_param(enable_apicv_reg, bool, S_IRUGO);
+static bool __read_mostly enable_apicv_reg_vid = 1;
+module_param(enable_apicv_reg_vid, bool, S_IRUGO);
 
 /*
  * If nested=1, nested virtualization is supported, i.e., guests may use
@@ -781,6 +781,12 @@ static inline bool cpu_has_vmx_apic_register_virt(void)
 		SECONDARY_EXEC_APIC_REGISTER_VIRT;
 }
 
+static inline bool cpu_has_vmx_virtual_intr_delivery(void)
+{
+	return vmcs_config.cpu_based_2nd_exec_ctrl &
+		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
+}
+
 static inline bool cpu_has_vmx_flexpriority(void)
 {
 	return cpu_has_vmx_tpr_shadow() &&
@@ -2571,7 +2577,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 			SECONDARY_EXEC_PAUSE_LOOP_EXITING |
 			SECONDARY_EXEC_RDTSCP |
 			SECONDARY_EXEC_ENABLE_INVPCID |
-			SECONDARY_EXEC_APIC_REGISTER_VIRT;
+			SECONDARY_EXEC_APIC_REGISTER_VIRT |
+			SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
 		if (adjust_vmx_controls(min2, opt2,
 					MSR_IA32_VMX_PROCBASED_CTLS2,
 					&_cpu_based_2nd_exec_control) < 0)
@@ -2586,7 +2593,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
 		_cpu_based_2nd_exec_control &= ~(
 				SECONDARY_EXEC_APIC_REGISTER_VIRT |
-				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
+				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+				SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
 
 	if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
 		/* CR3 accesses and invlpg don't need to cause VM Exits when EPT
@@ -2785,8 +2793,14 @@ static __init int hardware_setup(void)
 	if (!cpu_has_vmx_ple())
 		ple_gap = 0;
 
-	if (!cpu_has_vmx_apic_register_virt())
-		enable_apicv_reg = 0;
+	if (!cpu_has_vmx_apic_register_virt() ||
+				!cpu_has_vmx_virtual_intr_delivery())
+		enable_apicv_reg_vid = 0;
+
+	if (enable_apicv_reg_vid)
+		kvm_x86_ops->update_cr8_intercept = NULL;
+	else
+		kvm_x86_ops->hwapic_irr_update = NULL;
 
 	if (nested)
 		nested_vmx_setup_ctls_msrs();
@@ -3928,6 +3942,11 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
 	return exec_control;
 }
 
+static int vmx_vm_has_apicv(struct kvm *kvm)
+{
+	return enable_apicv_reg_vid && irqchip_in_kernel(kvm);
+}
+
 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
 {
 	u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
@@ -3945,8 +3964,9 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
 		exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
 	if (!ple_gap)
 		exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
-	if (!enable_apicv_reg || !irqchip_in_kernel(vmx->vcpu.kvm))
-		exec_control &= ~SECONDARY_EXEC_APIC_REGISTER_VIRT;
+	if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
+		exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
+				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
 	exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
 	return exec_control;
 }
@@ -3992,6 +4012,15 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 				vmx_secondary_exec_control(vmx));
 	}
 
+	if (enable_apicv_reg_vid) {
+		vmcs_write64(EOI_EXIT_BITMAP0, 0);
+		vmcs_write64(EOI_EXIT_BITMAP1, 0);
+		vmcs_write64(EOI_EXIT_BITMAP2, 0);
+		vmcs_write64(EOI_EXIT_BITMAP3, 0);
+
+		vmcs_write16(GUEST_INTR_STATUS, 0);
+	}
+
 	if (ple_gap) {
 		vmcs_write32(PLE_GAP, ple_gap);
 		vmcs_write32(PLE_WINDOW, ple_window);
@@ -4906,6 +4935,16 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
 	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
+static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
+{
+	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	int vector = exit_qualification & 0xff;
+
+	/* EOI-induced VM exit is trap-like and thus no need to adjust IP */
+	kvm_apic_set_eoi_accelerated(vcpu, vector);
+	return 1;
+}
+
 static int handle_apic_write(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -5851,6 +5890,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
 	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
 	[EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
 	[EXIT_REASON_APIC_WRITE]              = handle_apic_write,
+	[EXIT_REASON_EOI_INDUCED]             = handle_apic_eoi_induced,
 	[EXIT_REASON_WBINVD]                  = handle_wbinvd,
 	[EXIT_REASON_XSETBV]                  = handle_xsetbv,
 	[EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
@@ -6208,7 +6248,8 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 	 * There is not point to enable virtualize x2apic without enable
 	 * apicv
 	 */
-	if (!cpu_has_vmx_virtualize_x2apic_mode() || !enable_apicv_reg)
+	if (!cpu_has_vmx_virtualize_x2apic_mode() ||
+				!vmx_vm_has_apicv(vcpu->kvm))
 		return;
 
 	if (!vm_need_tpr_shadow(vcpu->kvm))
@@ -6228,6 +6269,56 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 	vmx_set_msr_bitmap(vcpu);
 }
 
+static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
+{
+	u16 status;
+	u8 old;
+
+	if (!vmx_vm_has_apicv(kvm))
+		return;
+
+	if (isr == -1)
+		isr = 0;
+
+	status = vmcs_read16(GUEST_INTR_STATUS);
+	old = status >> 8;
+	if (isr != old) {
+		status &= 0xff;
+		status |= isr << 8;
+		vmcs_write16(GUEST_INTR_STATUS, status);
+	}
+}
+
+static void vmx_set_rvi(int vector)
+{
+	u16 status;
+	u8 old;
+
+	status = vmcs_read16(GUEST_INTR_STATUS);
+	old = (u8)status & 0xff;
+	if ((u8)vector != old) {
+		status &= ~0xff;
+		status |= (u8)vector;
+		vmcs_write16(GUEST_INTR_STATUS, status);
+	}
+}
+
+static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
+{
+	if (max_irr == -1)
+		return;
+
+	vmx_set_rvi(max_irr);
+}
+
+static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+{
+	vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
+	vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
+	vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
+	vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
+}
+
 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 {
 	u32 exit_intr_info;
@@ -7492,6 +7583,10 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.enable_irq_window = enable_irq_window,
 	.update_cr8_intercept = update_cr8_intercept,
 	.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
+	.vm_has_apicv = vmx_vm_has_apicv,
+	.load_eoi_exitmap = vmx_load_eoi_exitmap,
+	.hwapic_irr_update = vmx_hwapic_irr_update,
+	.hwapic_isr_update = vmx_hwapic_isr_update,
 
 	.set_tss_addr = vmx_set_tss_addr,
 	.get_tdp_level = get_ept_level,
@@ -7594,7 +7689,7 @@ static int __init vmx_init(void)
 	memcpy(vmx_msr_bitmap_longmode_x2apic,
 			vmx_msr_bitmap_longmode, PAGE_SIZE);
 
-	if (enable_apicv_reg) {
+	if (enable_apicv_reg_vid) {
 		for (msr = 0x800; msr <= 0x8ff; msr++)
 			vmx_disable_intercept_msr_read_x2apic(msr);
 
@@ -7606,6 +7701,10 @@ static int __init vmx_init(void)
 		vmx_enable_intercept_msr_read_x2apic(0x839);
 		/* TPR */
 		vmx_disable_intercept_msr_write_x2apic(0x808);
+		/* EOI */
+		vmx_disable_intercept_msr_write_x2apic(0x80b);
+		/* SELF-IPI */
+		vmx_disable_intercept_msr_write_x2apic(0x83f);
 	}
 
 	if (enable_ept) {
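The GUEST_INTR_STATUS field manipulated by vmx_set_rvi() and vmx_hwapic_isr_update() above is a 16-bit VMCS field whose low byte holds RVI (requesting virtual interrupt) and whose high byte holds SVI (servicing virtual interrupt); each helper rewrites only its own byte. A standalone sketch of that byte packing, with arbitrary example vectors:

#include <stdint.h>
#include <stdio.h>

/* Replace the low byte (RVI) of GUEST_INTR_STATUS. */
static uint16_t set_rvi(uint16_t status, uint8_t vector)
{
	return (status & ~0xff) | vector;
}

/* Replace the high byte (SVI) of GUEST_INTR_STATUS. */
static uint16_t set_svi(uint16_t status, uint8_t isr)
{
	return (status & 0xff) | (uint16_t)(isr << 8);
}

int main(void)
{
	uint16_t status = 0;

	status = set_rvi(status, 0x31);
	status = set_svi(status, 0x40);
	printf("GUEST_INTR_STATUS = %#06x\n", status); /* 0x4031 */
	return 0;
}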
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b9f55299ed7e..cf512e70c797 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5565,7 +5565,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
 			vcpu->arch.nmi_injected = true;
 			kvm_x86_ops->set_nmi(vcpu);
 		}
-	} else if (kvm_cpu_has_interrupt(vcpu)) {
+	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
 		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
 					    false);
@@ -5633,6 +5633,16 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 #endif
 }
 
+static void update_eoi_exitmap(struct kvm_vcpu *vcpu)
+{
+	u64 eoi_exit_bitmap[4];
+
+	memset(eoi_exit_bitmap, 0, 32);
+
+	kvm_ioapic_calculate_eoi_exitmap(vcpu, eoi_exit_bitmap);
+	kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
+}
+
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 {
 	int r;
@@ -5686,6 +5696,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			kvm_handle_pmu_event(vcpu);
 		if (kvm_check_request(KVM_REQ_PMI, vcpu))
 			kvm_deliver_pmi(vcpu);
+		if (kvm_check_request(KVM_REQ_EOIBITMAP, vcpu))
+			update_eoi_exitmap(vcpu);
 	}
 
 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
@@ -5694,10 +5706,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		/* enable NMI/IRQ window open exits if needed */
 		if (vcpu->arch.nmi_pending)
 			kvm_x86_ops->enable_nmi_window(vcpu);
-		else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+		else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
 			kvm_x86_ops->enable_irq_window(vcpu);
 
 		if (kvm_lapic_enabled(vcpu)) {
+			/*
+			 * Update architecture specific hints for APIC
+			 * virtual interrupt delivery.
+			 */
+			if (kvm_x86_ops->hwapic_irr_update)
+				kvm_x86_ops->hwapic_irr_update(vcpu,
+					kvm_lapic_find_highest_irr(vcpu));
 			update_cr8_intercept(vcpu);
 			kvm_lapic_sync_to_vapic(vcpu);
 		}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 4dd7d7531e69..0350e0d5e031 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -123,6 +123,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_MASTERCLOCK_UPDATE 19
 #define KVM_REQ_MCLOCK_INPROGRESS 20
 #define KVM_REQ_EPR_EXIT 21
+#define KVM_REQ_EOIBITMAP 22
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID 0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -538,6 +539,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
 void kvm_make_mclock_inprogress_request(struct kvm *kvm);
+void kvm_make_update_eoibitmap_request(struct kvm *kvm);
 
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg);
@@ -691,6 +693,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
 int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
 		int irq_source_id, int level);
+bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian);
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index f3abbef46c42..ce82b9401958 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -35,6 +35,7 @@
 #include <linux/hrtimer.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/export.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/current.h>
@@ -115,6 +116,42 @@ static void update_handled_vectors(struct kvm_ioapic *ioapic)
 	smp_wmb();
 }
 
+void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
+		u64 *eoi_exit_bitmap)
+{
+	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
+	union kvm_ioapic_redirect_entry *e;
+	struct kvm_lapic_irq irqe;
+	int index;
+
+	spin_lock(&ioapic->lock);
+	/* traverse the ioapic entries to set the eoi exit bitmap */
+	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
+		e = &ioapic->redirtbl[index];
+		if (!e->fields.mask &&
+			(e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
+			 kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
+				 index))) {
+			irqe.dest_id = e->fields.dest_id;
+			irqe.vector = e->fields.vector;
+			irqe.dest_mode = e->fields.dest_mode;
+			irqe.delivery_mode = e->fields.delivery_mode << 8;
+			kvm_calculate_eoi_exitmap(vcpu, &irqe, eoi_exit_bitmap);
+		}
+	}
+	spin_unlock(&ioapic->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_ioapic_calculate_eoi_exitmap);
+
+void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm)
+{
+	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+
+	if (!kvm_apic_vid_enabled(kvm) || !ioapic)
+		return;
+	kvm_make_update_eoibitmap_request(kvm);
+}
+
 static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 {
 	unsigned index;
@@ -156,6 +193,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
 		    && ioapic->irr & (1 << index))
 			ioapic_service(ioapic, index);
+		kvm_ioapic_make_eoibitmap_request(ioapic->kvm);
 		break;
 	}
 }
@@ -455,6 +493,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
 	spin_lock(&ioapic->lock);
 	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
 	update_handled_vectors(ioapic);
+	kvm_ioapic_make_eoibitmap_request(kvm);
 	spin_unlock(&ioapic->lock);
 	return 0;
 }
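Which pins kvm_ioapic_calculate_eoi_exitmap() selects can be read off its loop condition. A reduced model of that predicate (redir_entry is a simplified stand-in struct for this sketch, not the kernel's union kvm_ioapic_redirect_entry):

#include <stdbool.h>
#include <stdio.h>

struct redir_entry {
	bool mask;
	bool level_triggered;
	bool has_ack_notifier; /* stand-in for kvm_irq_has_notifier() */
};

/*
 * An unmasked pin needs an EOI-induced exit if it is level-triggered,
 * or if an ack notifier (e.g. for irqfd resampling or an assigned
 * device) wants to hear about the EOI.
 */
static bool pin_needs_eoi_exit(const struct redir_entry *e)
{
	return !e->mask && (e->level_triggered || e->has_ack_notifier);
}

int main(void)
{
	struct redir_entry e = { .mask = false, .level_triggered = true };

	printf("needs EOI exit: %d\n", pin_needs_eoi_exit(&e)); /* 1 */
	return 0;
}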
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index a30abfe6ed16..0400a466c50c 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -82,5 +82,9 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 		struct kvm_lapic_irq *irq);
 int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
 int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
+void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm);
+void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
+		u64 *eoi_exit_bitmap);
+
 
 #endif
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 656fa455e154..ff6d40e2c06d 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -22,6 +22,7 @@
 
 #include <linux/kvm_host.h>
 #include <linux/slab.h>
+#include <linux/export.h>
 #include <trace/events/kvm.h>
 
 #include <asm/msidef.h>
@@ -237,6 +238,28 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 	return ret;
 }
 
+bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
+{
+	struct kvm_irq_ack_notifier *kian;
+	struct hlist_node *n;
+	int gsi;
+
+	rcu_read_lock();
+	gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
+	if (gsi != -1)
+		hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list,
+					 link)
+			if (kian->gsi == gsi) {
+				rcu_read_unlock();
+				return true;
+			}
+
+	rcu_read_unlock();
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
+
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
 	struct kvm_irq_ack_notifier *kian;
@@ -261,6 +284,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
 	mutex_lock(&kvm->irq_lock);
 	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
 	mutex_unlock(&kvm->irq_lock);
+	kvm_ioapic_make_eoibitmap_request(kvm);
 }
 
 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
@@ -270,6 +294,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
 	hlist_del_init_rcu(&kian->link);
 	mutex_unlock(&kvm->irq_lock);
 	synchronize_rcu();
+	kvm_ioapic_make_eoibitmap_request(kvm);
 }
 
 int kvm_request_irq_source_id(struct kvm *kvm)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3fec2cdd951b..abc23e27173d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -217,6 +217,11 @@ void kvm_make_mclock_inprogress_request(struct kvm *kvm)
 	make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
 }
 
+void kvm_make_update_eoibitmap_request(struct kvm *kvm)
+{
+	make_all_cpus_request(kvm, KVM_REQ_EOIBITMAP);
+}
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 {
 	struct page *page;
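kvm_make_update_eoibitmap_request() rides on the generic request machinery: a per-vcpu request bit (KVM_REQ_EOIBITMAP, number 22) is set for every vcpu, and each vcpu consumes it with a test-and-clear via kvm_check_request() in vcpu_enter_guest() before reloading its bitmap. A deliberately single-threaded model of that handshake (the real code uses atomic bitops and kicks running vcpus; that is omitted here):

#include <stdint.h>
#include <stdio.h>

#define REQ_EOIBITMAP 22

static void make_request(unsigned req, uint64_t *requests)
{
	*requests |= 1ULL << req;
}

static int check_request(unsigned req, uint64_t *requests)
{
	if (*requests & (1ULL << req)) {
		*requests &= ~(1ULL << req); /* test-and-clear */
		return 1;
	}
	return 0;
}

int main(void)
{
	uint64_t vcpu_requests = 0;

	make_request(REQ_EOIBITMAP, &vcpu_requests);
	if (check_request(REQ_EOIBITMAP, &vcpu_requests))
		printf("recompute and load the EOI exit bitmap\n");
	return 0;
}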