-rw-r--r--  arch/x86/include/asm/kvm_host.h |  2
-rw-r--r--  arch/x86/kvm/irq.c              | 26
-rw-r--r--  arch/x86/kvm/vmx.c              |  2
-rw-r--r--  arch/x86/kvm/x86.c              |  8
-rw-r--r--  arch/x86/kvm/x86.h              |  6
5 files changed, 33 insertions, 11 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 74b5b3e518df..949c977bc4c9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -574,7 +574,7 @@ struct kvm_vcpu_arch {
 	} exception;
 
 	struct kvm_queued_interrupt {
-		bool pending;
+		bool injected;
 		bool soft;
 		u8 nr;
 	} interrupt;
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index f171051eecf3..faa264822cee 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -73,8 +73,19 @@ static int kvm_cpu_has_extint(struct kvm_vcpu *v)
  */
 int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
 {
+	/*
+	 * FIXME: interrupt.injected represents an interrupt whose
+	 * side-effects have already been applied (e.g. bit from IRR
+	 * already moved to ISR). Therefore, it is incorrect to rely
+	 * on interrupt.injected to know if there is a pending
+	 * interrupt in the user-mode LAPIC.
+	 * This leads to nVMX/nSVM not being able to distinguish
+	 * if it should exit from L2 to L1 on EXTERNAL_INTERRUPT on
+	 * pending interrupt or should re-inject an injected
+	 * interrupt.
+	 */
 	if (!lapic_in_kernel(v))
-		return v->arch.interrupt.pending;
+		return v->arch.interrupt.injected;
 
 	if (kvm_cpu_has_extint(v))
 		return 1;
@@ -91,8 +102,19 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
  */
 int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
 {
+	/*
+	 * FIXME: interrupt.injected represents an interrupt whose
+	 * side-effects have already been applied (e.g. bit from IRR
+	 * already moved to ISR). Therefore, it is incorrect to rely
+	 * on interrupt.injected to know if there is a pending
+	 * interrupt in the user-mode LAPIC.
+	 * This leads to nVMX/nSVM not being able to distinguish
+	 * if it should exit from L2 to L1 on EXTERNAL_INTERRUPT on
+	 * pending interrupt or should re-inject an injected
+	 * interrupt.
+	 */
 	if (!lapic_in_kernel(v))
-		return v->arch.interrupt.pending;
+		return v->arch.interrupt.injected;
 
 	if (kvm_cpu_has_extint(v))
 		return 1;
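
The FIXME above is the heart of the rename: an injected interrupt has already had its side effects applied (its IRR bit has moved to ISR), whereas a pending interrupt has not been delivered yet, so the flag cannot stand in for "pending" when the LAPIC is emulated in user space. A minimal sketch of that distinction follows; user_lapic and lapic_has_pending are made-up illustrative names, not kernel APIs.

/* Illustrative model only, not kernel code. */
#include <stdbool.h>
#include <stdint.h>

struct user_lapic {
	uint32_t irr[8];	/* Interrupt Request Register: raised, not yet delivered */
	uint32_t isr[8];	/* In-Service Register: delivered, side effects applied */
};

/*
 * A "pending" interrupt still has a bit set in IRR; an "injected" one has
 * already moved to ISR.  interrupt.injected describes the latter state, so it
 * cannot answer "is anything still pending in the user-mode LAPIC?".
 */
static bool lapic_has_pending(const struct user_lapic *apic)
{
	for (int i = 0; i < 8; i++)
		if (apic->irr[i])
			return true;
	return false;
}
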
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a0492cdd4891..7d2537ab262b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11629,7 +11629,7 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
 	} else if (vcpu->arch.nmi_injected) {
 		vmcs12->idt_vectoring_info_field =
 				INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
-	} else if (vcpu->arch.interrupt.pending) {
+	} else if (vcpu->arch.interrupt.injected) {
 		nr = vcpu->arch.interrupt.nr;
 		idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index dfa135bb0e5a..d15dc8cd6b9d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3312,7 +3312,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	events->exception.error_code = vcpu->arch.exception.error_code;
 
 	events->interrupt.injected =
-		vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
+		vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
 	events->interrupt.nr = vcpu->arch.interrupt.nr;
 	events->interrupt.soft = 0;
 	events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
@@ -3365,7 +3365,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 	vcpu->arch.exception.has_error_code = events->exception.has_error_code;
 	vcpu->arch.exception.error_code = events->exception.error_code;
 
-	vcpu->arch.interrupt.pending = events->interrupt.injected;
+	vcpu->arch.interrupt.injected = events->interrupt.injected;
 	vcpu->arch.interrupt.nr = events->interrupt.nr;
 	vcpu->arch.interrupt.soft = events->interrupt.soft;
 	if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
@@ -6767,7 +6767,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 		return 0;
 	}
 
-	if (vcpu->arch.interrupt.pending) {
+	if (vcpu->arch.interrupt.injected) {
 		kvm_x86_ops->set_irq(vcpu);
 		return 0;
 	}
@@ -7818,7 +7818,7 @@ static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 
 	memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
 
-	if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
+	if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
 		set_bit(vcpu->arch.interrupt.nr,
 			(unsigned long *)sregs->interrupt_bitmap);
 }
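
The two hunks in kvm_vcpu_ioctl_x86_get_vcpu_events()/..._set_vcpu_events() above are the user-space boundary: the internal interrupt.injected flag is exported through the interrupt.injected field of struct kvm_vcpu_events. A sketch of how user space might read that state, assuming a vCPU file descriptor obtained via KVM_CREATE_VCPU and a kernel exposing KVM_CAP_VCPU_EVENTS; dump_queued_interrupt is a hypothetical helper name.

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Read the queued-interrupt state that kvm_vcpu_ioctl_x86_get_vcpu_events()
 * fills in on the kernel side. */
static int dump_queued_interrupt(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;

	/* interrupt.injected mirrors vcpu->arch.interrupt.injected for
	 * hard (non-soft) interrupts, per the hunk above. */
	printf("injected=%d nr=%d soft=%d shadow=%d\n",
	       events.interrupt.injected, events.interrupt.nr,
	       events.interrupt.soft, events.interrupt.shadow);
	return 0;
}
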
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 42350391b62f..1e8617414ee4 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -55,19 +55,19 @@ static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
 static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
 				       bool soft)
 {
-	vcpu->arch.interrupt.pending = true;
+	vcpu->arch.interrupt.injected = true;
 	vcpu->arch.interrupt.soft = soft;
 	vcpu->arch.interrupt.nr = vector;
 }
 
 static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.interrupt.pending = false;
+	vcpu->arch.interrupt.injected = false;
 }
 
 static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.exception.injected || vcpu->arch.interrupt.pending ||
+	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
 		vcpu->arch.nmi_injected;
 }
 
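
These helpers frame the lifecycle the renamed flag tracks: kvm_queue_interrupt() marks an event as injected, inject_pending_event() (in the x86.c hunk above) re-delivers it while the flag is set, and kvm_clear_interrupt_queue() drops it once delivery completes. A compact illustration of that flow with simplified stand-in types, not the kernel implementation.

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for struct kvm_queued_interrupt. */
struct queued_interrupt {
	bool injected;	/* event already injected into the guest */
	bool soft;	/* raised by INTn rather than an external source */
	uint8_t nr;	/* vector */
};

/* Mirrors kvm_queue_interrupt(): record an event that has been injected. */
static void queue_interrupt(struct queued_interrupt *irq, uint8_t vector, bool soft)
{
	irq->injected = true;
	irq->soft = soft;
	irq->nr = vector;
}

/* Mirrors kvm_clear_interrupt_queue(): delivery finished, nothing to re-inject. */
static void clear_interrupt_queue(struct queued_interrupt *irq)
{
	irq->injected = false;
}
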