author     Jan Kiszka <jan.kiszka@siemens.com>  2013-03-13 07:42:34 -0400
committer  Gleb Natapov <gleb@redhat.com>       2013-03-13 10:08:10 -0400
commit     66450a21f99636af4fafac2afd33f1a40631bc3a (patch)
tree       81a71a5ad44edcb7317567b2a922e9a861bb2bb8 /arch/x86
parent     5d218814328da91a27e982748443e7e375e11396 (diff)
KVM: x86: Rework INIT and SIPI handling
A VCPU sending INIT or SIPI to some other VCPU races for setting the remote VCPU's mp_state. When we were unlucky, KVM_MP_STATE_INIT_RECEIVED was overwritten by kvm_emulate_halt and, thus, got lost.

This introduces APIC events for those two signals, keeping them in kvm_apic until kvm_apic_accept_events is run over the target vcpu context. kvm_apic_has_events reports to kvm_arch_vcpu_runnable if there are pending events and, thus, if vcpu blocking should end.

The patch comes with the side effect of effectively obsoleting KVM_MP_STATE_SIPI_RECEIVED. We still accept it from user space, but immediately translate it to KVM_MP_STATE_INIT_RECEIVED + KVM_APIC_SIPI. The vcpu itself will no longer enter the KVM_MP_STATE_SIPI_RECEIVED state. That also means we no longer exit to user space after receiving a SIPI event.

Furthermore, we already reset the VCPU on INIT, only fixing up the code segment later on when SIPI arrives. Moreover, we fix INIT handling for the BSP: it never enters wait-for-SIPI but directly starts over on INIT.

Tested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
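In outline, the handshake introduced here works as sketched below. This is a condensed illustration drawn from the lapic.c and x86.c hunks of this patch, with both sides shown together; it is not a separate API.

	/* sender context: __apic_accept_irq(), APIC_DM_STARTUP */
	apic->sipi_vector = vector;
	smp_wmb();			/* publish the vector before the event bit */
	set_bit(KVM_APIC_SIPI, &apic->pending_events);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	kvm_vcpu_kick(vcpu);

	/* target vcpu context: kvm_apic_accept_events() */
	if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events) &&
	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
		smp_rmb();		/* pairs with the sender's smp_wmb() */
		kvm_vcpu_deliver_sipi_vector(vcpu, apic->sipi_vector);
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}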
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  3
-rw-r--r--  arch/x86/kvm/lapic.c            | 48
-rw-r--r--  arch/x86/kvm/lapic.h            | 11
-rw-r--r--  arch/x86/kvm/svm.c              |  6
-rw-r--r--  arch/x86/kvm/vmx.c              | 12
-rw-r--r--  arch/x86/kvm/x86.c              | 58
6 files changed, 93 insertions(+), 45 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 348d85965ead..ef7f4a5cf8c7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -345,7 +345,6 @@ struct kvm_vcpu_arch {
 	unsigned long apic_attention;
 	int32_t apic_arb_prio;
 	int mp_state;
-	int sipi_vector;
 	u64 ia32_misc_enable_msr;
 	bool tpr_access_reporting;
 
@@ -819,6 +818,7 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
 
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
+void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector);
 
 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
 		    int reason, bool has_error_code, u32 error_code);
@@ -1002,6 +1002,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
 
 void kvm_define_shared_msr(unsigned index, u32 msr);
 void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 02b51dd4e4ad..a8e9369f41c5 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -731,7 +731,11 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 	case APIC_DM_INIT:
 		if (!trig_mode || level) {
 			result = 1;
-			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+			/* assumes that there are only KVM_APIC_INIT/SIPI */
+			apic->pending_events = (1UL << KVM_APIC_INIT);
+			/* make sure pending_events is visible before sending
+			 * the request */
+			smp_wmb();
 			kvm_make_request(KVM_REQ_EVENT, vcpu);
 			kvm_vcpu_kick(vcpu);
 		} else {
@@ -743,13 +747,13 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 	case APIC_DM_STARTUP:
 		apic_debug("SIPI to vcpu %d vector 0x%02x\n",
 			   vcpu->vcpu_id, vector);
-		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
-			result = 1;
-			vcpu->arch.sipi_vector = vector;
-			vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
-			kvm_make_request(KVM_REQ_EVENT, vcpu);
-			kvm_vcpu_kick(vcpu);
-		}
+		result = 1;
+		apic->sipi_vector = vector;
+		/* make sure sipi_vector is visible for the receiver */
+		smp_wmb();
+		set_bit(KVM_APIC_SIPI, &apic->pending_events);
+		kvm_make_request(KVM_REQ_EVENT, vcpu);
+		kvm_vcpu_kick(vcpu);
 		break;
 
 	case APIC_DM_EXTINT:
@@ -1860,6 +1864,34 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
 					addr);
 }
 
+void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+	unsigned int sipi_vector;
+
+	if (!kvm_vcpu_has_lapic(vcpu))
+		return;
+
+	if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
+		kvm_lapic_reset(vcpu);
+		kvm_vcpu_reset(vcpu);
+		if (kvm_vcpu_is_bsp(apic->vcpu))
+			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+		else
+			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+	}
+	if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events) &&
+	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
+		/* evaluate pending_events before reading the vector */
+		smp_rmb();
+		sipi_vector = apic->sipi_vector;
+		pr_debug("vcpu %d received sipi with vector # %x\n",
+			 vcpu->vcpu_id, sipi_vector);
+		kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
+		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+	}
+}
+
 void kvm_lapic_init(void)
 {
 	/* do not patch jump label more than once per second */
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 1676d34ddb4e..2c721b986eec 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -5,6 +5,9 @@
 
 #include <linux/kvm_host.h>
 
+#define KVM_APIC_INIT		0
+#define KVM_APIC_SIPI		1
+
 struct kvm_timer {
 	struct hrtimer timer;
 	s64 period; /* unit: ns */
@@ -32,6 +35,8 @@ struct kvm_lapic {
 	void *regs;
 	gpa_t vapic_addr;
 	struct page *vapic_page;
+	unsigned long pending_events;
+	unsigned int sipi_vector;
 };
 int kvm_create_lapic(struct kvm_vcpu *vcpu);
 void kvm_free_lapic(struct kvm_vcpu *vcpu);
@@ -39,6 +44,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu);
 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
+void kvm_apic_accept_events(struct kvm_vcpu *vcpu);
 void kvm_lapic_reset(struct kvm_vcpu *vcpu);
 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
@@ -158,4 +164,9 @@ void kvm_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
 			       struct kvm_lapic_irq *irq,
 			       u64 *eoi_bitmap);
 
+static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.apic->pending_events;
+}
+
 #endif
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 907e4280116d..7219a4012a0e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1199,12 +1199,6 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu)
 
 	init_vmcb(svm);
 
-	if (!kvm_vcpu_is_bsp(vcpu)) {
-		kvm_rip_write(vcpu, 0);
-		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
-		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
-	}
-
 	kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
 	kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f588171be177..af1ffaf20892 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4119,12 +4119,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	vmx_segment_cache_clear(vmx);
 
 	seg_setup(VCPU_SREG_CS);
-	if (kvm_vcpu_is_bsp(&vmx->vcpu))
-		vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
-	else {
-		vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
-		vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
-	}
+	vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
 
 	seg_setup(VCPU_SREG_DS);
 	seg_setup(VCPU_SREG_ES);
@@ -4147,10 +4142,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	vmcs_writel(GUEST_SYSENTER_EIP, 0);
 
 	vmcs_writel(GUEST_RFLAGS, 0x02);
-	if (kvm_vcpu_is_bsp(&vmx->vcpu))
-		kvm_rip_write(vcpu, 0xfff0);
-	else
-		kvm_rip_write(vcpu, 0);
+	kvm_rip_write(vcpu, 0xfff0);
 
 	vmcs_writel(GUEST_GDTR_BASE, 0);
 	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fadd5a750476..61a5bb60af86 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -162,8 +162,6 @@ u64 __read_mostly host_xcr0;
 
 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
 
-static void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
-
 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
 {
 	int i;
@@ -2830,10 +2828,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
 	events->nmi.pad = 0;
 
-	events->sipi_vector = vcpu->arch.sipi_vector;
+	events->sipi_vector = 0; /* never valid when reporting to user space */
 
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
-			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
 			 | KVM_VCPUEVENT_VALID_SHADOW);
 	memset(&events->reserved, 0, sizeof(events->reserved));
 }
@@ -2864,8 +2861,9 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 	vcpu->arch.nmi_pending = events->nmi.pending;
 	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
 
-	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
-		vcpu->arch.sipi_vector = events->sipi_vector;
+	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
+	    kvm_vcpu_has_lapic(vcpu))
+		vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
@@ -5720,6 +5718,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	}
 
 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
+		kvm_apic_accept_events(vcpu);
+		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
+			r = 1;
+			goto out;
+		}
+
 		inject_pending_event(vcpu);
 
 		/* enable NMI/IRQ window open exits if needed */
@@ -5854,14 +5858,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 	int r;
 	struct kvm *kvm = vcpu->kvm;
 
-	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
-		pr_debug("vcpu %d received sipi with vector # %x\n",
-			 vcpu->vcpu_id, vcpu->arch.sipi_vector);
-		kvm_lapic_reset(vcpu);
-		kvm_vcpu_reset(vcpu);
-		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-	}
-
 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 	r = vapic_enter(vcpu);
 	if (r) {
@@ -5878,8 +5874,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 			kvm_vcpu_block(vcpu);
 			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
-			if (kvm_check_request(KVM_REQ_UNHALT, vcpu))
-			{
+			if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
+				kvm_apic_accept_events(vcpu);
 				switch(vcpu->arch.mp_state) {
 				case KVM_MP_STATE_HALTED:
 					vcpu->arch.mp_state =
@@ -5887,7 +5883,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 				case KVM_MP_STATE_RUNNABLE:
 					vcpu->arch.apf.halted = false;
 					break;
-				case KVM_MP_STATE_SIPI_RECEIVED:
+				case KVM_MP_STATE_INIT_RECEIVED:
+					break;
 				default:
 					r = -EINTR;
 					break;
@@ -6022,6 +6019,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		kvm_vcpu_block(vcpu);
+		kvm_apic_accept_events(vcpu);
 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
 		r = -EAGAIN;
 		goto out;
@@ -6178,6 +6176,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 				    struct kvm_mp_state *mp_state)
 {
+	kvm_apic_accept_events(vcpu);
 	mp_state->mp_state = vcpu->arch.mp_state;
 	return 0;
 }
@@ -6185,7 +6184,15 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 				    struct kvm_mp_state *mp_state)
 {
-	vcpu->arch.mp_state = mp_state->mp_state;
+	if (!kvm_vcpu_has_lapic(vcpu) &&
+	    mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
+		return -EINVAL;
+
+	if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
+		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
+	} else
+		vcpu->arch.mp_state = mp_state->mp_state;
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	return 0;
 }
@@ -6522,7 +6529,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->vcpu_free(vcpu);
 }
 
-static void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
+void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	atomic_set(&vcpu->arch.nmi_queued, 0);
 	vcpu->arch.nmi_pending = 0;
@@ -6552,6 +6559,17 @@ static void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->vcpu_reset(vcpu);
 }
 
+void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector)
+{
+	struct kvm_segment cs;
+
+	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
+	cs.selector = vector << 8;
+	cs.base = vector << 12;
+	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
+	kvm_rip_write(vcpu, 0);
+}
+
 int kvm_arch_hardware_enable(void *garbage)
 {
 	struct kvm *kvm;
@@ -6995,7 +7013,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
 		!vcpu->arch.apf.halted)
 		|| !list_empty_careful(&vcpu->async_pf.done)
-		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
+		|| kvm_apic_has_events(vcpu)
 		|| atomic_read(&vcpu->arch.nmi_queued) ||
 		(kvm_arch_interrupt_allowed(vcpu) &&
 		 kvm_cpu_has_interrupt(vcpu));