author     Jan Kiszka <jan.kiszka@siemens.com>       2013-03-13 07:42:34 -0400
committer  Gleb Natapov <gleb@redhat.com>            2013-03-13 10:08:10 -0400
commit     66450a21f99636af4fafac2afd33f1a40631bc3a (patch)
tree       81a71a5ad44edcb7317567b2a922e9a861bb2bb8 /arch/x86/include
parent     5d218814328da91a27e982748443e7e375e11396 (diff)
KVM: x86: Rework INIT and SIPI handling
A VCPU sending INIT or SIPI to some other VCPU races with that VCPU when
setting the remote VCPU's mp_state. If we were unlucky,
KVM_MP_STATE_INIT_RECEIVED was overwritten by kvm_emulate_halt and thus
got lost.
This patch introduces APIC events for those two signals, keeping them in
kvm_apic until kvm_apic_accept_events is run in the context of the target
VCPU. kvm_apic_has_events reports to kvm_arch_vcpu_runnable whether there
are pending events, i.e. whether VCPU blocking should end.
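
As a rough sketch of that mechanism (the pending_events field, the
KVM_APIC_INIT/KVM_APIC_SIPI bit names and the placement of sipi_vector in
struct kvm_lapic are assumptions inferred from this description, not taken
from the hunks below):

	/*
	 * Sketch: a sending VCPU records INIT/SIPI as bits on the target's
	 * local APIC; the target VCPU consumes them later in its own context.
	 */
	#define KVM_APIC_INIT		0
	#define KVM_APIC_SIPI		1

	struct kvm_lapic {
		/* ... existing fields ... */
		unsigned long pending_events;	/* set by the sending VCPU */
		unsigned int sipi_vector;	/* vector carried by the SIPI */
	};

	static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
	{
		/* non-zero means kvm_arch_vcpu_runnable() should end blocking */
		return vcpu->arch.apic->pending_events;
	}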
The patch comes with the side effect of effectively obsoleting
KVM_MP_STATE_SIPI_RECEIVED. We still accept it from user space, but
immediately translate it to KVM_MP_STATE_INIT_RECEIVED + KVM_APIC_SIPI.
The vcpu itself will no longer enter the KVM_MP_STATE_SIPI_RECEIVED
state. That also means we no longer exit to user space after receiving a
SIPI event.
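
A minimal sketch of that translation on the KVM_SET_MP_STATE path (the
surrounding ioctl handler is not part of this header hunk, so treat the
exact field accesses as assumptions):

	/* Fold a legacy SIPI_RECEIVED request from user space into
	 * INIT_RECEIVED plus a pending SIPI event on the local APIC. */
	if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
	} else {
		vcpu->arch.mp_state = mp_state->mp_state;
	}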
Furthermore, we already reset the VCPU on INIT, only fixing up the code
segment later on when SIPI arrives. Moreover, we fix INIT handling for
the BSP: it never enters wait-for-SIPI but directly starts over on INIT.
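
Put together, the accept path might look roughly like this (a sketch only:
kvm_vcpu_reset and kvm_vcpu_deliver_sipi_vector are the declarations added
in the hunks below, the rest is illustrative):

	/* Runs in the target VCPU's own context before entering the guest. */
	void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
	{
		struct kvm_lapic *apic = vcpu->arch.apic;

		if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
			kvm_vcpu_reset(vcpu);	/* full reset already on INIT */
			if (kvm_vcpu_is_bsp(vcpu))
				/* the BSP never waits for SIPI; it starts over directly */
				vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
			else
				vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
		}
		if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events) &&
		    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
			/* SIPI only fixes up the code segment and starts the VCPU */
			kvm_vcpu_deliver_sipi_vector(vcpu, apic->sipi_vector);
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		}
	}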
Tested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Diffstat (limited to 'arch/x86/include')
 -rw-r--r--   arch/x86/include/asm/kvm_host.h   3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 348d85965ead..ef7f4a5cf8c7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -345,7 +345,6 @@ struct kvm_vcpu_arch {
 	unsigned long apic_attention;
 	int32_t apic_arb_prio;
 	int mp_state;
-	int sipi_vector;
 	u64 ia32_misc_enable_msr;
 	bool tpr_access_reporting;
 
@@ -819,6 +818,7 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
 
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
+void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector);
 
 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
 		    int reason, bool has_error_code, u32 error_code);
@@ -1002,6 +1002,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
 
 void kvm_define_shared_msr(unsigned index, u32 msr);
 void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);