author		Jan Kiszka <jan.kiszka@siemens.com>	2013-03-13 07:42:34 -0400
committer	Gleb Natapov <gleb@redhat.com>	2013-03-13 10:08:10 -0400
commit		66450a21f99636af4fafac2afd33f1a40631bc3a
tree		81a71a5ad44edcb7317567b2a922e9a861bb2bb8 /arch/x86/include/asm/kvm_host.h
parent		5d218814328da91a27e982748443e7e375e11396
KVM: x86: Rework INIT and SIPI handling
A VCPU sending INIT or SIPI to some other VCPU races for setting the remote VCPU's mp_state. When we were unlucky, KVM_MP_STATE_INIT_RECEIVED was overwritten by kvm_emulate_halt and, thus, got lost.

This introduces APIC events for those two signals, keeping them in kvm_apic until kvm_apic_accept_events is run over the target vcpu context. kvm_apic_has_events reports to kvm_arch_vcpu_runnable if there are pending events, and thus whether vcpu blocking should end.

The patch comes with the side effect of effectively obsoleting KVM_MP_STATE_SIPI_RECEIVED. We still accept it from user space, but immediately translate it to KVM_MP_STATE_INIT_RECEIVED + KVM_APIC_SIPI. The vcpu itself will no longer enter the KVM_MP_STATE_SIPI_RECEIVED state. That also means we no longer exit to user space after receiving a SIPI event.

Furthermore, we now reset the VCPU already on INIT and only fix up the code segment later on, when SIPI arrives. Moreover, we fix INIT handling for the BSP: it never enters wait-for-SIPI but directly starts over on INIT.

Tested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
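For illustration, a minimal sketch of how such latched events might be consumed from the target vcpu context, per the description above. This is not the patch body: the pending_events field, the KVM_APIC_INIT/KVM_APIC_SIPI bit names, and apic->sipi_vector are assumed names for this sketch; only the kvm_vcpu_reset and kvm_vcpu_deliver_sipi_vector declarations actually appear in the diff below.

#include <linux/kvm_host.h>

#define KVM_APIC_INIT		0	/* assumed event bit */
#define KVM_APIC_SIPI		1	/* assumed event bit */

void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;	/* assumed home of pending_events */

	if (!apic || !apic->pending_events)
		return;

	/* INIT resets the vcpu; the BSP never enters wait-for-SIPI. */
	if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
		kvm_vcpu_reset(vcpu);
		if (kvm_vcpu_is_bsp(vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}

	/* SIPI merely fixes up the code segment of an INIT-reset AP. */
	if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events) &&
	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
		kvm_vcpu_deliver_sipi_vector(vcpu, apic->sipi_vector);
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}
}

With this shape, a kvm_apic_has_events() that simply tests apic->pending_events lets kvm_arch_vcpu_runnable end vcpu blocking as described.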
Diffstat (limited to 'arch/x86/include/asm/kvm_host.h')
-rw-r--r--	arch/x86/include/asm/kvm_host.h | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 348d85965ead..ef7f4a5cf8c7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -345,7 +345,6 @@ struct kvm_vcpu_arch {
 	unsigned long apic_attention;
 	int32_t apic_arb_prio;
 	int mp_state;
-	int sipi_vector;
 	u64 ia32_misc_enable_msr;
 	bool tpr_access_reporting;
 
@@ -819,6 +818,7 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
 
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
+void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector);
 
 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
 		    int reason, bool has_error_code, u32 error_code);
@@ -1002,6 +1002,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
 
 void kvm_define_shared_msr(unsigned index, u32 msr);
 void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
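As a postscript on the KVM_MP_STATE_SIPI_RECEIVED side effect described in the commit message, a sketch of how the set-mp_state ioctl path might perform the translation. Function and field names follow the sketch above; the real handler lives outside this header, in arch/x86/kvm/x86.c.

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
		/* Still accepted from user space, but translated right away. */
		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
	} else {
		vcpu->arch.mp_state = mp_state->mp_state;
	}
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	return 0;
}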