author		Avi Kivity <avi@qumranet.com>	2008-07-02 02:28:55 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-10-15 04:15:13 -0400
commit		668f612fa0d8d4120ec5dc0725d7e1ca3152a954 (patch)
tree		b1daa43b36d544abe32f0bb787fc459c85a6f766 /arch/x86/kvm/vmx.c
parent		cf393f75661f4b17451377b353833eb5502a9688 (diff)
KVM: VMX: Move nmi injection failure processing to vm exit path
Instead of processing NMI injection failure on the VM entry path, handle it on the VM exit path, in vmx_complete_interrupts(). This separates NMI injection from NMI post-processing and moves the NMI state from VT (VMCS) state into vcpu state: a new variable, nmi_injected, indicates that an injection is in progress.

Signed-off-by: Avi Kivity <avi@qumranet.com>
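For orientation, here is the entry-path state machine this patch introduces, as a condensed sketch rather than a compilable unit; vcpu, vmx_nmi_enabled(), enable_intr_window() and vmx_inject_nmi() are the names used in the diff below. nmi_pending marks an NMI the guest is still owed; nmi_injected marks one currently in flight:

	if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
		if (vmx_nmi_enabled(vcpu)) {
			/* commit to the injection */
			vcpu->arch.nmi_pending = false;
			vcpu->arch.nmi_injected = true;
		} else {
			/* NMIs blocked: request a window exit, retry later */
			enable_intr_window(vcpu);
			return;
		}
	}
	if (vcpu->arch.nmi_injected) {
		vmx_inject_nmi(vcpu);	/* programs VM_ENTRY_INTR_INFO_FIELD */
		enable_intr_window(vcpu);
		return;
	}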
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--	arch/x86/kvm/vmx.c	49
1 file changed, 32 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2adfacb00336..ce13b53d21c4 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2151,7 +2151,6 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 {
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
 			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
-	vcpu->arch.nmi_pending = 0;
 }
 
 static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
@@ -2820,8 +2819,11 @@ static void enable_intr_window(struct kvm_vcpu *vcpu)
 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 {
 	u32 exit_intr_info;
+	u32 idt_vectoring_info;
 	bool unblock_nmi;
 	u8 vector;
+	int type;
+	bool idtv_info_valid;
 
 	exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 	if (cpu_has_virtual_nmis()) {
@@ -2836,18 +2838,34 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
 				GUEST_INTR_STATE_NMI);
 	}
+
+	idt_vectoring_info = vmx->idt_vectoring_info;
+	idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
+	vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
+	type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
+	if (vmx->vcpu.arch.nmi_injected) {
+		/*
+		 * SDM 3: 25.7.1.2
+		 * Clear bit "block by NMI" before VM entry if a NMI delivery
+		 * faulted.
+		 */
+		if (idtv_info_valid && type == INTR_TYPE_NMI_INTR)
+			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+					GUEST_INTR_STATE_NMI);
+		else
+			vmx->vcpu.arch.nmi_injected = false;
+	}
 }
 
 static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u32 idtv_info_field, intr_info_field, exit_intr_info_field;
+	u32 idtv_info_field, intr_info_field;
 	int vector;
 
 	update_tpr_threshold(vcpu);
 
 	intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
-	exit_intr_info_field = vmcs_read32(VM_EXIT_INTR_INFO);
 	idtv_info_field = vmx->idt_vectoring_info;
 	if (intr_info_field & INTR_INFO_VALID_MASK) {
 		if (idtv_info_field & INTR_INFO_VALID_MASK) {
@@ -2871,17 +2889,6 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 
 		KVMTRACE_1D(REDELIVER_EVT, vcpu, idtv_info_field, handler);
 
-		/*
-		 * SDM 3: 25.7.1.2
-		 * Clear bit "block by NMI" before VM entry if a NMI delivery
-		 * faulted.
-		 */
-		if ((idtv_info_field & VECTORING_INFO_TYPE_MASK)
-		    == INTR_TYPE_NMI_INTR && cpu_has_virtual_nmis())
-			vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
-				vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
-				~GUEST_INTR_STATE_NMI);
-
 		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field
 			& ~INTR_INFO_RESVD_BITS_MASK);
 		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
@@ -2894,9 +2901,17 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 		return;
 	}
 	if (cpu_has_virtual_nmis()) {
-		if (vcpu->arch.nmi_pending) {
-			if (vmx_nmi_enabled(vcpu))
-				vmx_inject_nmi(vcpu);
+		if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
+			if (vmx_nmi_enabled(vcpu)) {
+				vcpu->arch.nmi_pending = false;
+				vcpu->arch.nmi_injected = true;
+			} else {
+				enable_intr_window(vcpu);
+				return;
+			}
+		}
+		if (vcpu->arch.nmi_injected) {
+			vmx_inject_nmi(vcpu);
 			enable_intr_window(vcpu);
 			return;
 		}
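And the exit-path counterpart, condensed for reading with the same caveats as the sketch in the commit message (this is the logic added to vmx_complete_interrupts() above, not new code): the IDT-vectoring information, which is only available at VM exit, tells us whether the in-flight NMI actually reached the guest.

	if (vmx->vcpu.arch.nmi_injected) {
		/*
		 * Delivery faulted (SDM 3: 25.7.1.2): clear "blocking by
		 * NMI" so the entry path can re-inject; nmi_injected
		 * stays set.
		 */
		if (idtv_info_valid && type == INTR_TYPE_NMI_INTR)
			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
					GUEST_INTR_STATE_NMI);
		else
			/* delivery completed: NMI no longer in flight */
			vmx->vcpu.arch.nmi_injected = false;
	}

This is the point of the move: injection failure is detected where the evidence lives, in the IDT-vectoring info at VM exit, instead of being inferred on the next VM entry.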