author     Avi Kivity <avi@qumranet.com>              2008-07-01 09:20:21 -0400
committer  Avi Kivity <avi@qumranet.com>              2008-10-15 04:15:12 -0400
commit     cf393f75661f4b17451377b353833eb5502a9688 (patch)
tree       4ec14e8f1acc5c7b34ecdbae8832b9bffa84e779 /arch/x86/kvm/vmx.c
parent     5b5c6a5a60801effb559e787a947885d9850a7da (diff)
KVM: Move NMI IRET fault processing to new vmx_complete_interrupts()
Currently most interrupt exit processing is handled on the entry path,
which is confusing. Move the NMI IRET fault processing to a new function,
vmx_complete_interrupts(), which is called on the vmexit path.
Signed-off-by: Avi Kivity <avi@qumranet.com>
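
For readers skimming the change, here is a minimal, self-contained sketch of the check that moves into vmx_complete_interrupts(): after a vmexit, guest NMI blocking is re-established only when the exit interrupted the guest's IRET and was not itself a double fault. This is an illustration, not the kernel code; the bit positions below are local constants reproduced from the VMX exit-interruption-information format as I understand it, and the VMCS accessors are omitted.

/*
 * Illustrative model only: mirrors the condition used by the new
 * vmx_complete_interrupts() helper.  Constants are defined locally
 * (assumed values) so the example compiles on its own.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INTR_INFO_VECTOR_MASK   0xffu       /* bits 7:0  - exception/IRQ vector   */
#define INTR_INFO_UNBLOCK_NMI   (1u << 12)  /* bit 12    - exit interrupted IRET  */
#define DF_VECTOR               8           /* #DF                                */

/* True when "blocking by NMI" should be re-set before the next VM entry. */
static bool should_restore_nmi_blocking(uint32_t exit_intr_info)
{
        bool unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
        uint8_t vector = exit_intr_info & INTR_INFO_VECTOR_MASK;

        /* SDM 3: 25.7.1.2 - re-block NMIs unless the fault was a #DF. */
        return unblock_nmi && vector != DF_VECTOR;
}

int main(void)
{
        /* Page fault (#PF, vector 14) taken while the guest was in IRET: re-block. */
        printf("%d\n", should_restore_nmi_blocking(INTR_INFO_UNBLOCK_NMI | 14));
        /* Double fault (#DF): leave NMI blocking as the hardware left it. */
        printf("%d\n", should_restore_nmi_blocking(INTR_INFO_UNBLOCK_NMI | DF_VECTOR));
        return 0;
}

The point of the commit is that this decision now happens once on the exit path, right after vmx_vcpu_run returns from the guest, instead of being interleaved with injection logic on the entry path in vmx_intr_assist.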
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--  arch/x86/kvm/vmx.c  36
1 file changed, 24 insertions, 12 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5a3a0326c277..2adfacb00336 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2817,6 +2817,27 @@ static void enable_intr_window(struct kvm_vcpu *vcpu)
         enable_irq_window(vcpu);
 }
 
+static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
+{
+        u32 exit_intr_info;
+        bool unblock_nmi;
+        u8 vector;
+
+        exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+        if (cpu_has_virtual_nmis()) {
+                unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
+                vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
+                /*
+                 * SDM 3: 25.7.1.2
+                 * Re-set bit "block by NMI" before VM entry if vmexit caused by
+                 * a guest IRET fault.
+                 */
+                if (unblock_nmi && vector != DF_VECTOR)
+                        vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+                                      GUEST_INTR_STATE_NMI);
+        }
+}
+
 static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 {
         struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2873,23 +2894,12 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
                 return;
         }
         if (cpu_has_virtual_nmis()) {
-                /*
-                 * SDM 3: 25.7.1.2
-                 * Re-set bit "block by NMI" before VM entry if vmexit caused by
-                 * a guest IRET fault.
-                 */
-                if ((exit_intr_info_field & INTR_INFO_UNBLOCK_NMI) &&
-                    (exit_intr_info_field & INTR_INFO_VECTOR_MASK) != 8)
-                        vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
-                                     vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) |
-                                     GUEST_INTR_STATE_NMI);
-                else if (vcpu->arch.nmi_pending) {
+                if (vcpu->arch.nmi_pending) {
                         if (vmx_nmi_enabled(vcpu))
                                 vmx_inject_nmi(vcpu);
                         enable_intr_window(vcpu);
                         return;
                 }
-
         }
         if (!kvm_cpu_has_interrupt(vcpu))
                 return;
@@ -3076,6 +3086,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 KVMTRACE_0D(NMI, vcpu, handler);
                 asm("int $2");
         }
+
+        vmx_complete_interrupts(vmx);
 }
 
 static void vmx_free_vmcs(struct kvm_vcpu *vcpu)