diff options
author | Avi Kivity <avi@redhat.com> | 2011-03-07 09:52:07 -0500 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2011-05-11 07:56:56 -0400 |
commit | 9d58b93192065f4b2ba6b880e9b0dab0bc11d0ba (patch) | |
tree | 819bc3357ebaa90854d90ffa2ab18948d9f5df39 /arch | |
parent | 69c730289011df706a1c9890d6e6c5ee822623c7 (diff) |
KVM: VMX: Avoid vmx_recover_nmi_blocking() when unneeded
When we haven't injected an interrupt, we don't need to recover
the NMI blocking state (since the guest can't set it by itself).
This allows us to avoid a VMREAD later on.
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kvm/vmx.c | 16 |
1 files changed, 15 insertions, 1 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 8f9e77edc016..53bf6ae493e3 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -129,6 +129,7 @@ struct vcpu_vmx { | |||
129 | int launched; | 129 | int launched; |
130 | u8 fail; | 130 | u8 fail; |
131 | u8 cpl; | 131 | u8 cpl; |
132 | bool nmi_known_unmasked; | ||
132 | u32 exit_intr_info; | 133 | u32 exit_intr_info; |
133 | u32 idt_vectoring_info; | 134 | u32 idt_vectoring_info; |
134 | ulong rflags; | 135 | ulong rflags; |
@@ -2959,6 +2960,7 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu) | |||
2959 | } | 2960 | } |
2960 | 2961 | ||
2961 | ++vcpu->stat.nmi_injections; | 2962 | ++vcpu->stat.nmi_injections; |
2963 | vmx->nmi_known_unmasked = false; | ||
2962 | if (vmx->rmode.vm86_active) { | 2964 | if (vmx->rmode.vm86_active) { |
2963 | if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR) != EMULATE_DONE) | 2965 | if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR) != EMULATE_DONE) |
2964 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); | 2966 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); |
@@ -2983,6 +2985,8 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) | |||
2983 | { | 2985 | { |
2984 | if (!cpu_has_virtual_nmis()) | 2986 | if (!cpu_has_virtual_nmis()) |
2985 | return to_vmx(vcpu)->soft_vnmi_blocked; | 2987 | return to_vmx(vcpu)->soft_vnmi_blocked; |
2988 | if (to_vmx(vcpu)->nmi_known_unmasked) | ||
2989 | return false; | ||
2986 | return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; | 2990 | return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; |
2987 | } | 2991 | } |
2988 | 2992 | ||
@@ -2996,6 +3000,7 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) | |||
2996 | vmx->vnmi_blocked_time = 0; | 3000 | vmx->vnmi_blocked_time = 0; |
2997 | } | 3001 | } |
2998 | } else { | 3002 | } else { |
3003 | vmx->nmi_known_unmasked = !masked; | ||
2999 | if (masked) | 3004 | if (masked) |
3000 | vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, | 3005 | vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, |
3001 | GUEST_INTR_STATE_NMI); | 3006 | GUEST_INTR_STATE_NMI); |
@@ -3527,9 +3532,11 @@ static int handle_task_switch(struct kvm_vcpu *vcpu) | |||
3527 | switch (type) { | 3532 | switch (type) { |
3528 | case INTR_TYPE_NMI_INTR: | 3533 | case INTR_TYPE_NMI_INTR: |
3529 | vcpu->arch.nmi_injected = false; | 3534 | vcpu->arch.nmi_injected = false; |
3530 | if (cpu_has_virtual_nmis()) | 3535 | if (cpu_has_virtual_nmis()) { |
3531 | vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, | 3536 | vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, |
3532 | GUEST_INTR_STATE_NMI); | 3537 | GUEST_INTR_STATE_NMI); |
3538 | vmx->nmi_known_unmasked = false; | ||
3539 | } | ||
3533 | break; | 3540 | break; |
3534 | case INTR_TYPE_EXT_INTR: | 3541 | case INTR_TYPE_EXT_INTR: |
3535 | case INTR_TYPE_SOFT_INTR: | 3542 | case INTR_TYPE_SOFT_INTR: |
@@ -3916,6 +3923,8 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) | |||
3916 | idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; | 3923 | idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; |
3917 | 3924 | ||
3918 | if (cpu_has_virtual_nmis()) { | 3925 | if (cpu_has_virtual_nmis()) { |
3926 | if (vmx->nmi_known_unmasked) | ||
3927 | return; | ||
3919 | unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; | 3928 | unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; |
3920 | vector = exit_intr_info & INTR_INFO_VECTOR_MASK; | 3929 | vector = exit_intr_info & INTR_INFO_VECTOR_MASK; |
3921 | /* | 3930 | /* |
@@ -3932,6 +3941,10 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) | |||
3932 | vector != DF_VECTOR && !idtv_info_valid) | 3941 | vector != DF_VECTOR && !idtv_info_valid) |
3933 | vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, | 3942 | vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, |
3934 | GUEST_INTR_STATE_NMI); | 3943 | GUEST_INTR_STATE_NMI); |
3944 | else | ||
3945 | vmx->nmi_known_unmasked = | ||
3946 | !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) | ||
3947 | & GUEST_INTR_STATE_NMI); | ||
3935 | } else if (unlikely(vmx->soft_vnmi_blocked)) | 3948 | } else if (unlikely(vmx->soft_vnmi_blocked)) |
3936 | vmx->vnmi_blocked_time += | 3949 | vmx->vnmi_blocked_time += |
3937 | ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time)); | 3950 | ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time)); |
@@ -3970,6 +3983,7 @@ static void __vmx_complete_interrupts(struct vcpu_vmx *vmx, | |||
3970 | */ | 3983 | */ |
3971 | vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, | 3984 | vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, |
3972 | GUEST_INTR_STATE_NMI); | 3985 | GUEST_INTR_STATE_NMI); |
3986 | vmx->nmi_known_unmasked = true; | ||
3973 | break; | 3987 | break; |
3974 | case INTR_TYPE_SOFT_EXCEPTION: | 3988 | case INTR_TYPE_SOFT_EXCEPTION: |
3975 | vmx->vcpu.arch.event_exit_inst_len = | 3989 | vmx->vcpu.arch.event_exit_inst_len = |