path: root/arch/x86
author	Avi Kivity <avi@redhat.com>	2010-07-20 07:31:20 -0400
committer	Avi Kivity <avi@redhat.com>	2010-10-24 04:52:51 -0400
commit	51aa01d13d4a64422cf8095205fc4a02322aca2c (patch)
tree	362110e17562f7f1e89c114af5447c8f0275bac3 /arch/x86
parent	3842d135ff246b6543f1df77f5600e12094a6845 (diff)
KVM: VMX: Split up vmx_complete_interrupts()
vmx_complete_interrupts() does too much, split it up:
- vmx_vcpu_run() gets the "cache important vmcs fields" part
- a new vmx_complete_atomic_exit() gets the parts that must be done atomically
- a new vmx_recover_nmi_blocking() does what its name says
- vmx_complete_interrupts() retains the event injection recovery code

This helps in reducing the work done in atomic context.

Signed-off-by: Avi Kivity <avi@redhat.com>
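For orientation, the post-VM-exit flow after this patch looks roughly like the sketch below. It is a condensed, illustrative summary assembled from the hunks that follow, not code from the tree; the wrapper name vmx_exit_flow_sketch() is invented for illustration, while the fields and callees are the ones introduced by the patch.

/* Illustrative sketch only -- condensed from the hunks below, not verbatim kernel code.
 * The wrapper name is hypothetical; the fields and callees are the patch's own. */
static void vmx_exit_flow_sketch(struct vcpu_vmx *vmx)
{
	/* vmx_vcpu_run(): cache the VMCS fields the later steps rely on */
	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
	vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	vmx_complete_atomic_exit(vmx);	/* machine check / NMI handling that must be done atomically */
	vmx_recover_nmi_blocking(vmx);	/* re-establish NMI blocking state after the exit */
	vmx_complete_interrupts(vmx);	/* event injection recovery only */
}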
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/vmx.c	39
1 file changed, 27 insertions, 12 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2ce2e0b13edb..927d8404505a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -125,6 +125,7 @@ struct vcpu_vmx {
 	unsigned long host_rsp;
 	int launched;
 	u8 fail;
+	u32 exit_intr_info;
 	u32 idt_vectoring_info;
 	struct shared_msr_entry *guest_msrs;
 	int nmsrs;
@@ -3775,18 +3776,9 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 	vmcs_write32(TPR_THRESHOLD, irr);
 }
 
-static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
+static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 {
-	u32 exit_intr_info;
-	u32 idt_vectoring_info = vmx->idt_vectoring_info;
-	bool unblock_nmi;
-	u8 vector;
-	int type;
-	bool idtv_info_valid;
-
-	exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-
-	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+	u32 exit_intr_info = vmx->exit_intr_info;
 
 	/* Handle machine checks before interrupts are enabled */
 	if ((vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
@@ -3801,8 +3793,16 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 		asm("int $2");
 		kvm_after_handle_nmi(&vmx->vcpu);
 	}
+}
 
-	idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
+static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
+{
+	u32 exit_intr_info = vmx->exit_intr_info;
+	bool unblock_nmi;
+	u8 vector;
+	bool idtv_info_valid;
+
+	idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
 	if (cpu_has_virtual_nmis()) {
 		unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
@@ -3824,6 +3824,16 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 	} else if (unlikely(vmx->soft_vnmi_blocked))
 		vmx->vnmi_blocked_time +=
 			ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
+}
+
+static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
+{
+	u32 idt_vectoring_info = vmx->idt_vectoring_info;
+	u8 vector;
+	int type;
+	bool idtv_info_valid;
+
+	idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
 	vmx->vcpu.arch.nmi_injected = false;
 	kvm_clear_exception_queue(&vmx->vcpu);
@@ -4036,6 +4046,11 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
 	vmx->launched = 1;
 
+	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+	vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+
+	vmx_complete_atomic_exit(vmx);
+	vmx_recover_nmi_blocking(vmx);
 	vmx_complete_interrupts(vmx);
 }
 