path: root/arch/x86/kvm/vmx.c
author     Avi Kivity <avi@redhat.com>              2009-12-30 05:40:26 -0500
committer  Marcelo Tosatti <mtosatti@redhat.com>    2010-03-01 10:35:50 -0500
commit     02daab21d94dc4cf01b2fd09863d59a436900322 (patch)
tree       e7caff282dd9019e1b19cd549609c6b991f29152 /arch/x86/kvm/vmx.c
parent     e8467fda83cdc9de53972fee0cd2e6916cf66f41 (diff)
KVM: Lazify fpu activation and deactivation
Defer fpu deactivation as much as possible - if the guest fpu is loaded,
keep it loaded until the next heavyweight exit (where we are forced to
unload it). This reduces unnecessary exits.

We also defer fpu activation on clts; while clts signals the intent to
use the fpu, we can't be sure the guest will actually use it.

Signed-off-by: Avi Kivity <avi@redhat.com>
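The mechanism is easiest to see outside the kernel. Below is a minimal
user-space C sketch of the state machine this patch implements; every
name in it (vcpu_fpu, on_nm_fault, on_heavyweight_exit) is an
illustrative stand-in, not a KVM symbol. Activation waits for a real
#NM fault instead of firing on clts, and deactivation waits for the
next heavyweight exit instead of happening eagerly:

#include <stdbool.h>
#include <stdio.h>

struct vcpu_fpu {
        bool active;            /* guest fpu currently loaded */
        bool nm_intercepted;    /* #NM faults trapped by the host */
};

/* Guest executes clts: note the intent only; keep trapping #NM. */
static void on_guest_clts(struct vcpu_fpu *f)
{
        /* Deferred activation: the guest may never touch the fpu. */
        printf("clts: fpu stays %s\n", f->active ? "active" : "inactive");
}

/* Guest touches the fpu while it is unloaded, so #NM traps to us. */
static void on_nm_fault(struct vcpu_fpu *f)
{
        f->active = true;
        f->nm_intercepted = false;      /* loaded: stop trapping #NM */
        printf("#NM: fpu activated\n");
}

/* Heavyweight exit (e.g. return to userspace): forced unload. */
static void on_heavyweight_exit(struct vcpu_fpu *f)
{
        if (f->active) {
                f->active = false;
                f->nm_intercepted = true;       /* re-arm the #NM trap */
                printf("heavyweight exit: fpu deactivated\n");
        }
}

int main(void)
{
        struct vcpu_fpu f = { .active = false, .nm_intercepted = true };

        on_guest_clts(&f);              /* no activation yet */
        on_nm_fault(&f);                /* first real use activates */
        on_heavyweight_exit(&f);        /* lazy deactivation happens here */
        return 0;
}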
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--  arch/x86/kvm/vmx.c  25
1 file changed, 9 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index dbcdb55094f7..d11be3fb7c80 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -66,7 +66,7 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 #define KVM_GUEST_CR0_MASK \
         (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \
-        (X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
+        (X86_CR0_WP | X86_CR0_NE | X86_CR0_MP)
 #define KVM_VM_CR0_ALWAYS_ON \
         (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
 #define KVM_CR4_GUEST_OWNED_BITS \
@@ -579,9 +579,8 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 {
         u32 eb;
 
-        eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
-        if (!vcpu->fpu_active)
-                eb |= 1u << NM_VECTOR;
+        eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR)
+                | (1u << NM_VECTOR);
         /*
          * Unconditionally intercept #DB so we can maintain dr6 without
          * reading it every exit.
@@ -595,6 +594,8 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
                 eb = ~0;
         if (enable_ept)
                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
+        if (vcpu->fpu_active)
+                eb &= ~(1u << NM_VECTOR);
         vmcs_write32(EXCEPTION_BITMAP, eb);
 }
 
@@ -806,9 +807,6 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
 
 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 {
-        if (!vcpu->fpu_active)
-                return;
-        vcpu->fpu_active = 0;
         vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
         update_exception_bitmap(vcpu);
 }
@@ -1737,8 +1735,6 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
         else
                 hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;
 
-        vmx_fpu_deactivate(vcpu);
-
         if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
                 enter_pmode(vcpu);
 
@@ -1757,12 +1753,12 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
         if (enable_ept)
                 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
 
+        if (!vcpu->fpu_active)
+                hw_cr0 |= X86_CR0_TS;
+
         vmcs_writel(CR0_READ_SHADOW, cr0);
         vmcs_writel(GUEST_CR0, hw_cr0);
         vcpu->arch.cr0 = cr0;
-
-        if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
-                vmx_fpu_activate(vcpu);
 }
 
 static u64 construct_eptp(unsigned long root_hpa)
@@ -1793,8 +1789,6 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
         vmx_flush_tlb(vcpu);
         vmcs_writel(GUEST_CR3, guest_cr3);
-        if (kvm_read_cr0_bits(vcpu, X86_CR0_PE))
-                vmx_fpu_deactivate(vcpu);
 }
 
 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -3002,11 +2996,9 @@ static int handle_cr(struct kvm_vcpu *vcpu)
                 };
                 break;
         case 2: /* clts */
-                vmx_fpu_deactivate(vcpu);
                 vcpu->arch.cr0 &= ~X86_CR0_TS;
                 vmcs_writel(CR0_READ_SHADOW, kvm_read_cr0(vcpu));
                 trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
-                vmx_fpu_activate(vcpu);
                 skip_emulated_instruction(vcpu);
                 return 1;
         case 1: /*mov from cr*/
@@ -4127,6 +4119,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
         .cache_reg = vmx_cache_reg,
         .get_rflags = vmx_get_rflags,
         .set_rflags = vmx_set_rflags,
+        .fpu_deactivate = vmx_fpu_deactivate,
 
         .tlb_flush = vmx_flush_tlb,
 
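Two pieces above carry the whole change: update_exception_bitmap() now
drops the #NM intercept only while vcpu->fpu_active is set, so the
first fpu touch after a lazy deactivation traps and re-activates it,
and the new .fpu_deactivate entry in vmx_x86_ops hands the unload
decision to common x86 code. The common-code caller is not on this
page (the diff is limited to vmx.c), so the sketch below only models
that indirection; the struct, function, and variable names are
stand-ins rather than kernel symbols:

#include <stdio.h>

struct vcpu;                    /* opaque stand-in */

struct fpu_ops {                /* hypothetical slice of an ops table */
        void (*fpu_deactivate)(struct vcpu *v);
};

/* Backend mechanism, what vmx_fpu_deactivate() does in the patch. */
static void vmx_like_fpu_deactivate(struct vcpu *v)
{
        (void)v;
        /* Real code: vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
         *            update_exception_bitmap(vcpu);        */
        printf("backend: set CR0.TS and re-arm the #NM intercept\n");
}

static const struct fpu_ops ops = {
        .fpu_deactivate = vmx_like_fpu_deactivate,
};

/* Common-code policy: call down only at the heavyweight-exit boundary. */
static void heavyweight_exit(struct vcpu *v)
{
        ops.fpu_deactivate(v);
}

int main(void)
{
        heavyweight_exit(NULL);
        return 0;
}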