author	Anthony Liguori <aliguori@us.ibm.com>	2007-04-27 02:29:49 -0400
committer	Avi Kivity <avi@qumranet.com>	2007-05-03 03:52:31 -0400
commit	2ab455ccceb07945368709ba852e49f4c3119331 (patch)
tree	de79805085d8dfcf5714c45e7873116c110bd182 /drivers/kvm
parent	25c4c2762e31a75403eca0dd59f2cab85e3a2532 (diff)
KVM: VMX: Add lazy FPU support for VT
Only save/restore the FPU host state when the guest is actually using the FPU.

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm')
-rw-r--r--	drivers/kvm/vmx.c	61
1 file changed, 56 insertions, 5 deletions
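
For orientation, the lazy-FPU scheme in this patch boils down to: start with fpu_active set, deactivate on guest address-space switches by setting CR0.TS and trapping #NM, reactivate on the first #NM (or clts) the guest takes, and only swap FPU state around guest entry/exit while fpu_active is set. The following minimal user-space sketch models just that state machine; the struct and helper names (load_guest_fpu(), fpu_activate(), vcpu_run_once(), ...) are illustrative stand-ins, not KVM interfaces.

/* Minimal sketch of the lazy-FPU state machine in this patch; the helpers
 * below stand in for fx_save()/fx_restore() and the VMCS bit operations. */
#include <stdbool.h>
#include <stdio.h>

struct vcpu {
	bool fpu_active;	/* guest has claimed the FPU since last deactivation */
};

static void load_guest_fpu(void) { puts("fx_save(host); fx_restore(guest)"); }
static void load_host_fpu(void)  { puts("fx_save(guest); fx_restore(host)"); }

/* Mirrors vmx_set_cr3() in the patch: on a guest address-space switch,
 * mark the FPU inactive; the real code also sets CR0.TS in GUEST_CR0 and
 * re-arms the #NM trap in EXCEPTION_BITMAP. */
static void fpu_deactivate(struct vcpu *vcpu)
{
	vcpu->fpu_active = false;
}

/* Mirrors the #NM and clts handling in the patch: the guest touched the
 * FPU, so mark it active; the real code also clears the #NM trap and
 * CR0.TS as appropriate. */
static void fpu_activate(struct vcpu *vcpu)
{
	vcpu->fpu_active = true;
}

/* Mirrors the vcpu run-loop hunks: swap FPU state only while active. */
static void vcpu_run_once(struct vcpu *vcpu)
{
	if (vcpu->fpu_active)
		load_guest_fpu();
	puts("  ...guest runs...");
	if (vcpu->fpu_active)
		load_host_fpu();
}

int main(void)
{
	struct vcpu vcpu = { .fpu_active = true };	/* as in vmx_create_vcpu() */

	vcpu_run_once(&vcpu);	/* FPU state is swapped */
	fpu_deactivate(&vcpu);	/* guest switched cr3 */
	vcpu_run_once(&vcpu);	/* no swap until the guest faults with #NM */
	fpu_activate(&vcpu);	/* guest raised #NM or executed clts */
	vcpu_run_once(&vcpu);	/* swapped again */
	return 0;
}
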
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 09608114e29a..5a2a68dec6bf 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -101,6 +101,13 @@ static inline int is_page_fault(u32 intr_info)
 		(INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
 }
 
+static inline int is_no_device(u32 intr_info)
+{
+	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
+			     INTR_INFO_VALID_MASK)) ==
+		(INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
+}
+
 static inline int is_external_interrupt(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
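
The new is_no_device() predicate mirrors is_page_fault(): a valid, exception-type exit whose vector is #NM (device-not-available, vector 7). Assuming the usual VMX interruption-info layout (vector in bits 7:0, type in bits 10:8, valid in bit 31), a #NM exit carries intr_info == 0x80000307. A standalone sketch of the same check, with illustrative constants rather than the vmx.h definitions:

/* Standalone version of the #NM check; the masks below follow the usual
 * VMX interruption-info layout and are illustrative, not from vmx.h. */
#include <assert.h>
#include <stdint.h>

#define NM_VECTOR	7u
#define VECTOR_MASK	0xffu		/* bits 7:0  */
#define TYPE_MASK	0x700u		/* bits 10:8 */
#define TYPE_EXCEPTION	(3u << 8)	/* hardware exception */
#define INFO_VALID	(1u << 31)

static int is_no_device(uint32_t intr_info)
{
	return (intr_info & (TYPE_MASK | VECTOR_MASK | INFO_VALID)) ==
	       (TYPE_EXCEPTION | NM_VECTOR | INFO_VALID);
}

int main(void)
{
	assert(is_no_device(0x80000307));	/* valid #NM exception */
	assert(!is_no_device(0x8000030e));	/* #PF (vector 14), not #NM */
	return 0;
}
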
@@ -216,6 +223,16 @@ static void vmcs_write64(unsigned long field, u64 value)
 #endif
 }
 
+static void vmcs_clear_bits(unsigned long field, u32 mask)
+{
+	vmcs_writel(field, vmcs_readl(field) & ~mask);
+}
+
+static void vmcs_set_bits(unsigned long field, u32 mask)
+{
+	vmcs_writel(field, vmcs_readl(field) | mask);
+}
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
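
These two helpers are plain read-modify-write wrappers around vmcs_readl()/vmcs_writel(); the later hunks use them to toggle CR0.TS in GUEST_CR0 and the NM_VECTOR bit in EXCEPTION_BITMAP as the FPU is lazily handed back and forth between host and guest.
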
@@ -833,6 +850,11 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	}
 #endif
 
+	if (!(cr0 & CR0_TS_MASK)) {
+		vcpu->fpu_active = 1;
+		vmcs_clear_bits(EXCEPTION_BITMAP, CR0_TS_MASK);
+	}
+
 	vmcs_writel(CR0_READ_SHADOW, cr0);
 	vmcs_writel(GUEST_CR0,
 		    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
@@ -842,6 +864,12 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	vmcs_writel(GUEST_CR3, cr3);
+
+	if (!(vcpu->cr0 & CR0_TS_MASK)) {
+		vcpu->fpu_active = 0;
+		vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
+		vmcs_set_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
+	}
 }
 
 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -1368,6 +1396,15 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		asm ("int $2");
 		return 1;
 	}
+
+	if (is_no_device(intr_info)) {
+		vcpu->fpu_active = 1;
+		vmcs_clear_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
+		if (!(vcpu->cr0 & CR0_TS_MASK))
+			vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
+		return 1;
+	}
+
 	error_code = 0;
 	rip = vmcs_readl(GUEST_RIP);
 	if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
@@ -1556,7 +1593,11 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		break;
 	case 2: /* clts */
 		vcpu_load_rsp_rip(vcpu);
-		set_cr0(vcpu, vcpu->cr0 & ~CR0_TS_MASK);
+		vcpu->fpu_active = 1;
+		vmcs_clear_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
+		vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
+		vcpu->cr0 &= ~CR0_TS_MASK;
+		vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
 		skip_emulated_instruction(vcpu);
 		return 1;
 	case 1: /*mov from cr*/
@@ -1806,8 +1847,14 @@ again:
 	if (vcpu->guest_debug.enabled)
 		kvm_guest_debug_pre(vcpu);
 
-	fx_save(vcpu->host_fx_image);
-	fx_restore(vcpu->guest_fx_image);
+	if (vcpu->fpu_active) {
+		fx_save(vcpu->host_fx_image);
+		fx_restore(vcpu->guest_fx_image);
+	}
+	/*
+	 * Loading guest fpu may have cleared host cr0.ts
+	 */
+	vmcs_writel(HOST_CR0, read_cr0());
 
 #ifdef CONFIG_X86_64
 	if (is_long_mode(vcpu)) {
@@ -1965,8 +2012,11 @@ again:
 	}
 #endif
 
-	fx_save(vcpu->guest_fx_image);
-	fx_restore(vcpu->host_fx_image);
+	if (vcpu->fpu_active) {
+		fx_save(vcpu->guest_fx_image);
+		fx_restore(vcpu->host_fx_image);
+	}
+
 	vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
 
 	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
@@ -2078,6 +2128,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 	vmcs_clear(vmcs);
 	vcpu->vmcs = vmcs;
 	vcpu->launched = 0;
+	vcpu->fpu_active = 1;
 
 	return 0;
 