author     Hollis Blanchard <hollisb@us.ibm.com>	2007-10-31 18:24:25 -0400
committer  Avi Kivity <avi@qumranet.com>	2008-01-30 10:53:00 -0500
commit     d075206073286dca84768137af0a0bf3d11f0663 (patch)
tree       84d6883c16bba8344203df82c9cd1ee4ba013dfa /drivers/kvm
parent     8776e5194f7bb847906e3561c4dba12ed66ebfb6 (diff)
KVM: Portability: Move x86 FPU handling to x86.c
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm')
-rw-r--r--  drivers/kvm/kvm.h      |   3
-rw-r--r--  drivers/kvm/kvm_main.c | 107
-rw-r--r--  drivers/kvm/x86.c      | 103
3 files changed, 108 insertions, 105 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index ef2a6a8328ea..469ca42c2a19 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -633,6 +633,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg);
 void kvm_arch_destroy_vm(struct kvm *kvm);
 
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
+
 __init void kvm_arch_init(void);
 
 static inline void kvm_guest_enter(void)
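
Note (illustrative, not part of the commit): with these two prototypes in the arch-independent header, each architecture port supplies its own implementation. A hypothetical non-x86 port that has no FXSAVE-style register image could satisfy the interface with trivial stubs along these lines; the -EINVAL return is an assumption here, not taken from any real port:

#include <linux/errno.h>
#include "kvm.h"	/* struct kvm_vcpu, struct kvm_fpu (header path as in this tree) */

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;	/* no x86-style FXSAVE image to report */
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}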
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 57573ebf02ba..7230f48ba08e 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -73,28 +73,6 @@ static inline int valid_vcpu(int n)
 	return likely(n >= 0 && n < KVM_MAX_VCPUS);
 }
 
-void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
-{
-	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
-		return;
-
-	vcpu->guest_fpu_loaded = 1;
-	fx_save(&vcpu->host_fx_image);
-	fx_restore(&vcpu->guest_fx_image);
-}
-EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
-
-void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
-{
-	if (!vcpu->guest_fpu_loaded)
-		return;
-
-	vcpu->guest_fpu_loaded = 0;
-	fx_save(&vcpu->guest_fx_image);
-	fx_restore(&vcpu->host_fx_image);
-}
-EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
-
 /*
  * Switches to specified vcpu, until a matching vcpu_put()
  */
@@ -294,26 +272,6 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-void fx_init(struct kvm_vcpu *vcpu)
-{
-	unsigned after_mxcsr_mask;
-
-	/* Initialize guest FPU by resetting ours and saving into guest's */
-	preempt_disable();
-	fx_save(&vcpu->host_fx_image);
-	fpu_init();
-	fx_save(&vcpu->guest_fx_image);
-	fx_restore(&vcpu->host_fx_image);
-	preempt_enable();
-
-	vcpu->cr0 |= X86_CR0_ET;
-	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
-	vcpu->guest_fx_image.mxcsr = 0x1f80;
-	memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
-	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
-}
-EXPORT_SYMBOL_GPL(fx_init);
-
 /*
  * Allocate some memory and give it an address in the guest physical address
  * space.
@@ -1422,67 +1380,6 @@ static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
 	return 0;
 }
 
-/*
- * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
- * we have asm/x86/processor.h
- */
-struct fxsave {
-	u16	cwd;
-	u16	swd;
-	u16	twd;
-	u16	fop;
-	u64	rip;
-	u64	rdp;
-	u32	mxcsr;
-	u32	mxcsr_mask;
-	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
-#ifdef CONFIG_X86_64
-	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
-#else
-	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
-#endif
-};
-
-static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
-{
-	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
-
-	vcpu_load(vcpu);
-
-	memcpy(fpu->fpr, fxsave->st_space, 128);
-	fpu->fcw = fxsave->cwd;
-	fpu->fsw = fxsave->swd;
-	fpu->ftwx = fxsave->twd;
-	fpu->last_opcode = fxsave->fop;
-	fpu->last_ip = fxsave->rip;
-	fpu->last_dp = fxsave->rdp;
-	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
-
-	vcpu_put(vcpu);
-
-	return 0;
-}
-
-static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
-{
-	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
-
-	vcpu_load(vcpu);
-
-	memcpy(fxsave->st_space, fpu->fpr, 128);
-	fxsave->cwd = fpu->fcw;
-	fxsave->swd = fpu->fsw;
-	fxsave->twd = fpu->ftwx;
-	fxsave->fop = fpu->last_opcode;
-	fxsave->rip = fpu->last_ip;
-	fxsave->rdp = fpu->last_dp;
-	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
-
-	vcpu_put(vcpu);
-
-	return 0;
-}
-
 static long kvm_vcpu_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
@@ -1613,7 +1510,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		struct kvm_fpu fpu;
 
 		memset(&fpu, 0, sizeof fpu);
-		r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu);
+		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
 		if (r)
 			goto out;
 		r = -EFAULT;
@@ -1628,7 +1525,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&fpu, argp, sizeof fpu))
 			goto out;
-		r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu);
+		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
 		if (r)
 			goto out;
 		r = 0;
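
Note (illustrative, not part of the commit): the handlers patched above back the KVM_GET_FPU and KVM_SET_FPU vcpu ioctls from <linux/kvm.h>. A minimal userspace sketch of that path, assuming vcpu_fd is a vcpu file descriptor obtained through KVM_CREATE_VCPU and with error handling kept minimal:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Zero the guest's x87 registers via the FPU ioctls. */
static int clear_guest_x87_regs(int vcpu_fd)
{
	struct kvm_fpu fpu;

	if (ioctl(vcpu_fd, KVM_GET_FPU, &fpu) < 0)	/* reaches kvm_arch_vcpu_ioctl_get_fpu() */
		return -1;
	memset(fpu.fpr, 0, sizeof(fpu.fpr));		/* clear the eight 16-byte ST registers */
	return ioctl(vcpu_fd, KVM_SET_FPU, &fpu);	/* reaches kvm_arch_vcpu_ioctl_set_fpu() */
}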
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 2cf7ebab50f4..ef1661f10b48 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -1785,3 +1785,106 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
+
+/*
+ * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
+ * we have asm/x86/processor.h
+ */
+struct fxsave {
+	u16	cwd;
+	u16	swd;
+	u16	twd;
+	u16	fop;
+	u64	rip;
+	u64	rdp;
+	u32	mxcsr;
+	u32	mxcsr_mask;
+	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
+#ifdef CONFIG_X86_64
+	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
+#else
+	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
+#endif
+};
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
+
+	vcpu_load(vcpu);
+
+	memcpy(fpu->fpr, fxsave->st_space, 128);
+	fpu->fcw = fxsave->cwd;
+	fpu->fsw = fxsave->swd;
+	fpu->ftwx = fxsave->twd;
+	fpu->last_opcode = fxsave->fop;
+	fpu->last_ip = fxsave->rip;
+	fpu->last_dp = fxsave->rdp;
+	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
+
+	vcpu_put(vcpu);
+
+	return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
+
+	vcpu_load(vcpu);
+
+	memcpy(fxsave->st_space, fpu->fpr, 128);
+	fxsave->cwd = fpu->fcw;
+	fxsave->swd = fpu->fsw;
+	fxsave->twd = fpu->ftwx;
+	fxsave->fop = fpu->last_opcode;
+	fxsave->rip = fpu->last_ip;
+	fxsave->rdp = fpu->last_dp;
+	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
+
+	vcpu_put(vcpu);
+
+	return 0;
+}
+
+void fx_init(struct kvm_vcpu *vcpu)
+{
+	unsigned after_mxcsr_mask;
+
+	/* Initialize guest FPU by resetting ours and saving into guest's */
+	preempt_disable();
+	fx_save(&vcpu->host_fx_image);
+	fpu_init();
+	fx_save(&vcpu->guest_fx_image);
+	fx_restore(&vcpu->host_fx_image);
+	preempt_enable();
+
+	vcpu->cr0 |= X86_CR0_ET;
+	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
+	vcpu->guest_fx_image.mxcsr = 0x1f80;
+	memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
+	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
+}
+EXPORT_SYMBOL_GPL(fx_init);
+
+void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+{
+	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
+		return;
+
+	vcpu->guest_fpu_loaded = 1;
+	fx_save(&vcpu->host_fx_image);
+	fx_restore(&vcpu->guest_fx_image);
+}
+EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
+
+void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+{
+	if (!vcpu->guest_fpu_loaded)
+		return;
+
+	vcpu->guest_fpu_loaded = 0;
+	fx_save(&vcpu->guest_fx_image);
+	fx_restore(&vcpu->host_fx_image);
+}
+EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
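
Note (illustrative, not part of the commit): kvm_load_guest_fpu()/kvm_put_guest_fpu() are the lazy FPU-switch helpers used around guest entry. A hypothetical caller showing only the intended pairing; the function name and the elided guest-entry step are placeholders, not code from this tree:

static void vcpu_enter_guest_sketch(struct kvm_vcpu *vcpu)
{
	kvm_load_guest_fpu(vcpu);	/* save host FXSAVE image, restore guest's (if fpu_active) */

	/* ... actual guest entry happens here ... */

	kvm_put_guest_fpu(vcpu);	/* save guest FXSAVE image, restore host's */
}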