author	Avi Kivity <avi@qumranet.com>	2007-02-21 11:04:26 -0500
committer	Avi Kivity <avi@qumranet.com>	2007-03-04 04:12:42 -0500
commit	bccf2150fe62dda5fb09efa2f64d2a234694eb48 (patch)
tree	b5e6fc6440b864ddd1c32c4cee1916a0c5484c63 /drivers
parent	c5ea76600653b1a242321734435cb1c54778941a (diff)
KVM: Per-vcpu inodes
Allocate a distinct inode for every vcpu in a VM. This has the following
benefits:

- the filp cachelines are no longer bounced when f_count is incremented on
  every ioctl()
- the API and internal code are distinctly clearer; for example, on the
  KVM_GET_REGS ioctl, there is no need to copy the vcpu number from
  userspace and then copy the registers back; the vcpu identity is derived
  from the fd used to make the call

Right now the performance benefits are completely theoretical since (a) we
don't support more than one vcpu per VM and (b) virtualization hardware
inefficiencies completely overwhelm any cacheline bouncing effects. But
both of these will change, and we need to prepare the API today.

Signed-off-by: Avi Kivity <avi@qumranet.com>
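For illustration only (not part of the patch): a minimal userspace sketch of
the call sequence this API yields. The ioctl names are the ones handled in
this patch; the exact argument structs are assumed to match this kernel's
<linux/kvm.h>, and error handling is omitted.

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		int vm = ioctl(kvm, KVM_CREATE_VM, 0);    /* VM fd, backed by a kvmfs inode */
		int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0); /* now returns a per-vcpu fd */
		struct kvm_regs regs;

		/* No vcpu number in the payload: identity comes from the fd itself. */
		ioctl(vcpu, KVM_GET_REGS, &regs);
		return 0;
	}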
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/kvm/kvm.h	3
-rw-r--r--	drivers/kvm/kvm_main.c	263
-rw-r--r--	drivers/kvm/svm.c	3
-rw-r--r--	drivers/kvm/vmx.c	3
4 files changed, 153 insertions, 119 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 41cc27de4d66..0d122bf889db 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -309,6 +309,7 @@ struct kvm {
 	int busy;
 	unsigned long rmap_overflow;
 	struct list_head vm_list;
+	struct file *filp;
 };
 
 struct kvm_stat {
@@ -343,7 +344,7 @@ struct kvm_arch_ops {
 	int (*vcpu_create)(struct kvm_vcpu *vcpu);
 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
 
-	struct kvm_vcpu *(*vcpu_load)(struct kvm_vcpu *vcpu);
+	void (*vcpu_load)(struct kvm_vcpu *vcpu);
 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
 	void (*vcpu_decache)(struct kvm_vcpu *vcpu);
 
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 6fb36c80e3e8..a593d092d85b 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -96,6 +96,9 @@ struct segment_descriptor_64 {
 
 #endif
 
+static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
+			   unsigned long arg);
+
 static struct inode *kvmfs_inode(struct file_operations *fops)
 {
 	int error = -ENOMEM;
@@ -246,24 +249,30 @@ int kvm_write_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
 }
 EXPORT_SYMBOL_GPL(kvm_write_guest);
 
-static int vcpu_slot(struct kvm_vcpu *vcpu)
+/*
+ * Switches to specified vcpu, until a matching vcpu_put()
+ */
+static void vcpu_load(struct kvm_vcpu *vcpu)
 {
-	return vcpu - vcpu->kvm->vcpus;
+	mutex_lock(&vcpu->mutex);
+	kvm_arch_ops->vcpu_load(vcpu);
 }
 
 /*
- * Switches to specified vcpu, until a matching vcpu_put()
+ * Switches to specified vcpu, until a matching vcpu_put(). Will return NULL
+ * if the slot is not populated.
  */
-static struct kvm_vcpu *vcpu_load(struct kvm *kvm, int vcpu_slot)
+static struct kvm_vcpu *vcpu_load_slot(struct kvm *kvm, int slot)
 {
-	struct kvm_vcpu *vcpu = &kvm->vcpus[vcpu_slot];
+	struct kvm_vcpu *vcpu = &kvm->vcpus[slot];
 
 	mutex_lock(&vcpu->mutex);
-	if (unlikely(!vcpu->vmcs)) {
+	if (!vcpu->vmcs) {
 		mutex_unlock(&vcpu->mutex);
 		return NULL;
 	}
-	return kvm_arch_ops->vcpu_load(vcpu);
+	kvm_arch_ops->vcpu_load(vcpu);
+	return vcpu;
 }
 
 static void vcpu_put(struct kvm_vcpu *vcpu)
@@ -336,9 +345,10 @@ static void kvm_free_physmem(struct kvm *kvm)
 
 static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu_load(vcpu->kvm, vcpu_slot(vcpu)))
+	if (!vcpu->vmcs)
 		return;
 
+	vcpu_load(vcpu);
 	kvm_mmu_destroy(vcpu);
 	vcpu_put(vcpu);
 	kvm_arch_ops->vcpu_free(vcpu);
@@ -725,7 +735,7 @@ raced:
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		struct kvm_vcpu *vcpu;
 
-		vcpu = vcpu_load(kvm, i);
+		vcpu = vcpu_load_slot(kvm, i);
 		if (!vcpu)
 			continue;
 		kvm_mmu_reset_context(vcpu);
@@ -791,8 +801,9 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (any) {
 		cleared = 0;
 		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			struct kvm_vcpu *vcpu = vcpu_load(kvm, i);
+			struct kvm_vcpu *vcpu;
 
+			vcpu = vcpu_load_slot(kvm, i);
 			if (!vcpu)
 				continue;
 			if (!cleared) {
@@ -1461,8 +1472,7 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 {
 	vcpu_put(vcpu);
 	cond_resched();
-	/* Cannot fail - no vcpu unplug yet. */
-	vcpu_load(vcpu->kvm, vcpu_slot(vcpu));
+	vcpu_load(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
@@ -1484,17 +1494,11 @@ void save_msrs(struct vmx_msr_entry *e, int n)
 }
 EXPORT_SYMBOL_GPL(save_msrs);
 
-static int kvm_vm_ioctl_run(struct kvm *kvm, struct kvm_run *kvm_run)
+static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	struct kvm_vcpu *vcpu;
 	int r;
 
-	if (!valid_vcpu(kvm_run->vcpu))
-		return -EINVAL;
-
-	vcpu = vcpu_load(kvm, kvm_run->vcpu);
-	if (!vcpu)
-		return -ENOENT;
+	vcpu_load(vcpu);
 
 	/* re-sync apic's tpr */
 	vcpu->cr8 = kvm_run->cr8;
@@ -1517,16 +1521,10 @@ static int kvm_vm_ioctl_run(struct kvm *kvm, struct kvm_run *kvm_run)
 	return r;
 }
 
-static int kvm_vm_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs)
+static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
+				   struct kvm_regs *regs)
 {
-	struct kvm_vcpu *vcpu;
-
-	if (!valid_vcpu(regs->vcpu))
-		return -EINVAL;
-
-	vcpu = vcpu_load(kvm, regs->vcpu);
-	if (!vcpu)
-		return -ENOENT;
+	vcpu_load(vcpu);
 
 	kvm_arch_ops->cache_regs(vcpu);
 
@@ -1563,16 +1561,10 @@ static int kvm_vm_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs)
 	return 0;
 }
 
-static int kvm_vm_ioctl_set_regs(struct kvm *kvm, struct kvm_regs *regs)
+static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
+				   struct kvm_regs *regs)
 {
-	struct kvm_vcpu *vcpu;
-
-	if (!valid_vcpu(regs->vcpu))
-		return -EINVAL;
-
-	vcpu = vcpu_load(kvm, regs->vcpu);
-	if (!vcpu)
-		return -ENOENT;
+	vcpu_load(vcpu);
 
 	vcpu->regs[VCPU_REGS_RAX] = regs->rax;
 	vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
@@ -1609,16 +1601,12 @@ static void get_segment(struct kvm_vcpu *vcpu,
 	return kvm_arch_ops->get_segment(vcpu, var, seg);
 }
 
-static int kvm_vm_ioctl_get_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
+static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+				    struct kvm_sregs *sregs)
 {
-	struct kvm_vcpu *vcpu;
 	struct descriptor_table dt;
 
-	if (!valid_vcpu(sregs->vcpu))
-		return -EINVAL;
-	vcpu = vcpu_load(kvm, sregs->vcpu);
-	if (!vcpu)
-		return -ENOENT;
+	vcpu_load(vcpu);
 
 	get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
 	get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
@@ -1660,18 +1648,14 @@ static void set_segment(struct kvm_vcpu *vcpu,
 	return kvm_arch_ops->set_segment(vcpu, var, seg);
 }
 
-static int kvm_vm_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
+static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+				    struct kvm_sregs *sregs)
 {
-	struct kvm_vcpu *vcpu;
 	int mmu_reset_needed = 0;
 	int i;
 	struct descriptor_table dt;
 
-	if (!valid_vcpu(sregs->vcpu))
-		return -EINVAL;
-	vcpu = vcpu_load(kvm, sregs->vcpu);
-	if (!vcpu)
-		return -ENOENT;
+	vcpu_load(vcpu);
 
 	set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
 	set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
@@ -1777,20 +1761,14 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
  *
  * @return number of msrs set successfully.
  */
-static int __msr_io(struct kvm *kvm, struct kvm_msrs *msrs,
+static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
 		    struct kvm_msr_entry *entries,
 		    int (*do_msr)(struct kvm_vcpu *vcpu,
 				  unsigned index, u64 *data))
 {
-	struct kvm_vcpu *vcpu;
 	int i;
 
-	if (!valid_vcpu(msrs->vcpu))
-		return -EINVAL;
-
-	vcpu = vcpu_load(kvm, msrs->vcpu);
-	if (!vcpu)
-		return -ENOENT;
+	vcpu_load(vcpu);
 
 	for (i = 0; i < msrs->nmsrs; ++i)
 		if (do_msr(vcpu, entries[i].index, &entries[i].data))
@@ -1806,7 +1784,7 @@ static int __msr_io(struct kvm *kvm, struct kvm_msrs *msrs,
  *
  * @return number of msrs set successfully.
  */
-static int msr_io(struct kvm *kvm, struct kvm_msrs __user *user_msrs,
+static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
 		  int (*do_msr)(struct kvm_vcpu *vcpu,
 				unsigned index, u64 *data),
 		  int writeback)
@@ -1834,7 +1812,7 @@ static int msr_io(struct kvm *kvm, struct kvm_msrs __user *user_msrs,
 	if (copy_from_user(entries, user_msrs->entries, size))
 		goto out_free;
 
-	r = n = __msr_io(kvm, &msrs, entries, do_msr);
+	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
 	if (r < 0)
 		goto out_free;
 
@@ -1853,38 +1831,31 @@ out:
 /*
  * Translate a guest virtual address to a guest physical address.
  */
-static int kvm_vm_ioctl_translate(struct kvm *kvm, struct kvm_translation *tr)
+static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+				    struct kvm_translation *tr)
 {
 	unsigned long vaddr = tr->linear_address;
-	struct kvm_vcpu *vcpu;
 	gpa_t gpa;
 
-	vcpu = vcpu_load(kvm, tr->vcpu);
-	if (!vcpu)
-		return -ENOENT;
-	spin_lock(&kvm->lock);
+	vcpu_load(vcpu);
+	spin_lock(&vcpu->kvm->lock);
 	gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
 	tr->physical_address = gpa;
 	tr->valid = gpa != UNMAPPED_GVA;
 	tr->writeable = 1;
 	tr->usermode = 0;
-	spin_unlock(&kvm->lock);
+	spin_unlock(&vcpu->kvm->lock);
 	vcpu_put(vcpu);
 
 	return 0;
 }
 
-static int kvm_vm_ioctl_interrupt(struct kvm *kvm, struct kvm_interrupt *irq)
+static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+				    struct kvm_interrupt *irq)
 {
-	struct kvm_vcpu *vcpu;
-
-	if (!valid_vcpu(irq->vcpu))
-		return -EINVAL;
 	if (irq->irq < 0 || irq->irq >= 256)
 		return -EINVAL;
-	vcpu = vcpu_load(kvm, irq->vcpu);
-	if (!vcpu)
-		return -ENOENT;
+	vcpu_load(vcpu);
 
 	set_bit(irq->irq, vcpu->irq_pending);
 	set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);
@@ -1894,17 +1865,12 @@ static int kvm_vm_ioctl_interrupt(struct kvm *kvm, struct kvm_interrupt *irq)
 	return 0;
 }
 
-static int kvm_vm_ioctl_debug_guest(struct kvm *kvm,
-				    struct kvm_debug_guest *dbg)
+static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+				      struct kvm_debug_guest *dbg)
 {
-	struct kvm_vcpu *vcpu;
 	int r;
 
-	if (!valid_vcpu(dbg->vcpu))
-		return -EINVAL;
-	vcpu = vcpu_load(kvm, dbg->vcpu);
-	if (!vcpu)
-		return -ENOENT;
+	vcpu_load(vcpu);
 
 	r = kvm_arch_ops->set_guest_debug(vcpu, dbg);
 
@@ -1913,6 +1879,59 @@ static int kvm_vm_ioctl_debug_guest(struct kvm *kvm,
 	return r;
 }
 
+static int kvm_vcpu_release(struct inode *inode, struct file *filp)
+{
+	struct kvm_vcpu *vcpu = filp->private_data;
+
+	fput(vcpu->kvm->filp);
+	return 0;
+}
+
+static struct file_operations kvm_vcpu_fops = {
+	.release        = kvm_vcpu_release,
+	.unlocked_ioctl = kvm_vcpu_ioctl,
+	.compat_ioctl   = kvm_vcpu_ioctl,
+};
+
+/*
+ * Allocates an inode for the vcpu.
+ */
+static int create_vcpu_fd(struct kvm_vcpu *vcpu)
+{
+	int fd, r;
+	struct inode *inode;
+	struct file *file;
+
+	atomic_inc(&vcpu->kvm->filp->f_count);
+	inode = kvmfs_inode(&kvm_vcpu_fops);
+	if (IS_ERR(inode)) {
+		r = PTR_ERR(inode);
+		goto out1;
+	}
+
+	file = kvmfs_file(inode, vcpu);
+	if (IS_ERR(file)) {
+		r = PTR_ERR(file);
+		goto out2;
+	}
+
+	r = get_unused_fd();
+	if (r < 0)
+		goto out3;
+	fd = r;
+	fd_install(fd, file);
+
+	return fd;
+
+out3:
+	fput(file);
+out2:
+	iput(inode);
+out1:
+	fput(vcpu->kvm->filp);
+	return r;
+}
+
 /*
  * Creates some virtual cpus. Good luck creating more than one.
  */
@@ -1955,7 +1974,11 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 	if (r < 0)
 		goto out_free_vcpus;
 
-	return 0;
+	r = create_vcpu_fd(vcpu);
+	if (r < 0)
+		goto out_free_vcpus;
+
+	return r;
 
 out_free_vcpus:
 	kvm_free_vcpu(vcpu);
@@ -1964,26 +1987,21 @@ out:
 	return r;
 }
 
-static long kvm_vm_ioctl(struct file *filp,
-			 unsigned int ioctl, unsigned long arg)
+static long kvm_vcpu_ioctl(struct file *filp,
+			   unsigned int ioctl, unsigned long arg)
 {
-	struct kvm *kvm = filp->private_data;
+	struct kvm_vcpu *vcpu = filp->private_data;
 	void __user *argp = (void __user *)arg;
 	int r = -EINVAL;
 
 	switch (ioctl) {
-	case KVM_CREATE_VCPU:
-		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
-		if (r)
-			goto out;
-		break;
 	case KVM_RUN: {
 		struct kvm_run kvm_run;
 
 		r = -EFAULT;
 		if (copy_from_user(&kvm_run, argp, sizeof kvm_run))
 			goto out;
-		r = kvm_vm_ioctl_run(kvm, &kvm_run);
+		r = kvm_vcpu_ioctl_run(vcpu, &kvm_run);
 		if (r < 0 && r != -EINTR)
 			goto out;
 		if (copy_to_user(argp, &kvm_run, sizeof kvm_run)) {
@@ -1995,10 +2013,8 @@ static long kvm_vm_ioctl(struct file *filp,
 	case KVM_GET_REGS: {
 		struct kvm_regs kvm_regs;
 
-		r = -EFAULT;
-		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
-			goto out;
-		r = kvm_vm_ioctl_get_regs(kvm, &kvm_regs);
+		memset(&kvm_regs, 0, sizeof kvm_regs);
+		r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
 		if (r)
 			goto out;
 		r = -EFAULT;
@@ -2013,7 +2029,7 @@ static long kvm_vm_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
 			goto out;
-		r = kvm_vm_ioctl_set_regs(kvm, &kvm_regs);
+		r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
 		if (r)
 			goto out;
 		r = 0;
@@ -2022,10 +2038,8 @@ static long kvm_vm_ioctl(struct file *filp,
 	case KVM_GET_SREGS: {
 		struct kvm_sregs kvm_sregs;
 
-		r = -EFAULT;
-		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
-			goto out;
-		r = kvm_vm_ioctl_get_sregs(kvm, &kvm_sregs);
+		memset(&kvm_sregs, 0, sizeof kvm_sregs);
+		r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
 		if (r)
 			goto out;
 		r = -EFAULT;
@@ -2040,7 +2054,7 @@ static long kvm_vm_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
 			goto out;
-		r = kvm_vm_ioctl_set_sregs(kvm, &kvm_sregs);
+		r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
 		if (r)
 			goto out;
 		r = 0;
@@ -2052,7 +2066,7 @@ static long kvm_vm_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&tr, argp, sizeof tr))
 			goto out;
-		r = kvm_vm_ioctl_translate(kvm, &tr);
+		r = kvm_vcpu_ioctl_translate(vcpu, &tr);
 		if (r)
 			goto out;
 		r = -EFAULT;
@@ -2067,7 +2081,7 @@ static long kvm_vm_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&irq, argp, sizeof irq))
 			goto out;
-		r = kvm_vm_ioctl_interrupt(kvm, &irq);
+		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
 		if (r)
 			goto out;
 		r = 0;
@@ -2079,12 +2093,38 @@ static long kvm_vm_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&dbg, argp, sizeof dbg))
 			goto out;
-		r = kvm_vm_ioctl_debug_guest(kvm, &dbg);
+		r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
 		if (r)
 			goto out;
 		r = 0;
 		break;
 	}
+	case KVM_GET_MSRS:
+		r = msr_io(vcpu, argp, get_msr, 1);
+		break;
+	case KVM_SET_MSRS:
+		r = msr_io(vcpu, argp, do_set_msr, 0);
+		break;
+	default:
+		;
+	}
+out:
+	return r;
+}
+
+static long kvm_vm_ioctl(struct file *filp,
+			 unsigned int ioctl, unsigned long arg)
+{
+	struct kvm *kvm = filp->private_data;
+	void __user *argp = (void __user *)arg;
+	int r = -EINVAL;
+
+	switch (ioctl) {
+	case KVM_CREATE_VCPU:
+		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
+		if (r < 0)
+			goto out;
+		break;
 	case KVM_SET_MEMORY_REGION: {
 		struct kvm_memory_region kvm_mem;
 
@@ -2107,12 +2147,6 @@ static long kvm_vm_ioctl(struct file *filp,
 			goto out;
 		break;
 	}
-	case KVM_GET_MSRS:
-		r = msr_io(kvm, argp, get_msr, 1);
-		break;
-	case KVM_SET_MSRS:
-		r = msr_io(kvm, argp, do_set_msr, 0);
-		break;
 	default:
 		;
 	}
@@ -2182,6 +2216,7 @@ static int kvm_dev_ioctl_create_vm(void)
 		r = PTR_ERR(file);
 		goto out3;
 	}
+	kvm->filp = file;
 
 	r = get_unused_fd();
 	if (r < 0)
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 5a200c0b4b48..3d8ea7ac2ecc 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -600,10 +600,9 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 	kfree(vcpu->svm);
 }
 
-static struct kvm_vcpu *svm_vcpu_load(struct kvm_vcpu *vcpu)
+static void svm_vcpu_load(struct kvm_vcpu *vcpu)
 {
 	get_cpu();
-	return vcpu;
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index ff956a6302ec..c07178e61122 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -204,7 +204,7 @@ static void vmcs_write64(unsigned long field, u64 value)
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
  */
-static struct kvm_vcpu *vmx_vcpu_load(struct kvm_vcpu *vcpu)
+static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
 {
 	u64 phys_addr = __pa(vcpu->vmcs);
 	int cpu;
@@ -242,7 +242,6 @@ static struct kvm_vcpu *vmx_vcpu_load(struct kvm_vcpu *vcpu)
 		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
 		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
 	}
-	return vcpu;
 }
 
 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
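
For illustration only (not part of the patch): create_vcpu_fd() takes an
extra reference on the VM's struct file (the atomic_inc of f_count) and
kvm_vcpu_release() drops it with fput(), so the VM stays alive while any
vcpu fd is open. A minimal sketch of the resulting lifetime rule, under the
same assumptions as the example above:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		int vm = ioctl(kvm, KVM_CREATE_VM, 0);
		int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
		struct kvm_regs regs;

		close(vm);                        /* vcpu fd still pins the VM's file */
		ioctl(vcpu, KVM_GET_REGS, &regs); /* remains valid */
		close(vcpu);                      /* drops the pinning reference */
		close(kvm);
		return 0;
	}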