author     Linus Torvalds <torvalds@linux-foundation.org>  2014-12-18 19:05:28 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-12-18 19:05:28 -0500
commit     66dcff86ba40eebb5133cccf450878f2bba102ef (patch)
tree       e7eb49ad9316989a529b00303d2dd2cffa61a7f5 /virt
parent     91ed9e8a32d9a76adc59c83f8b40024076cf8a02 (diff)
parent     2c4aa55a6af070262cca425745e8e54310e96b8d (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM update from Paolo Bonzini:
 "3.19 changes for KVM:

   - spring cleaning: removed support for IA64, and for hardware-
     assisted virtualization on the PPC970

   - ARM, PPC, s390 all had only small fixes

  For x86:

   - small performance improvements (though only on weird guests)

   - usual round of hardware-compliancy fixes from Nadav

   - APICv fixes

   - XSAVES support for hosts and guests.  XSAVES hosts were broken
     because the (non-KVM) XSAVES patches inadvertently changed the KVM
     userspace ABI whenever XSAVES was enabled; hence, this part is
     going to stable.  Guest support is just a matter of exposing the
     feature and CPUID leaves support"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (179 commits)
  KVM: move APIC types to arch/x86/
  KVM: PPC: Book3S: Enable in-kernel XICS emulation by default
  KVM: PPC: Book3S HV: Improve H_CONFER implementation
  KVM: PPC: Book3S HV: Fix endianness of instruction obtained from HEIR register
  KVM: PPC: Book3S HV: Remove code for PPC970 processors
  KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions
  KVM: PPC: Book3S HV: Simplify locking around stolen time calculations
  arch: powerpc: kvm: book3s_paired_singles.c: Remove unused function
  arch: powerpc: kvm: book3s_pr.c: Remove unused function
  arch: powerpc: kvm: book3s.c: Remove some unused functions
  arch: powerpc: kvm: book3s_32_mmu.c: Remove unused function
  KVM: PPC: Book3S HV: Check wait conditions before sleeping in kvmppc_vcore_blocked
  KVM: PPC: Book3S HV: ptes are big endian
  KVM: PPC: Book3S HV: Fix inaccuracies in ICP emulation for H_IPI
  KVM: PPC: Book3S HV: Fix KSM memory corruption
  KVM: PPC: Book3S HV: Fix an issue where guest is paused on receiving HMI
  KVM: PPC: Book3S HV: Fix computation of tlbie operand
  KVM: PPC: Book3S HV: Add missing HPTE unlock
  KVM: PPC: BookE: Improve irq inject tracepoint
  arm/arm64: KVM: Require in-kernel vgic for the arch timers
  ...
Diffstat (limited to 'virt')
-rw-r--r--   virt/kvm/arm/arch_timer.c     30
-rw-r--r--   virt/kvm/arm/vgic.c          116
-rw-r--r--   virt/kvm/assigned-dev.c     1026
-rw-r--r--   virt/kvm/eventfd.c             7
-rw-r--r--   virt/kvm/ioapic.c            687
-rw-r--r--   virt/kvm/ioapic.h            104
-rw-r--r--   virt/kvm/iommu.c             358
-rw-r--r--   virt/kvm/irq_comm.c          369
-rw-r--r--   virt/kvm/kvm_main.c          133
9 files changed, 140 insertions, 2690 deletions
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 22fa819a9b6a..1c0772b340d8 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -61,12 +61,14 @@ static void timer_disarm(struct arch_timer_cpu *timer)
 
 static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
 {
+        int ret;
         struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 
         timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
-        kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
-                            timer->irq->irq,
-                            timer->irq->level);
+        ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+                                  timer->irq->irq,
+                                  timer->irq->level);
+        WARN_ON(ret);
 }
 
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
@@ -307,12 +309,24 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
         timer_disarm(timer);
 }
 
-int kvm_timer_init(struct kvm *kvm)
+void kvm_timer_enable(struct kvm *kvm)
 {
-        if (timecounter && wqueue) {
-                kvm->arch.timer.cntvoff = kvm_phys_timer_read();
+        if (kvm->arch.timer.enabled)
+                return;
+
+        /*
+         * There is a potential race here between VCPUs starting for the first
+         * time, which may be enabling the timer multiple times.  That doesn't
+         * hurt though, because we're just setting a variable to the same
+         * variable that it already was.  The important thing is that all
+         * VCPUs have the enabled variable set, before entering the guest, if
+         * the arch timers are enabled.
+         */
+        if (timecounter && wqueue)
                 kvm->arch.timer.enabled = 1;
-        }
+}
 
-        return 0;
+void kvm_timer_init(struct kvm *kvm)
+{
+        kvm->arch.timer.cntvoff = kvm_phys_timer_read();
 }
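
The rework above splits timer setup in two: kvm_timer_init() records cntvoff once at VM creation, while kvm_timer_enable() may be hit concurrently by every VCPU on first run and relies on the benign race described in the comment. A standalone sketch of why that race is harmless (ordinary userspace C, not kernel code; the names are illustrative, and the unsynchronized flag mirrors the kernel's own plain int):

    #include <pthread.h>
    #include <stdio.h>

    static int timer_enabled;        /* stands in for kvm->arch.timer.enabled */

    /* Every "VCPU" may run this concurrently on first entry; all writers
     * store the same value, so any interleaving yields the same result. */
    static void *vcpu_first_run(void *arg)
    {
        (void)arg;
        if (timer_enabled)
            return NULL;             /* someone already enabled it */
        timer_enabled = 1;           /* idempotent store; double-set is harmless */
        return NULL;
    }

    int main(void)
    {
        pthread_t t[4];
        for (int i = 0; i < 4; i++)
            pthread_create(&t[i], NULL, vcpu_first_run, NULL);
        for (int i = 0; i < 4; i++)
            pthread_join(t[i], NULL);
        printf("enabled = %d\n", timer_enabled);   /* always 1 */
        return 0;
    }

Build with gcc -pthread; the flag ends up 1 no matter how the threads interleave, which is exactly the property the comment in kvm_timer_enable() depends on.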
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index aacdb59f30de..03affc7bf453 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -91,6 +91,7 @@
 #define ACCESS_WRITE_VALUE      (3 << 1)
 #define ACCESS_WRITE_MASK(x)    ((x) & (3 << 1))
 
+static int vgic_init(struct kvm *kvm);
 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
 static void vgic_update_state(struct kvm *kvm);
@@ -1607,7 +1608,7 @@ static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
         }
 }
 
-static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
+static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
                                     unsigned int irq_num, bool level)
 {
         struct vgic_dist *dist = &kvm->arch.vgic;
@@ -1643,9 +1644,10 @@ static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
                         vgic_dist_irq_clear_level(vcpu, irq_num);
                         if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
                                 vgic_dist_irq_clear_pending(vcpu, irq_num);
-                } else {
-                        vgic_dist_irq_clear_pending(vcpu, irq_num);
                 }
+
+                ret = false;
+                goto out;
         }
 
         enabled = vgic_irq_is_enabled(vcpu, irq_num);
@@ -1672,7 +1674,7 @@ static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 out:
         spin_unlock(&dist->lock);
 
-        return ret;
+        return ret ? cpuid : -EINVAL;
 }
 
 /**
@@ -1692,11 +1694,26 @@ out:
 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
                         bool level)
 {
-        if (likely(vgic_initialized(kvm)) &&
-            vgic_update_irq_pending(kvm, cpuid, irq_num, level))
-                vgic_kick_vcpus(kvm);
+        int ret = 0;
+        int vcpu_id;
 
-        return 0;
+        if (unlikely(!vgic_initialized(kvm))) {
+                mutex_lock(&kvm->lock);
+                ret = vgic_init(kvm);
+                mutex_unlock(&kvm->lock);
+
+                if (ret)
+                        goto out;
+        }
+
+        vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
+        if (vcpu_id >= 0) {
+                /* kick the specified vcpu */
+                kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id));
+        }
+
+out:
+        return ret;
 }
 
 static irqreturn_t vgic_maintenance_handler(int irq, void *data)
@@ -1726,39 +1743,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
 
         int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
         vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
-        vgic_cpu->vgic_irq_lr_map = kzalloc(nr_irqs, GFP_KERNEL);
+        vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
 
         if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
                 kvm_vgic_vcpu_destroy(vcpu);
                 return -ENOMEM;
         }
 
-        return 0;
-}
-
-/**
- * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
- * @vcpu: pointer to the vcpu struct
- *
- * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
- * this vcpu and enable the VGIC for this VCPU
- */
-static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
-{
-        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-        int i;
-
-        for (i = 0; i < dist->nr_irqs; i++) {
-                if (i < VGIC_NR_PPIS)
-                        vgic_bitmap_set_irq_val(&dist->irq_enabled,
-                                                vcpu->vcpu_id, i, 1);
-                if (i < VGIC_NR_PRIVATE_IRQS)
-                        vgic_bitmap_set_irq_val(&dist->irq_cfg,
-                                                vcpu->vcpu_id, i, VGIC_CFG_EDGE);
-
-                vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
-        }
+        memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);
 
         /*
          * Store the number of LRs per vcpu, so we don't have to go
@@ -1767,7 +1759,7 @@ static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
          */
         vgic_cpu->nr_lr = vgic->nr_lr;
 
-        vgic_enable(vcpu);
+        return 0;
 }
 
 void kvm_vgic_destroy(struct kvm *kvm)
@@ -1798,20 +1790,21 @@ void kvm_vgic_destroy(struct kvm *kvm)
         dist->irq_spi_cpu = NULL;
         dist->irq_spi_target = NULL;
         dist->irq_pending_on_cpu = NULL;
+        dist->nr_cpus = 0;
 }
 
 /*
  * Allocate and initialize the various data structures. Must be called
  * with kvm->lock held!
  */
-static int vgic_init_maps(struct kvm *kvm)
+static int vgic_init(struct kvm *kvm)
 {
         struct vgic_dist *dist = &kvm->arch.vgic;
         struct kvm_vcpu *vcpu;
         int nr_cpus, nr_irqs;
-        int ret, i;
+        int ret, i, vcpu_id;
 
-        if (dist->nr_cpus)      /* Already allocated */
+        if (vgic_initialized(kvm))
                 return 0;
 
         nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
@@ -1859,16 +1852,28 @@ static int vgic_init_maps(struct kvm *kvm)
         if (ret)
                 goto out;
 
-        kvm_for_each_vcpu(i, vcpu, kvm) {
+        for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
+                vgic_set_target_reg(kvm, 0, i);
+
+        kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
                 ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
                 if (ret) {
                         kvm_err("VGIC: Failed to allocate vcpu memory\n");
                         break;
                 }
-        }
 
-        for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
-                vgic_set_target_reg(kvm, 0, i);
+                for (i = 0; i < dist->nr_irqs; i++) {
+                        if (i < VGIC_NR_PPIS)
+                                vgic_bitmap_set_irq_val(&dist->irq_enabled,
+                                                        vcpu->vcpu_id, i, 1);
+                        if (i < VGIC_NR_PRIVATE_IRQS)
+                                vgic_bitmap_set_irq_val(&dist->irq_cfg,
+                                                        vcpu->vcpu_id, i,
+                                                        VGIC_CFG_EDGE);
+                }
+
+                vgic_enable(vcpu);
+        }
 
 out:
         if (ret)
@@ -1878,25 +1883,23 @@ out:
 }
 
 /**
- * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
+ * kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs
  * @kvm: pointer to the kvm struct
  *
  * Map the virtual CPU interface into the VM before running any VCPUs.  We
  * can't do this at creation time, because user space must first set the
- * virtual CPU interface address in the guest physical address space.  Also
- * initialize the ITARGETSRn regs to 0 on the emulated distributor.
+ * virtual CPU interface address in the guest physical address space.
  */
-int kvm_vgic_init(struct kvm *kvm)
+int kvm_vgic_map_resources(struct kvm *kvm)
 {
-        struct kvm_vcpu *vcpu;
-        int ret = 0, i;
+        int ret = 0;
 
         if (!irqchip_in_kernel(kvm))
                 return 0;
 
         mutex_lock(&kvm->lock);
 
-        if (vgic_initialized(kvm))
+        if (vgic_ready(kvm))
                 goto out;
 
         if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
@@ -1906,7 +1909,11 @@ int kvm_vgic_init(struct kvm *kvm)
                 goto out;
         }
 
-        ret = vgic_init_maps(kvm);
+        /*
+         * Initialize the vgic if this hasn't already been done on demand by
+         * accessing the vgic state from userspace.
+         */
+        ret = vgic_init(kvm);
         if (ret) {
                 kvm_err("Unable to allocate maps\n");
                 goto out;
@@ -1920,9 +1927,6 @@ int kvm_vgic_init(struct kvm *kvm)
                 goto out;
         }
 
-        kvm_for_each_vcpu(i, vcpu, kvm)
-                kvm_vgic_vcpu_init(vcpu);
-
         kvm->arch.vgic.ready = true;
 out:
         if (ret)
@@ -2167,7 +2171,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 
         mutex_lock(&dev->kvm->lock);
 
-        ret = vgic_init_maps(dev->kvm);
+        ret = vgic_init(dev->kvm);
         if (ret)
                 goto out;
 
@@ -2289,7 +2293,7 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 
         mutex_lock(&dev->kvm->lock);
 
-        if (vgic_initialized(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
+        if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
                 ret = -EBUSY;
         else
                 dev->kvm->arch.vgic.nr_irqs = val;
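
Taken together, the vgic changes above do two things: kvm_vgic_inject_irq() now initializes the vgic lazily under kvm->lock instead of requiring init up front, and vgic_update_irq_pending() reports which VCPU the interrupt landed on so the caller can kick exactly one VCPU instead of all of them. A minimal standalone illustration of that return-value convention (plain C, not kernel code; the pending-check is an invented stand-in for the real bookkeeping):

    #include <stdio.h>

    static void kick_vcpu(int vcpu_id)
    {
        printf("kick vcpu %d\n", vcpu_id);     /* kvm_vcpu_kick() stand-in */
    }

    /* Mirrors "return ret ? cpuid : -EINVAL" above: a non-negative return
     * names the VCPU to wake, a negative one means nothing became pending. */
    static int update_irq_pending(int cpuid, int level)
    {
        int became_pending = level;            /* stand-in logic */
        return became_pending ? cpuid : -1;
    }

    int main(void)
    {
        int vcpu_id = update_irq_pending(2, 1);
        if (vcpu_id >= 0)
            kick_vcpu(vcpu_id);                /* one wakeup, not NR_VCPUS */
        return 0;
    }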
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
deleted file mode 100644
index e05000e200d2..000000000000
--- a/virt/kvm/assigned-dev.c
+++ /dev/null
@@ -1,1026 +0,0 @@
1/*
2 * Kernel-based Virtual Machine - device assignment support
3 *
4 * Copyright (C) 2010 Red Hat, Inc. and/or its affiliates.
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2. See
7 * the COPYING file in the top-level directory.
8 *
9 */
10
11#include <linux/kvm_host.h>
12#include <linux/kvm.h>
13#include <linux/uaccess.h>
14#include <linux/vmalloc.h>
15#include <linux/errno.h>
16#include <linux/spinlock.h>
17#include <linux/pci.h>
18#include <linux/interrupt.h>
19#include <linux/slab.h>
20#include <linux/namei.h>
21#include <linux/fs.h>
22#include "irq.h"
23
24static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
25 int assigned_dev_id)
26{
27 struct list_head *ptr;
28 struct kvm_assigned_dev_kernel *match;
29
30 list_for_each(ptr, head) {
31 match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
32 if (match->assigned_dev_id == assigned_dev_id)
33 return match;
34 }
35 return NULL;
36}
37
38static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
39 *assigned_dev, int irq)
40{
41 int i, index;
42 struct msix_entry *host_msix_entries;
43
44 host_msix_entries = assigned_dev->host_msix_entries;
45
46 index = -1;
47 for (i = 0; i < assigned_dev->entries_nr; i++)
48 if (irq == host_msix_entries[i].vector) {
49 index = i;
50 break;
51 }
52 if (index < 0)
53 printk(KERN_WARNING "Fail to find correlated MSI-X entry!\n");
54
55 return index;
56}
57
58static irqreturn_t kvm_assigned_dev_intx(int irq, void *dev_id)
59{
60 struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
61 int ret;
62
63 spin_lock(&assigned_dev->intx_lock);
64 if (pci_check_and_mask_intx(assigned_dev->dev)) {
65 assigned_dev->host_irq_disabled = true;
66 ret = IRQ_WAKE_THREAD;
67 } else
68 ret = IRQ_NONE;
69 spin_unlock(&assigned_dev->intx_lock);
70
71 return ret;
72}
73
74static void
75kvm_assigned_dev_raise_guest_irq(struct kvm_assigned_dev_kernel *assigned_dev,
76 int vector)
77{
78 if (unlikely(assigned_dev->irq_requested_type &
79 KVM_DEV_IRQ_GUEST_INTX)) {
80 spin_lock(&assigned_dev->intx_mask_lock);
81 if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX))
82 kvm_set_irq(assigned_dev->kvm,
83 assigned_dev->irq_source_id, vector, 1,
84 false);
85 spin_unlock(&assigned_dev->intx_mask_lock);
86 } else
87 kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
88 vector, 1, false);
89}
90
91static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id)
92{
93 struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
94
95 if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
96 spin_lock_irq(&assigned_dev->intx_lock);
97 disable_irq_nosync(irq);
98 assigned_dev->host_irq_disabled = true;
99 spin_unlock_irq(&assigned_dev->intx_lock);
100 }
101
102 kvm_assigned_dev_raise_guest_irq(assigned_dev,
103 assigned_dev->guest_irq);
104
105 return IRQ_HANDLED;
106}
107
108#ifdef __KVM_HAVE_MSI
109static irqreturn_t kvm_assigned_dev_msi(int irq, void *dev_id)
110{
111 struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
112 int ret = kvm_set_irq_inatomic(assigned_dev->kvm,
113 assigned_dev->irq_source_id,
114 assigned_dev->guest_irq, 1);
115 return unlikely(ret == -EWOULDBLOCK) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
116}
117
118static irqreturn_t kvm_assigned_dev_thread_msi(int irq, void *dev_id)
119{
120 struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
121
122 kvm_assigned_dev_raise_guest_irq(assigned_dev,
123 assigned_dev->guest_irq);
124
125 return IRQ_HANDLED;
126}
127#endif
128
129#ifdef __KVM_HAVE_MSIX
130static irqreturn_t kvm_assigned_dev_msix(int irq, void *dev_id)
131{
132 struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
133 int index = find_index_from_host_irq(assigned_dev, irq);
134 u32 vector;
135 int ret = 0;
136
137 if (index >= 0) {
138 vector = assigned_dev->guest_msix_entries[index].vector;
139 ret = kvm_set_irq_inatomic(assigned_dev->kvm,
140 assigned_dev->irq_source_id,
141 vector, 1);
142 }
143
144 return unlikely(ret == -EWOULDBLOCK) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
145}
146
147static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
148{
149 struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
150 int index = find_index_from_host_irq(assigned_dev, irq);
151 u32 vector;
152
153 if (index >= 0) {
154 vector = assigned_dev->guest_msix_entries[index].vector;
155 kvm_assigned_dev_raise_guest_irq(assigned_dev, vector);
156 }
157
158 return IRQ_HANDLED;
159}
160#endif
161
162/* Ack the irq line for an assigned device */
163static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
164{
165 struct kvm_assigned_dev_kernel *dev =
166 container_of(kian, struct kvm_assigned_dev_kernel,
167 ack_notifier);
168
169 kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0, false);
170
171 spin_lock(&dev->intx_mask_lock);
172
173 if (!(dev->flags & KVM_DEV_ASSIGN_MASK_INTX)) {
174 bool reassert = false;
175
176 spin_lock_irq(&dev->intx_lock);
177 /*
178 * The guest IRQ may be shared so this ack can come from an
179 * IRQ for another guest device.
180 */
181 if (dev->host_irq_disabled) {
182 if (!(dev->flags & KVM_DEV_ASSIGN_PCI_2_3))
183 enable_irq(dev->host_irq);
184 else if (!pci_check_and_unmask_intx(dev->dev))
185 reassert = true;
186 dev->host_irq_disabled = reassert;
187 }
188 spin_unlock_irq(&dev->intx_lock);
189
190 if (reassert)
191 kvm_set_irq(dev->kvm, dev->irq_source_id,
192 dev->guest_irq, 1, false);
193 }
194
195 spin_unlock(&dev->intx_mask_lock);
196}
197
198static void deassign_guest_irq(struct kvm *kvm,
199 struct kvm_assigned_dev_kernel *assigned_dev)
200{
201 if (assigned_dev->ack_notifier.gsi != -1)
202 kvm_unregister_irq_ack_notifier(kvm,
203 &assigned_dev->ack_notifier);
204
205 kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
206 assigned_dev->guest_irq, 0, false);
207
208 if (assigned_dev->irq_source_id != -1)
209 kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
210 assigned_dev->irq_source_id = -1;
211 assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
212}
213
214/* The function implicit hold kvm->lock mutex due to cancel_work_sync() */
215static void deassign_host_irq(struct kvm *kvm,
216 struct kvm_assigned_dev_kernel *assigned_dev)
217{
218 /*
219 * We disable irq here to prevent further events.
220 *
221 * Notice this maybe result in nested disable if the interrupt type is
222 * INTx, but it's OK for we are going to free it.
223 *
224 * If this function is a part of VM destroy, please ensure that till
225 * now, the kvm state is still legal for probably we also have to wait
226 * on a currently running IRQ handler.
227 */
228 if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
229 int i;
230 for (i = 0; i < assigned_dev->entries_nr; i++)
231 disable_irq(assigned_dev->host_msix_entries[i].vector);
232
233 for (i = 0; i < assigned_dev->entries_nr; i++)
234 free_irq(assigned_dev->host_msix_entries[i].vector,
235 assigned_dev);
236
237 assigned_dev->entries_nr = 0;
238 kfree(assigned_dev->host_msix_entries);
239 kfree(assigned_dev->guest_msix_entries);
240 pci_disable_msix(assigned_dev->dev);
241 } else {
242 /* Deal with MSI and INTx */
243 if ((assigned_dev->irq_requested_type &
244 KVM_DEV_IRQ_HOST_INTX) &&
245 (assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
246 spin_lock_irq(&assigned_dev->intx_lock);
247 pci_intx(assigned_dev->dev, false);
248 spin_unlock_irq(&assigned_dev->intx_lock);
249 synchronize_irq(assigned_dev->host_irq);
250 } else
251 disable_irq(assigned_dev->host_irq);
252
253 free_irq(assigned_dev->host_irq, assigned_dev);
254
255 if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
256 pci_disable_msi(assigned_dev->dev);
257 }
258
259 assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
260}
261
262static int kvm_deassign_irq(struct kvm *kvm,
263 struct kvm_assigned_dev_kernel *assigned_dev,
264 unsigned long irq_requested_type)
265{
266 unsigned long guest_irq_type, host_irq_type;
267
268 if (!irqchip_in_kernel(kvm))
269 return -EINVAL;
270 /* no irq assignment to deassign */
271 if (!assigned_dev->irq_requested_type)
272 return -ENXIO;
273
274 host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
275 guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;
276
277 if (host_irq_type)
278 deassign_host_irq(kvm, assigned_dev);
279 if (guest_irq_type)
280 deassign_guest_irq(kvm, assigned_dev);
281
282 return 0;
283}
284
285static void kvm_free_assigned_irq(struct kvm *kvm,
286 struct kvm_assigned_dev_kernel *assigned_dev)
287{
288 kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
289}
290
291static void kvm_free_assigned_device(struct kvm *kvm,
292 struct kvm_assigned_dev_kernel
293 *assigned_dev)
294{
295 kvm_free_assigned_irq(kvm, assigned_dev);
296
297 pci_reset_function(assigned_dev->dev);
298 if (pci_load_and_free_saved_state(assigned_dev->dev,
299 &assigned_dev->pci_saved_state))
300 printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
301 __func__, dev_name(&assigned_dev->dev->dev));
302 else
303 pci_restore_state(assigned_dev->dev);
304
305 pci_clear_dev_assigned(assigned_dev->dev);
306
307 pci_release_regions(assigned_dev->dev);
308 pci_disable_device(assigned_dev->dev);
309 pci_dev_put(assigned_dev->dev);
310
311 list_del(&assigned_dev->list);
312 kfree(assigned_dev);
313}
314
315void kvm_free_all_assigned_devices(struct kvm *kvm)
316{
317 struct list_head *ptr, *ptr2;
318 struct kvm_assigned_dev_kernel *assigned_dev;
319
320 list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
321 assigned_dev = list_entry(ptr,
322 struct kvm_assigned_dev_kernel,
323 list);
324
325 kvm_free_assigned_device(kvm, assigned_dev);
326 }
327}
328
329static int assigned_device_enable_host_intx(struct kvm *kvm,
330 struct kvm_assigned_dev_kernel *dev)
331{
332 irq_handler_t irq_handler;
333 unsigned long flags;
334
335 dev->host_irq = dev->dev->irq;
336
337 /*
338 * We can only share the IRQ line with other host devices if we are
339 * able to disable the IRQ source at device-level - independently of
340 * the guest driver. Otherwise host devices may suffer from unbounded
341 * IRQ latencies when the guest keeps the line asserted.
342 */
343 if (dev->flags & KVM_DEV_ASSIGN_PCI_2_3) {
344 irq_handler = kvm_assigned_dev_intx;
345 flags = IRQF_SHARED;
346 } else {
347 irq_handler = NULL;
348 flags = IRQF_ONESHOT;
349 }
350 if (request_threaded_irq(dev->host_irq, irq_handler,
351 kvm_assigned_dev_thread_intx, flags,
352 dev->irq_name, dev))
353 return -EIO;
354
355 if (dev->flags & KVM_DEV_ASSIGN_PCI_2_3) {
356 spin_lock_irq(&dev->intx_lock);
357 pci_intx(dev->dev, true);
358 spin_unlock_irq(&dev->intx_lock);
359 }
360 return 0;
361}
362
363#ifdef __KVM_HAVE_MSI
364static int assigned_device_enable_host_msi(struct kvm *kvm,
365 struct kvm_assigned_dev_kernel *dev)
366{
367 int r;
368
369 if (!dev->dev->msi_enabled) {
370 r = pci_enable_msi(dev->dev);
371 if (r)
372 return r;
373 }
374
375 dev->host_irq = dev->dev->irq;
376 if (request_threaded_irq(dev->host_irq, kvm_assigned_dev_msi,
377 kvm_assigned_dev_thread_msi, 0,
378 dev->irq_name, dev)) {
379 pci_disable_msi(dev->dev);
380 return -EIO;
381 }
382
383 return 0;
384}
385#endif
386
387#ifdef __KVM_HAVE_MSIX
388static int assigned_device_enable_host_msix(struct kvm *kvm,
389 struct kvm_assigned_dev_kernel *dev)
390{
391 int i, r = -EINVAL;
392
393 /* host_msix_entries and guest_msix_entries should have been
394 * initialized */
395 if (dev->entries_nr == 0)
396 return r;
397
398 r = pci_enable_msix_exact(dev->dev,
399 dev->host_msix_entries, dev->entries_nr);
400 if (r)
401 return r;
402
403 for (i = 0; i < dev->entries_nr; i++) {
404 r = request_threaded_irq(dev->host_msix_entries[i].vector,
405 kvm_assigned_dev_msix,
406 kvm_assigned_dev_thread_msix,
407 0, dev->irq_name, dev);
408 if (r)
409 goto err;
410 }
411
412 return 0;
413err:
414 for (i -= 1; i >= 0; i--)
415 free_irq(dev->host_msix_entries[i].vector, dev);
416 pci_disable_msix(dev->dev);
417 return r;
418}
419
420#endif
421
422static int assigned_device_enable_guest_intx(struct kvm *kvm,
423 struct kvm_assigned_dev_kernel *dev,
424 struct kvm_assigned_irq *irq)
425{
426 dev->guest_irq = irq->guest_irq;
427 dev->ack_notifier.gsi = irq->guest_irq;
428 return 0;
429}
430
431#ifdef __KVM_HAVE_MSI
432static int assigned_device_enable_guest_msi(struct kvm *kvm,
433 struct kvm_assigned_dev_kernel *dev,
434 struct kvm_assigned_irq *irq)
435{
436 dev->guest_irq = irq->guest_irq;
437 dev->ack_notifier.gsi = -1;
438 return 0;
439}
440#endif
441
442#ifdef __KVM_HAVE_MSIX
443static int assigned_device_enable_guest_msix(struct kvm *kvm,
444 struct kvm_assigned_dev_kernel *dev,
445 struct kvm_assigned_irq *irq)
446{
447 dev->guest_irq = irq->guest_irq;
448 dev->ack_notifier.gsi = -1;
449 return 0;
450}
451#endif
452
453static int assign_host_irq(struct kvm *kvm,
454 struct kvm_assigned_dev_kernel *dev,
455 __u32 host_irq_type)
456{
457 int r = -EEXIST;
458
459 if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
460 return r;
461
462 snprintf(dev->irq_name, sizeof(dev->irq_name), "kvm:%s",
463 pci_name(dev->dev));
464
465 switch (host_irq_type) {
466 case KVM_DEV_IRQ_HOST_INTX:
467 r = assigned_device_enable_host_intx(kvm, dev);
468 break;
469#ifdef __KVM_HAVE_MSI
470 case KVM_DEV_IRQ_HOST_MSI:
471 r = assigned_device_enable_host_msi(kvm, dev);
472 break;
473#endif
474#ifdef __KVM_HAVE_MSIX
475 case KVM_DEV_IRQ_HOST_MSIX:
476 r = assigned_device_enable_host_msix(kvm, dev);
477 break;
478#endif
479 default:
480 r = -EINVAL;
481 }
482 dev->host_irq_disabled = false;
483
484 if (!r)
485 dev->irq_requested_type |= host_irq_type;
486
487 return r;
488}
489
490static int assign_guest_irq(struct kvm *kvm,
491 struct kvm_assigned_dev_kernel *dev,
492 struct kvm_assigned_irq *irq,
493 unsigned long guest_irq_type)
494{
495 int id;
496 int r = -EEXIST;
497
498 if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
499 return r;
500
501 id = kvm_request_irq_source_id(kvm);
502 if (id < 0)
503 return id;
504
505 dev->irq_source_id = id;
506
507 switch (guest_irq_type) {
508 case KVM_DEV_IRQ_GUEST_INTX:
509 r = assigned_device_enable_guest_intx(kvm, dev, irq);
510 break;
511#ifdef __KVM_HAVE_MSI
512 case KVM_DEV_IRQ_GUEST_MSI:
513 r = assigned_device_enable_guest_msi(kvm, dev, irq);
514 break;
515#endif
516#ifdef __KVM_HAVE_MSIX
517 case KVM_DEV_IRQ_GUEST_MSIX:
518 r = assigned_device_enable_guest_msix(kvm, dev, irq);
519 break;
520#endif
521 default:
522 r = -EINVAL;
523 }
524
525 if (!r) {
526 dev->irq_requested_type |= guest_irq_type;
527 if (dev->ack_notifier.gsi != -1)
528 kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
529 } else {
530 kvm_free_irq_source_id(kvm, dev->irq_source_id);
531 dev->irq_source_id = -1;
532 }
533
534 return r;
535}
536
537/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
538static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
539 struct kvm_assigned_irq *assigned_irq)
540{
541 int r = -EINVAL;
542 struct kvm_assigned_dev_kernel *match;
543 unsigned long host_irq_type, guest_irq_type;
544
545 if (!irqchip_in_kernel(kvm))
546 return r;
547
548 mutex_lock(&kvm->lock);
549 r = -ENODEV;
550 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
551 assigned_irq->assigned_dev_id);
552 if (!match)
553 goto out;
554
555 host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
556 guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);
557
558 r = -EINVAL;
559 /* can only assign one type at a time */
560 if (hweight_long(host_irq_type) > 1)
561 goto out;
562 if (hweight_long(guest_irq_type) > 1)
563 goto out;
564 if (host_irq_type == 0 && guest_irq_type == 0)
565 goto out;
566
567 r = 0;
568 if (host_irq_type)
569 r = assign_host_irq(kvm, match, host_irq_type);
570 if (r)
571 goto out;
572
573 if (guest_irq_type)
574 r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
575out:
576 mutex_unlock(&kvm->lock);
577 return r;
578}
579
580static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
581 struct kvm_assigned_irq
582 *assigned_irq)
583{
584 int r = -ENODEV;
585 struct kvm_assigned_dev_kernel *match;
586 unsigned long irq_type;
587
588 mutex_lock(&kvm->lock);
589
590 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
591 assigned_irq->assigned_dev_id);
592 if (!match)
593 goto out;
594
595 irq_type = assigned_irq->flags & (KVM_DEV_IRQ_HOST_MASK |
596 KVM_DEV_IRQ_GUEST_MASK);
597 r = kvm_deassign_irq(kvm, match, irq_type);
598out:
599 mutex_unlock(&kvm->lock);
600 return r;
601}
602
603/*
604 * We want to test whether the caller has been granted permissions to
605 * use this device. To be able to configure and control the device,
606 * the user needs access to PCI configuration space and BAR resources.
607 * These are accessed through PCI sysfs. PCI config space is often
608 * passed to the process calling this ioctl via file descriptor, so we
609 * can't rely on access to that file. We can check for permissions
610 * on each of the BAR resource files, which is a pretty clear
611 * indicator that the user has been granted access to the device.
612 */
613static int probe_sysfs_permissions(struct pci_dev *dev)
614{
615#ifdef CONFIG_SYSFS
616 int i;
617 bool bar_found = false;
618
619 for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
620 char *kpath, *syspath;
621 struct path path;
622 struct inode *inode;
623 int r;
624
625 if (!pci_resource_len(dev, i))
626 continue;
627
628 kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
629 if (!kpath)
630 return -ENOMEM;
631
632 /* Per sysfs-rules, sysfs is always at /sys */
633 syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
634 kfree(kpath);
635 if (!syspath)
636 return -ENOMEM;
637
638 r = kern_path(syspath, LOOKUP_FOLLOW, &path);
639 kfree(syspath);
640 if (r)
641 return r;
642
643 inode = path.dentry->d_inode;
644
645 r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
646 path_put(&path);
647 if (r)
648 return r;
649
650 bar_found = true;
651 }
652
653 /* If no resources, probably something special */
654 if (!bar_found)
655 return -EPERM;
656
657 return 0;
658#else
659 return -EINVAL; /* No way to control the device without sysfs */
660#endif
661}
662
663static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
664 struct kvm_assigned_pci_dev *assigned_dev)
665{
666 int r = 0, idx;
667 struct kvm_assigned_dev_kernel *match;
668 struct pci_dev *dev;
669
670 if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
671 return -EINVAL;
672
673 mutex_lock(&kvm->lock);
674 idx = srcu_read_lock(&kvm->srcu);
675
676 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
677 assigned_dev->assigned_dev_id);
678 if (match) {
679 /* device already assigned */
680 r = -EEXIST;
681 goto out;
682 }
683
684 match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
685 if (match == NULL) {
686 printk(KERN_INFO "%s: Couldn't allocate memory\n",
687 __func__);
688 r = -ENOMEM;
689 goto out;
690 }
691 dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
692 assigned_dev->busnr,
693 assigned_dev->devfn);
694 if (!dev) {
695 printk(KERN_INFO "%s: host device not found\n", __func__);
696 r = -EINVAL;
697 goto out_free;
698 }
699
700 /* Don't allow bridges to be assigned */
701 if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) {
702 r = -EPERM;
703 goto out_put;
704 }
705
706 r = probe_sysfs_permissions(dev);
707 if (r)
708 goto out_put;
709
710 if (pci_enable_device(dev)) {
711 printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
712 r = -EBUSY;
713 goto out_put;
714 }
715 r = pci_request_regions(dev, "kvm_assigned_device");
716 if (r) {
717 printk(KERN_INFO "%s: Could not get access to device regions\n",
718 __func__);
719 goto out_disable;
720 }
721
722 pci_reset_function(dev);
723 pci_save_state(dev);
724 match->pci_saved_state = pci_store_saved_state(dev);
725 if (!match->pci_saved_state)
726 printk(KERN_DEBUG "%s: Couldn't store %s saved state\n",
727 __func__, dev_name(&dev->dev));
728
729 if (!pci_intx_mask_supported(dev))
730 assigned_dev->flags &= ~KVM_DEV_ASSIGN_PCI_2_3;
731
732 match->assigned_dev_id = assigned_dev->assigned_dev_id;
733 match->host_segnr = assigned_dev->segnr;
734 match->host_busnr = assigned_dev->busnr;
735 match->host_devfn = assigned_dev->devfn;
736 match->flags = assigned_dev->flags;
737 match->dev = dev;
738 spin_lock_init(&match->intx_lock);
739 spin_lock_init(&match->intx_mask_lock);
740 match->irq_source_id = -1;
741 match->kvm = kvm;
742 match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
743
744 list_add(&match->list, &kvm->arch.assigned_dev_head);
745
746 if (!kvm->arch.iommu_domain) {
747 r = kvm_iommu_map_guest(kvm);
748 if (r)
749 goto out_list_del;
750 }
751 r = kvm_assign_device(kvm, match);
752 if (r)
753 goto out_list_del;
754
755out:
756 srcu_read_unlock(&kvm->srcu, idx);
757 mutex_unlock(&kvm->lock);
758 return r;
759out_list_del:
760 if (pci_load_and_free_saved_state(dev, &match->pci_saved_state))
761 printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
762 __func__, dev_name(&dev->dev));
763 list_del(&match->list);
764 pci_release_regions(dev);
765out_disable:
766 pci_disable_device(dev);
767out_put:
768 pci_dev_put(dev);
769out_free:
770 kfree(match);
771 srcu_read_unlock(&kvm->srcu, idx);
772 mutex_unlock(&kvm->lock);
773 return r;
774}
775
776static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
777 struct kvm_assigned_pci_dev *assigned_dev)
778{
779 int r = 0;
780 struct kvm_assigned_dev_kernel *match;
781
782 mutex_lock(&kvm->lock);
783
784 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
785 assigned_dev->assigned_dev_id);
786 if (!match) {
787 printk(KERN_INFO "%s: device hasn't been assigned before, "
788 "so cannot be deassigned\n", __func__);
789 r = -EINVAL;
790 goto out;
791 }
792
793 kvm_deassign_device(kvm, match);
794
795 kvm_free_assigned_device(kvm, match);
796
797out:
798 mutex_unlock(&kvm->lock);
799 return r;
800}
801
802
803#ifdef __KVM_HAVE_MSIX
804static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
805 struct kvm_assigned_msix_nr *entry_nr)
806{
807 int r = 0;
808 struct kvm_assigned_dev_kernel *adev;
809
810 mutex_lock(&kvm->lock);
811
812 adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
813 entry_nr->assigned_dev_id);
814 if (!adev) {
815 r = -EINVAL;
816 goto msix_nr_out;
817 }
818
819 if (adev->entries_nr == 0) {
820 adev->entries_nr = entry_nr->entry_nr;
821 if (adev->entries_nr == 0 ||
822 adev->entries_nr > KVM_MAX_MSIX_PER_DEV) {
823 r = -EINVAL;
824 goto msix_nr_out;
825 }
826
827 adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
828 entry_nr->entry_nr,
829 GFP_KERNEL);
830 if (!adev->host_msix_entries) {
831 r = -ENOMEM;
832 goto msix_nr_out;
833 }
834 adev->guest_msix_entries =
835 kzalloc(sizeof(struct msix_entry) * entry_nr->entry_nr,
836 GFP_KERNEL);
837 if (!adev->guest_msix_entries) {
838 kfree(adev->host_msix_entries);
839 r = -ENOMEM;
840 goto msix_nr_out;
841 }
842 } else /* Not allowed set MSI-X number twice */
843 r = -EINVAL;
844msix_nr_out:
845 mutex_unlock(&kvm->lock);
846 return r;
847}
848
849static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
850 struct kvm_assigned_msix_entry *entry)
851{
852 int r = 0, i;
853 struct kvm_assigned_dev_kernel *adev;
854
855 mutex_lock(&kvm->lock);
856
857 adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
858 entry->assigned_dev_id);
859
860 if (!adev) {
861 r = -EINVAL;
862 goto msix_entry_out;
863 }
864
865 for (i = 0; i < adev->entries_nr; i++)
866 if (adev->guest_msix_entries[i].vector == 0 ||
867 adev->guest_msix_entries[i].entry == entry->entry) {
868 adev->guest_msix_entries[i].entry = entry->entry;
869 adev->guest_msix_entries[i].vector = entry->gsi;
870 adev->host_msix_entries[i].entry = entry->entry;
871 break;
872 }
873 if (i == adev->entries_nr) {
874 r = -ENOSPC;
875 goto msix_entry_out;
876 }
877
878msix_entry_out:
879 mutex_unlock(&kvm->lock);
880
881 return r;
882}
883#endif
884
885static int kvm_vm_ioctl_set_pci_irq_mask(struct kvm *kvm,
886 struct kvm_assigned_pci_dev *assigned_dev)
887{
888 int r = 0;
889 struct kvm_assigned_dev_kernel *match;
890
891 mutex_lock(&kvm->lock);
892
893 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
894 assigned_dev->assigned_dev_id);
895 if (!match) {
896 r = -ENODEV;
897 goto out;
898 }
899
900 spin_lock(&match->intx_mask_lock);
901
902 match->flags &= ~KVM_DEV_ASSIGN_MASK_INTX;
903 match->flags |= assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX;
904
905 if (match->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
906 if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) {
907 kvm_set_irq(match->kvm, match->irq_source_id,
908 match->guest_irq, 0, false);
909 /*
910 * Masking at hardware-level is performed on demand,
911 * i.e. when an IRQ actually arrives at the host.
912 */
913 } else if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
914 /*
915 * Unmask the IRQ line if required. Unmasking at
916 * device level will be performed by user space.
917 */
918 spin_lock_irq(&match->intx_lock);
919 if (match->host_irq_disabled) {
920 enable_irq(match->host_irq);
921 match->host_irq_disabled = false;
922 }
923 spin_unlock_irq(&match->intx_lock);
924 }
925 }
926
927 spin_unlock(&match->intx_mask_lock);
928
929out:
930 mutex_unlock(&kvm->lock);
931 return r;
932}
933
934long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
935 unsigned long arg)
936{
937 void __user *argp = (void __user *)arg;
938 int r;
939
940 switch (ioctl) {
941 case KVM_ASSIGN_PCI_DEVICE: {
942 struct kvm_assigned_pci_dev assigned_dev;
943
944 r = -EFAULT;
945 if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
946 goto out;
947 r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
948 if (r)
949 goto out;
950 break;
951 }
952 case KVM_ASSIGN_IRQ: {
953 r = -EOPNOTSUPP;
954 break;
955 }
956 case KVM_ASSIGN_DEV_IRQ: {
957 struct kvm_assigned_irq assigned_irq;
958
959 r = -EFAULT;
960 if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
961 goto out;
962 r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
963 if (r)
964 goto out;
965 break;
966 }
967 case KVM_DEASSIGN_DEV_IRQ: {
968 struct kvm_assigned_irq assigned_irq;
969
970 r = -EFAULT;
971 if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
972 goto out;
973 r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
974 if (r)
975 goto out;
976 break;
977 }
978 case KVM_DEASSIGN_PCI_DEVICE: {
979 struct kvm_assigned_pci_dev assigned_dev;
980
981 r = -EFAULT;
982 if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
983 goto out;
984 r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
985 if (r)
986 goto out;
987 break;
988 }
989#ifdef __KVM_HAVE_MSIX
990 case KVM_ASSIGN_SET_MSIX_NR: {
991 struct kvm_assigned_msix_nr entry_nr;
992 r = -EFAULT;
993 if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
994 goto out;
995 r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
996 if (r)
997 goto out;
998 break;
999 }
1000 case KVM_ASSIGN_SET_MSIX_ENTRY: {
1001 struct kvm_assigned_msix_entry entry;
1002 r = -EFAULT;
1003 if (copy_from_user(&entry, argp, sizeof entry))
1004 goto out;
1005 r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
1006 if (r)
1007 goto out;
1008 break;
1009 }
1010#endif
1011 case KVM_ASSIGN_SET_INTX_MASK: {
1012 struct kvm_assigned_pci_dev assigned_dev;
1013
1014 r = -EFAULT;
1015 if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
1016 goto out;
1017 r = kvm_vm_ioctl_set_pci_irq_mask(kvm, &assigned_dev);
1018 break;
1019 }
1020 default:
1021 r = -ENOTTY;
1022 break;
1023 }
1024out:
1025 return r;
1026}
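
The kvm_vm_ioctl_assigned_device() dispatcher at the end of the deleted file above is the standard kernel ioctl shape: copy a fixed-size argument struct in from userspace, switch on the ioctl number, and delegate to a per-command helper. A compact sketch of the same shape (self-contained C; the command names and struct are hypothetical stand-ins, and memcpy plays the role of copy_from_user):

    #include <errno.h>
    #include <string.h>

    struct pci_dev_arg { unsigned int id; unsigned int flags; };

    enum { CMD_ASSIGN_DEVICE = 1, CMD_DEASSIGN_DEVICE = 2 };

    static long assign_device(const struct pci_dev_arg *a)   { return a->id ? 0 : -ENODEV; }
    static long deassign_device(const struct pci_dev_arg *a) { return a->id ? 0 : -ENODEV; }

    /* Copy the argument block, then switch on the command; unknown
     * commands get -ENOTTY, just as in the dispatcher above. */
    static long dispatch_ioctl(unsigned int cmd, const void *argp)
    {
        struct pci_dev_arg arg;

        switch (cmd) {
        case CMD_ASSIGN_DEVICE:
            memcpy(&arg, argp, sizeof(arg));   /* copy_from_user in-kernel */
            return assign_device(&arg);
        case CMD_DEASSIGN_DEVICE:
            memcpy(&arg, argp, sizeof(arg));
            return deassign_device(&arg);
        default:
            return -ENOTTY;
        }
    }

    int main(void)
    {
        struct pci_dev_arg a = { .id = 7, .flags = 0 };
        return (int)dispatch_ioctl(CMD_ASSIGN_DEVICE, &a);   /* 0 on success */
    }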
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index b0fb390943c6..148b2392c762 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -36,9 +36,6 @@
 #include <linux/seqlock.h>
 #include <trace/events/kvm.h>
 
-#ifdef __KVM_HAVE_IOAPIC
-#include "ioapic.h"
-#endif
 #include "iodev.h"
 
 #ifdef CONFIG_HAVE_KVM_IRQFD
@@ -492,9 +489,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
         mutex_lock(&kvm->irq_lock);
         hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
         mutex_unlock(&kvm->irq_lock);
-#ifdef __KVM_HAVE_IOAPIC
         kvm_vcpu_request_scan_ioapic(kvm);
-#endif
 }
 
 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
@@ -504,9 +499,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
         hlist_del_init_rcu(&kian->link);
         mutex_unlock(&kvm->irq_lock);
         synchronize_srcu(&kvm->irq_srcu);
-#ifdef __KVM_HAVE_IOAPIC
         kvm_vcpu_request_scan_ioapic(kvm);
-#endif
 }
 #endif
 
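
The eventfd.c hunks above can drop their #ifdef __KVM_HAVE_IOAPIC guards because kvm_vcpu_request_scan_ioapic() is now provided on every configuration; the ioapic.c listing below shows the non-x86 no-op under #ifdef CONFIG_X86 / #else. A tiny self-contained sketch of that stub pattern (userspace C; only the CONFIG_X86 macro is the real kernel switch, the rest is illustrative):

    #include <stdio.h>

    struct kvm { int unused; };

    /* Provide the symbol unconditionally; where the feature is absent the
     * body compiles to a no-op, so callers need no #ifdef of their own. */
    #ifdef CONFIG_X86
    static void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
    {
        (void)kvm;
        printf("scan ioapic requested\n");
    }
    #else
    static void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
    {
        (void)kvm;               /* no ioapic here: deliberately a no-op */
    }
    #endif

    int main(void)
    {
        struct kvm vm = { 0 };
        kvm_vcpu_request_scan_ioapic(&vm);   /* safe to call unconditionally */
        return 0;
    }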
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
deleted file mode 100644
index 0ba4057d271b..000000000000
--- a/virt/kvm/ioapic.c
+++ /dev/null
@@ -1,687 +0,0 @@
1/*
2 * Copyright (C) 2001 MandrakeSoft S.A.
3 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
4 *
5 * MandrakeSoft S.A.
6 * 43, rue d'Aboukir
7 * 75002 Paris - France
8 * http://www.linux-mandrake.com/
9 * http://www.mandrakesoft.com/
10 *
11 * This library is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2 of the License, or (at your option) any later version.
15 *
16 * This library is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with this library; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 *
25 * Yunhong Jiang <yunhong.jiang@intel.com>
26 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
27 * Based on Xen 3.1 code.
28 */
29
30#include <linux/kvm_host.h>
31#include <linux/kvm.h>
32#include <linux/mm.h>
33#include <linux/highmem.h>
34#include <linux/smp.h>
35#include <linux/hrtimer.h>
36#include <linux/io.h>
37#include <linux/slab.h>
38#include <linux/export.h>
39#include <asm/processor.h>
40#include <asm/page.h>
41#include <asm/current.h>
42#include <trace/events/kvm.h>
43
44#include "ioapic.h"
45#include "lapic.h"
46#include "irq.h"
47
48#if 0
49#define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg)
50#else
51#define ioapic_debug(fmt, arg...)
52#endif
53static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
54 bool line_status);
55
56static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
57 unsigned long addr,
58 unsigned long length)
59{
60 unsigned long result = 0;
61
62 switch (ioapic->ioregsel) {
63 case IOAPIC_REG_VERSION:
64 result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
65 | (IOAPIC_VERSION_ID & 0xff));
66 break;
67
68 case IOAPIC_REG_APIC_ID:
69 case IOAPIC_REG_ARB_ID:
70 result = ((ioapic->id & 0xf) << 24);
71 break;
72
73 default:
74 {
75 u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
76 u64 redir_content;
77
78 if (redir_index < IOAPIC_NUM_PINS)
79 redir_content =
80 ioapic->redirtbl[redir_index].bits;
81 else
82 redir_content = ~0ULL;
83
84 result = (ioapic->ioregsel & 0x1) ?
85 (redir_content >> 32) & 0xffffffff :
86 redir_content & 0xffffffff;
87 break;
88 }
89 }
90
91 return result;
92}
93
94static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
95{
96 ioapic->rtc_status.pending_eoi = 0;
97 bitmap_zero(ioapic->rtc_status.dest_map, KVM_MAX_VCPUS);
98}
99
100static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
101
102static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
103{
104 if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
105 kvm_rtc_eoi_tracking_restore_all(ioapic);
106}
107
108static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
109{
110 bool new_val, old_val;
111 struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
112 union kvm_ioapic_redirect_entry *e;
113
114 e = &ioapic->redirtbl[RTC_GSI];
115 if (!kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id,
116 e->fields.dest_mode))
117 return;
118
119 new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
120 old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
121
122 if (new_val == old_val)
123 return;
124
125 if (new_val) {
126 __set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
127 ioapic->rtc_status.pending_eoi++;
128 } else {
129 __clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
130 ioapic->rtc_status.pending_eoi--;
131 rtc_status_pending_eoi_check_valid(ioapic);
132 }
133}
134
135void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
136{
137 struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
138
139 spin_lock(&ioapic->lock);
140 __rtc_irq_eoi_tracking_restore_one(vcpu);
141 spin_unlock(&ioapic->lock);
142}
143
144static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
145{
146 struct kvm_vcpu *vcpu;
147 int i;
148
149 if (RTC_GSI >= IOAPIC_NUM_PINS)
150 return;
151
152 rtc_irq_eoi_tracking_reset(ioapic);
153 kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
154 __rtc_irq_eoi_tracking_restore_one(vcpu);
155}
156
157static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
158{
159 if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map)) {
160 --ioapic->rtc_status.pending_eoi;
161 rtc_status_pending_eoi_check_valid(ioapic);
162 }
163}
164
165static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
166{
167 if (ioapic->rtc_status.pending_eoi > 0)
168 return true; /* coalesced */
169
170 return false;
171}
172
173static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
174 int irq_level, bool line_status)
175{
176 union kvm_ioapic_redirect_entry entry;
177 u32 mask = 1 << irq;
178 u32 old_irr;
179 int edge, ret;
180
181 entry = ioapic->redirtbl[irq];
182 edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
183
184 if (!irq_level) {
185 ioapic->irr &= ~mask;
186 ret = 1;
187 goto out;
188 }
189
190 /*
191 * Return 0 for coalesced interrupts; for edge-triggered interrupts,
192 * this only happens if a previous edge has not been delivered due
193 * do masking. For level interrupts, the remote_irr field tells
194 * us if the interrupt is waiting for an EOI.
195 *
196 * RTC is special: it is edge-triggered, but userspace likes to know
197 * if it has been already ack-ed via EOI because coalesced RTC
198 * interrupts lead to time drift in Windows guests. So we track
199 * EOI manually for the RTC interrupt.
200 */
201 if (irq == RTC_GSI && line_status &&
202 rtc_irq_check_coalesced(ioapic)) {
203 ret = 0;
204 goto out;
205 }
206
207 old_irr = ioapic->irr;
208 ioapic->irr |= mask;
209 if ((edge && old_irr == ioapic->irr) ||
210 (!edge && entry.fields.remote_irr)) {
211 ret = 0;
212 goto out;
213 }
214
215 ret = ioapic_service(ioapic, irq, line_status);
216
217out:
218 trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
219 return ret;
220}
221
222static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
223{
224 u32 idx;
225
226 rtc_irq_eoi_tracking_reset(ioapic);
227 for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
228 ioapic_set_irq(ioapic, idx, 1, true);
229
230 kvm_rtc_eoi_tracking_restore_all(ioapic);
231}
232
233
234static void update_handled_vectors(struct kvm_ioapic *ioapic)
235{
236 DECLARE_BITMAP(handled_vectors, 256);
237 int i;
238
239 memset(handled_vectors, 0, sizeof(handled_vectors));
240 for (i = 0; i < IOAPIC_NUM_PINS; ++i)
241 __set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors);
242 memcpy(ioapic->handled_vectors, handled_vectors,
243 sizeof(handled_vectors));
244 smp_wmb();
245}
246
247void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
248 u32 *tmr)
249{
250 struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
251 union kvm_ioapic_redirect_entry *e;
252 int index;
253
254 spin_lock(&ioapic->lock);
255 for (index = 0; index < IOAPIC_NUM_PINS; index++) {
256 e = &ioapic->redirtbl[index];
257 if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
258 kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
259 index == RTC_GSI) {
260 if (kvm_apic_match_dest(vcpu, NULL, 0,
261 e->fields.dest_id, e->fields.dest_mode)) {
262 __set_bit(e->fields.vector,
263 (unsigned long *)eoi_exit_bitmap);
264 if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG)
265 __set_bit(e->fields.vector,
266 (unsigned long *)tmr);
267 }
268 }
269 }
270 spin_unlock(&ioapic->lock);
271}
272
273#ifdef CONFIG_X86
274void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
275{
276 struct kvm_ioapic *ioapic = kvm->arch.vioapic;
277
278 if (!ioapic)
279 return;
280 kvm_make_scan_ioapic_request(kvm);
281}
282#else
283void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
284{
285 return;
286}
287#endif
288
289static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
290{
291 unsigned index;
292 bool mask_before, mask_after;
293 union kvm_ioapic_redirect_entry *e;
294
295 switch (ioapic->ioregsel) {
296 case IOAPIC_REG_VERSION:
297 /* Writes are ignored. */
298 break;
299
300 case IOAPIC_REG_APIC_ID:
301 ioapic->id = (val >> 24) & 0xf;
302 break;
303
304 case IOAPIC_REG_ARB_ID:
305 break;
306
307 default:
308 index = (ioapic->ioregsel - 0x10) >> 1;
309
310 ioapic_debug("change redir index %x val %x\n", index, val);
311 if (index >= IOAPIC_NUM_PINS)
312 return;
313 e = &ioapic->redirtbl[index];
314 mask_before = e->fields.mask;
315 if (ioapic->ioregsel & 1) {
316 e->bits &= 0xffffffff;
317 e->bits |= (u64) val << 32;
318 } else {
319 e->bits &= ~0xffffffffULL;
320 e->bits |= (u32) val;
321 e->fields.remote_irr = 0;
322 }
323 update_handled_vectors(ioapic);
324 mask_after = e->fields.mask;
325 if (mask_before != mask_after)
326 kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
327 if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
328 && ioapic->irr & (1 << index))
329 ioapic_service(ioapic, index, false);
330 kvm_vcpu_request_scan_ioapic(ioapic->kvm);
331 break;
332 }
333}
334
335static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
336{
337 union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
338 struct kvm_lapic_irq irqe;
339 int ret;
340
341 if (entry->fields.mask)
342 return -1;
343
344 ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
345 "vector=%x trig_mode=%x\n",
346 entry->fields.dest_id, entry->fields.dest_mode,
347 entry->fields.delivery_mode, entry->fields.vector,
348 entry->fields.trig_mode);
349
350 irqe.dest_id = entry->fields.dest_id;
351 irqe.vector = entry->fields.vector;
352 irqe.dest_mode = entry->fields.dest_mode;
353 irqe.trig_mode = entry->fields.trig_mode;
354 irqe.delivery_mode = entry->fields.delivery_mode << 8;
355 irqe.level = 1;
356 irqe.shorthand = 0;
357
358 if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
359 ioapic->irr &= ~(1 << irq);
360
361 if (irq == RTC_GSI && line_status) {
362 /*
363 * pending_eoi cannot ever become negative (see
364 * rtc_status_pending_eoi_check_valid) and the caller
365 * ensures that it is only called if it is >= zero, namely
366 * if rtc_irq_check_coalesced returns false).
367 */
368 BUG_ON(ioapic->rtc_status.pending_eoi != 0);
369 ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
370 ioapic->rtc_status.dest_map);
371 ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
372 } else
373 ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
374
375 if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
376 entry->fields.remote_irr = 1;
377
378 return ret;
379}
380
381int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
382 int level, bool line_status)
383{
384 int ret, irq_level;
385
386 BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);
387
388 spin_lock(&ioapic->lock);
389 irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
390 irq_source_id, level);
391 ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);
392
393 spin_unlock(&ioapic->lock);
394
395 return ret;
396}
397
398void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
399{
400 int i;
401
402 spin_lock(&ioapic->lock);
403 for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
404 __clear_bit(irq_source_id, &ioapic->irq_states[i]);
405 spin_unlock(&ioapic->lock);
406}
407
408static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
409{
410 int i;
411 struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
412 eoi_inject.work);
413 spin_lock(&ioapic->lock);
414 for (i = 0; i < IOAPIC_NUM_PINS; i++) {
415 union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
416
417 if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
418 continue;
419
420 if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
421 ioapic_service(ioapic, i, false);
422 }
423 spin_unlock(&ioapic->lock);
424}
425
426#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000
427
428static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
429 struct kvm_ioapic *ioapic, int vector, int trigger_mode)
430{
431 int i;
432
433 for (i = 0; i < IOAPIC_NUM_PINS; i++) {
434 union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
435
436 if (ent->fields.vector != vector)
437 continue;
438
439 if (i == RTC_GSI)
440 rtc_irq_eoi(ioapic, vcpu);
441		/*
442		 * We are dropping the lock while calling the ack notifiers
443		 * because ack notifier callbacks for assigned devices call
444		 * back into the IOAPIC recursively. Since remote_irr is
445		 * cleared only after the notifiers have run, if the same
446		 * vector is delivered while the lock is dropped it will be
447		 * latched in irr and delivered once the ack notifier returns.
448		 */
449 spin_unlock(&ioapic->lock);
450 kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
451 spin_lock(&ioapic->lock);
452
453 if (trigger_mode != IOAPIC_LEVEL_TRIG)
454 continue;
455
456 ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
457 ent->fields.remote_irr = 0;
458 if (!ent->fields.mask && (ioapic->irr & (1 << i))) {
459 ++ioapic->irq_eoi[i];
460 if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
461 /*
462 * Real hardware does not deliver the interrupt
463 * immediately during eoi broadcast, and this
464 * lets a buggy guest make slow progress
465 * even if it does not correctly handle a
466 * level-triggered interrupt. Emulate this
467 * behavior if we detect an interrupt storm.
468 */
469 schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
470 ioapic->irq_eoi[i] = 0;
471 trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
472 } else {
473 ioapic_service(ioapic, i, false);
474 }
475 } else {
476 ioapic->irq_eoi[i] = 0;
477 }
478 }
479}
480
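/*
 * A minimal standalone sketch of the storm throttle in
 * __kvm_ioapic_update_eoi() above: count back-to-back EOIs for a pin
 * that stays pending and, once the threshold is hit, tell the caller
 * to defer re-injection to delayed work rather than re-asserting the
 * line immediately. The helper name and parameters are illustrative,
 * not part of the original file.
 */
static bool eoi_storm_detected(u32 *successive_eoi, bool still_pending)
{
	if (!still_pending) {
		*successive_eoi = 0;	/* interrupt was handled; reset */
		return false;
	}
	if (++*successive_eoi < IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT)
		return false;		/* safe to re-inject immediately */
	*successive_eoi = 0;
	return true;			/* throttle via schedule_delayed_work() */
}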
481bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
482{
483 struct kvm_ioapic *ioapic = kvm->arch.vioapic;
484 smp_rmb();
485 return test_bit(vector, ioapic->handled_vectors);
486}
487
488void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
489{
490 struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
491
492 spin_lock(&ioapic->lock);
493 __kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode);
494 spin_unlock(&ioapic->lock);
495}
496
497static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
498{
499 return container_of(dev, struct kvm_ioapic, dev);
500}
501
502static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
503{
504	return (addr >= ioapic->base_address) &&
505		(addr < ioapic->base_address + IOAPIC_MEM_LENGTH);
506}
507
508static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
509 void *val)
510{
511 struct kvm_ioapic *ioapic = to_ioapic(this);
512 u32 result;
513 if (!ioapic_in_range(ioapic, addr))
514 return -EOPNOTSUPP;
515
516 ioapic_debug("addr %lx\n", (unsigned long)addr);
517 ASSERT(!(addr & 0xf)); /* check alignment */
518
519 addr &= 0xff;
520 spin_lock(&ioapic->lock);
521 switch (addr) {
522 case IOAPIC_REG_SELECT:
523 result = ioapic->ioregsel;
524 break;
525
526 case IOAPIC_REG_WINDOW:
527 result = ioapic_read_indirect(ioapic, addr, len);
528 break;
529
530 default:
531 result = 0;
532 break;
533 }
534 spin_unlock(&ioapic->lock);
535
536 switch (len) {
537 case 8:
538 *(u64 *) val = result;
539 break;
540 case 1:
541 case 2:
542 case 4:
543 memcpy(val, (char *)&result, len);
544 break;
545 default:
546 printk(KERN_WARNING "ioapic: wrong length %d\n", len);
547 }
548 return 0;
549}
550
551static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
552 const void *val)
553{
554 struct kvm_ioapic *ioapic = to_ioapic(this);
555 u32 data;
556 if (!ioapic_in_range(ioapic, addr))
557 return -EOPNOTSUPP;
558
559 ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
560		     (void *)addr, len, val);
561 ASSERT(!(addr & 0xf)); /* check alignment */
562
563 switch (len) {
564 case 8:
565 case 4:
566 data = *(u32 *) val;
567 break;
568 case 2:
569 data = *(u16 *) val;
570 break;
571 case 1:
572 data = *(u8 *) val;
573 break;
574 default:
575 printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
576 return 0;
577 }
578
579 addr &= 0xff;
580 spin_lock(&ioapic->lock);
581 switch (addr) {
582 case IOAPIC_REG_SELECT:
583 ioapic->ioregsel = data & 0xFF; /* 8-bit register */
584 break;
585
586 case IOAPIC_REG_WINDOW:
587 ioapic_write_indirect(ioapic, data);
588 break;
589#ifdef CONFIG_IA64
590 case IOAPIC_REG_EOI:
591 __kvm_ioapic_update_eoi(NULL, ioapic, data, IOAPIC_LEVEL_TRIG);
592 break;
593#endif
594
595 default:
596 break;
597 }
598 spin_unlock(&ioapic->lock);
599 return 0;
600}
601
602static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
603{
604 int i;
605
606 cancel_delayed_work_sync(&ioapic->eoi_inject);
607 for (i = 0; i < IOAPIC_NUM_PINS; i++)
608 ioapic->redirtbl[i].fields.mask = 1;
609 ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
610 ioapic->ioregsel = 0;
611 ioapic->irr = 0;
612 ioapic->id = 0;
613	memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
614 rtc_irq_eoi_tracking_reset(ioapic);
615 update_handled_vectors(ioapic);
616}
617
618static const struct kvm_io_device_ops ioapic_mmio_ops = {
619 .read = ioapic_mmio_read,
620 .write = ioapic_mmio_write,
621};
622
623int kvm_ioapic_init(struct kvm *kvm)
624{
625 struct kvm_ioapic *ioapic;
626 int ret;
627
628 ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
629 if (!ioapic)
630 return -ENOMEM;
631 spin_lock_init(&ioapic->lock);
632 INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
633 kvm->arch.vioapic = ioapic;
634 kvm_ioapic_reset(ioapic);
635 kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
636 ioapic->kvm = kvm;
637 mutex_lock(&kvm->slots_lock);
638 ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
639 IOAPIC_MEM_LENGTH, &ioapic->dev);
640 mutex_unlock(&kvm->slots_lock);
641 if (ret < 0) {
642 kvm->arch.vioapic = NULL;
643 kfree(ioapic);
644 }
645
646 return ret;
647}
648
649void kvm_ioapic_destroy(struct kvm *kvm)
650{
651 struct kvm_ioapic *ioapic = kvm->arch.vioapic;
652
653	if (ioapic) {
654		cancel_delayed_work_sync(&ioapic->eoi_inject);
655 kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
656 kvm->arch.vioapic = NULL;
657 kfree(ioapic);
658 }
659}
660
661int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
662{
663 struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
664 if (!ioapic)
665 return -EINVAL;
666
667 spin_lock(&ioapic->lock);
668 memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
669 spin_unlock(&ioapic->lock);
670 return 0;
671}
672
673int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
674{
675 struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
676 if (!ioapic)
677 return -EINVAL;
678
679 spin_lock(&ioapic->lock);
680 memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
681 ioapic->irr = 0;
682 update_handled_vectors(ioapic);
683 kvm_vcpu_request_scan_ioapic(kvm);
684 kvm_ioapic_inject_all(ioapic, state->irr);
685 spin_unlock(&ioapic->lock);
686 return 0;
687}
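ioapic_service() above is essentially an unpacking step: it expands one
64-bit redirection table entry into a struct kvm_lapic_irq before handing
it to the local APIC code. Below is a minimal, self-contained sketch of
that unpacking, using the architectural RTE layout (vector in bits 0-7,
delivery mode in bits 8-10, destination mode in bit 11, trigger mode in
bit 15, mask in bit 16, destination in bits 56-63); the struct and helper
names are illustrative, not KVM's:

#include <stdint.h>
#include <stdio.h>

struct rte_fields {
	uint8_t vector;		/* bits 0-7 */
	uint8_t delivery_mode;	/* bits 8-10 */
	uint8_t dest_mode;	/* bit 11 */
	uint8_t trig_mode;	/* bit 15 */
	uint8_t mask;		/* bit 16 */
	uint8_t dest_id;	/* bits 56-63 */
};

static struct rte_fields rte_decode(uint64_t rte)
{
	struct rte_fields f = {
		.vector        = rte & 0xff,
		.delivery_mode = (rte >> 8) & 0x7,
		.dest_mode     = (rte >> 11) & 0x1,
		.trig_mode     = (rte >> 15) & 0x1,
		.mask          = (rte >> 16) & 0x1,
		.dest_id       = (rte >> 56) & 0xff,
	};
	return f;
}

int main(void)
{
	/* Level-triggered, lowest-priority, vector 0x30, destination 0x5. */
	uint64_t rte = (0x5ULL << 56) | (1ULL << 15) | (0x1 << 8) | 0x30;
	struct rte_fields f = rte_decode(rte);

	printf("vector=%#x trig=%u mask=%u dest=%#x\n",
	       f.vector, f.trig_mode, f.mask, f.dest_id);
	return 0;
}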
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
deleted file mode 100644
index e23b70634f1e..000000000000
--- a/virt/kvm/ioapic.h
+++ /dev/null
@@ -1,104 +0,0 @@
1#ifndef __KVM_IO_APIC_H
2#define __KVM_IO_APIC_H
3
4#include <linux/kvm_host.h>
5
6#include "iodev.h"
7
8struct kvm;
9struct kvm_vcpu;
10
11#define IOAPIC_NUM_PINS KVM_IOAPIC_NUM_PINS
12#define IOAPIC_VERSION_ID 0x11 /* IOAPIC version */
13#define IOAPIC_EDGE_TRIG 0
14#define IOAPIC_LEVEL_TRIG 1
15
16#define IOAPIC_DEFAULT_BASE_ADDRESS 0xfec00000
17#define IOAPIC_MEM_LENGTH 0x100
18
19/* Direct registers. */
20#define IOAPIC_REG_SELECT 0x00
21#define IOAPIC_REG_WINDOW 0x10
22#define IOAPIC_REG_EOI 0x40 /* IA64 IOSAPIC only */
23
24/* Indirect registers. */
25#define IOAPIC_REG_APIC_ID 0x00 /* x86 IOAPIC only */
26#define IOAPIC_REG_VERSION 0x01
27#define IOAPIC_REG_ARB_ID 0x02 /* x86 IOAPIC only */
28
29/* ioapic delivery mode */
30#define IOAPIC_FIXED 0x0
31#define IOAPIC_LOWEST_PRIORITY 0x1
32#define IOAPIC_PMI 0x2
33#define IOAPIC_NMI 0x4
34#define IOAPIC_INIT 0x5
35#define IOAPIC_EXTINT 0x7
36
37#ifdef CONFIG_X86
38#define RTC_GSI 8
39#else
40#define RTC_GSI -1U
41#endif
42
43struct rtc_status {
44 int pending_eoi;
45 DECLARE_BITMAP(dest_map, KVM_MAX_VCPUS);
46};
47
48struct kvm_ioapic {
49 u64 base_address;
50 u32 ioregsel;
51 u32 id;
52 u32 irr;
53 u32 pad;
54 union kvm_ioapic_redirect_entry redirtbl[IOAPIC_NUM_PINS];
55 unsigned long irq_states[IOAPIC_NUM_PINS];
56 struct kvm_io_device dev;
57 struct kvm *kvm;
58 void (*ack_notifier)(void *opaque, int irq);
59 spinlock_t lock;
60 DECLARE_BITMAP(handled_vectors, 256);
61 struct rtc_status rtc_status;
62 struct delayed_work eoi_inject;
63 u32 irq_eoi[IOAPIC_NUM_PINS];
64};
65
66#ifdef DEBUG
67#define ASSERT(x) \
68do { \
69 if (!(x)) { \
70 printk(KERN_EMERG "assertion failed %s: %d: %s\n", \
71 __FILE__, __LINE__, #x); \
72 BUG(); \
73 } \
74} while (0)
75#else
76#define ASSERT(x) do { } while (0)
77#endif
78
79static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
80{
81 return kvm->arch.vioapic;
82}
83
84void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
85int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
86 int short_hand, int dest, int dest_mode);
87int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
88void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector,
89 int trigger_mode);
90bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector);
91int kvm_ioapic_init(struct kvm *kvm);
92void kvm_ioapic_destroy(struct kvm *kvm);
93int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
94 int level, bool line_status);
95void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
96int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
97 struct kvm_lapic_irq *irq, unsigned long *dest_map);
98int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
99int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
100void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
101void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
102 u32 *tmr);
103
104#endif
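IOAPIC_REG_SELECT and IOAPIC_REG_WINDOW implement the I/OAPIC's indirect
register file: software first writes a register index to SELECT, then
reads or writes the addressed register through WINDOW, which is exactly
the protocol ioapic_mmio_read() and ioapic_mmio_write() emulate above. A
short guest-side sketch follows; mmio_read32()/mmio_write32() are
hypothetical wrappers around the guest's mapping of the MMIO page:

#include <stdint.h>

#define IOAPIC_BASE		0xfec00000u
#define IOAPIC_SEL_OFF		0x00	/* IOAPIC_REG_SELECT */
#define IOAPIC_WIN_OFF		0x10	/* IOAPIC_REG_WINDOW */
#define IOAPIC_VERSION_IDX	0x01	/* IOAPIC_REG_VERSION */

/* Hypothetical MMIO accessors provided by the environment. */
extern void mmio_write32(uint64_t addr, uint32_t val);
extern uint32_t mmio_read32(uint64_t addr);

static uint32_t ioapic_indirect_read(uint8_t reg)
{
	mmio_write32(IOAPIC_BASE + IOAPIC_SEL_OFF, reg);
	return mmio_read32(IOAPIC_BASE + IOAPIC_WIN_OFF);
}

/*
 * VERSION: bits 0-7 hold the version (0x11 here), bits 16-23 hold the
 * index of the last redirection entry, so pin count = that field + 1.
 */
static unsigned int ioapic_num_pins(void)
{
	return ((ioapic_indirect_read(IOAPIC_VERSION_IDX) >> 16) & 0xff) + 1;
}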
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
deleted file mode 100644
index c1e6ae989a43..000000000000
--- a/virt/kvm/iommu.c
+++ /dev/null
@@ -1,358 +0,0 @@
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Copyright IBM Corporation, 2008
19 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
20 *
21 * Author: Allen M. Kay <allen.m.kay@intel.com>
22 * Author: Weidong Han <weidong.han@intel.com>
23 * Author: Ben-Ami Yassour <benami@il.ibm.com>
24 */
25
26#include <linux/list.h>
27#include <linux/kvm_host.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/stat.h>
31#include <linux/dmar.h>
32#include <linux/iommu.h>
33#include <linux/intel-iommu.h>
34
35static bool allow_unsafe_assigned_interrupts;
36module_param_named(allow_unsafe_assigned_interrupts,
37 allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
38MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
39 "Enable device assignment on platforms without interrupt remapping support.");
40
41static int kvm_iommu_unmap_memslots(struct kvm *kvm);
42static void kvm_iommu_put_pages(struct kvm *kvm,
43 gfn_t base_gfn, unsigned long npages);
44
45static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
46 unsigned long npages)
47{
48 gfn_t end_gfn;
49 pfn_t pfn;
50
51 pfn = gfn_to_pfn_memslot(slot, gfn);
52 end_gfn = gfn + npages;
53 gfn += 1;
54
55 if (is_error_noslot_pfn(pfn))
56 return pfn;
57
58 while (gfn < end_gfn)
59 gfn_to_pfn_memslot(slot, gfn++);
60
61 return pfn;
62}
63
64static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
65{
66 unsigned long i;
67
68 for (i = 0; i < npages; ++i)
69 kvm_release_pfn_clean(pfn + i);
70}
71
72int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
73{
74 gfn_t gfn, end_gfn;
75 pfn_t pfn;
76 int r = 0;
77 struct iommu_domain *domain = kvm->arch.iommu_domain;
78 int flags;
79
80 /* check if iommu exists and in use */
81 if (!domain)
82 return 0;
83
84 gfn = slot->base_gfn;
85 end_gfn = gfn + slot->npages;
86
87 flags = IOMMU_READ;
88 if (!(slot->flags & KVM_MEM_READONLY))
89 flags |= IOMMU_WRITE;
90 if (!kvm->arch.iommu_noncoherent)
91 flags |= IOMMU_CACHE;
92
93
94 while (gfn < end_gfn) {
95 unsigned long page_size;
96
97 /* Check if already mapped */
98 if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
99 gfn += 1;
100 continue;
101 }
102
103 /* Get the page size we could use to map */
104 page_size = kvm_host_page_size(kvm, gfn);
105
106 /* Make sure the page_size does not exceed the memslot */
107 while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
108 page_size >>= 1;
109
110 /* Make sure gfn is aligned to the page size we want to map */
111 while ((gfn << PAGE_SHIFT) & (page_size - 1))
112 page_size >>= 1;
113
114 /* Make sure hva is aligned to the page size we want to map */
115 while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
116 page_size >>= 1;
117
118 /*
119 * Pin all pages we are about to map in memory. This is
120 * important because we unmap and unpin in 4kb steps later.
121 */
122 pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
123 if (is_error_noslot_pfn(pfn)) {
124 gfn += 1;
125 continue;
126 }
127
128 /* Map into IO address space */
129 r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
130 page_size, flags);
131 if (r) {
132			printk(KERN_ERR "kvm_iommu_map_address: "
133			       "iommu failed to map pfn=%llx\n", pfn);
134 kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
135 goto unmap_pages;
136 }
137
138 gfn += page_size >> PAGE_SHIFT;
139
140
141 }
142
143 return 0;
144
145unmap_pages:
146 kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
147 return r;
148}
149
150static int kvm_iommu_map_memslots(struct kvm *kvm)
151{
152 int idx, r = 0;
153 struct kvm_memslots *slots;
154 struct kvm_memory_slot *memslot;
155
156 if (kvm->arch.iommu_noncoherent)
157 kvm_arch_register_noncoherent_dma(kvm);
158
159 idx = srcu_read_lock(&kvm->srcu);
160 slots = kvm_memslots(kvm);
161
162 kvm_for_each_memslot(memslot, slots) {
163 r = kvm_iommu_map_pages(kvm, memslot);
164 if (r)
165 break;
166 }
167 srcu_read_unlock(&kvm->srcu, idx);
168
169 return r;
170}
171
172int kvm_assign_device(struct kvm *kvm,
173 struct kvm_assigned_dev_kernel *assigned_dev)
174{
175 struct pci_dev *pdev = NULL;
176 struct iommu_domain *domain = kvm->arch.iommu_domain;
177 int r;
178 bool noncoherent;
179
180 /* check if iommu exists and in use */
181 if (!domain)
182 return 0;
183
184 pdev = assigned_dev->dev;
185 if (pdev == NULL)
186 return -ENODEV;
187
188 r = iommu_attach_device(domain, &pdev->dev);
189 if (r) {
190 dev_err(&pdev->dev, "kvm assign device failed ret %d", r);
191 return r;
192 }
193
194 noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY);
195
196 /* Check if need to update IOMMU page table for guest memory */
197 if (noncoherent != kvm->arch.iommu_noncoherent) {
198 kvm_iommu_unmap_memslots(kvm);
199 kvm->arch.iommu_noncoherent = noncoherent;
200 r = kvm_iommu_map_memslots(kvm);
201 if (r)
202 goto out_unmap;
203 }
204
205 pci_set_dev_assigned(pdev);
206
207 dev_info(&pdev->dev, "kvm assign device\n");
208
209 return 0;
210out_unmap:
211 kvm_iommu_unmap_memslots(kvm);
212 return r;
213}
214
215int kvm_deassign_device(struct kvm *kvm,
216 struct kvm_assigned_dev_kernel *assigned_dev)
217{
218 struct iommu_domain *domain = kvm->arch.iommu_domain;
219 struct pci_dev *pdev = NULL;
220
221 /* check if iommu exists and in use */
222 if (!domain)
223 return 0;
224
225 pdev = assigned_dev->dev;
226 if (pdev == NULL)
227 return -ENODEV;
228
229 iommu_detach_device(domain, &pdev->dev);
230
231 pci_clear_dev_assigned(pdev);
232
233 dev_info(&pdev->dev, "kvm deassign device\n");
234
235 return 0;
236}
237
238int kvm_iommu_map_guest(struct kvm *kvm)
239{
240 int r;
241
242 if (!iommu_present(&pci_bus_type)) {
243 printk(KERN_ERR "%s: iommu not found\n", __func__);
244 return -ENODEV;
245 }
246
247 mutex_lock(&kvm->slots_lock);
248
249 kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
250 if (!kvm->arch.iommu_domain) {
251 r = -ENOMEM;
252 goto out_unlock;
253 }
254
255 if (!allow_unsafe_assigned_interrupts &&
256 !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
257 printk(KERN_WARNING "%s: No interrupt remapping support,"
258 " disallowing device assignment."
259		       " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
260 " module option.\n", __func__);
261 iommu_domain_free(kvm->arch.iommu_domain);
262 kvm->arch.iommu_domain = NULL;
263 r = -EPERM;
264 goto out_unlock;
265 }
266
267 r = kvm_iommu_map_memslots(kvm);
268 if (r)
269 kvm_iommu_unmap_memslots(kvm);
270
271out_unlock:
272 mutex_unlock(&kvm->slots_lock);
273 return r;
274}
275
276static void kvm_iommu_put_pages(struct kvm *kvm,
277 gfn_t base_gfn, unsigned long npages)
278{
279 struct iommu_domain *domain;
280 gfn_t end_gfn, gfn;
281 pfn_t pfn;
282 u64 phys;
283
284 domain = kvm->arch.iommu_domain;
285 end_gfn = base_gfn + npages;
286 gfn = base_gfn;
287
288 /* check if iommu exists and in use */
289 if (!domain)
290 return;
291
292 while (gfn < end_gfn) {
293 unsigned long unmap_pages;
294 size_t size;
295
296 /* Get physical address */
297 phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
298
299 if (!phys) {
300 gfn++;
301 continue;
302 }
303
304 pfn = phys >> PAGE_SHIFT;
305
306 /* Unmap address from IO address space */
307 size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
308 unmap_pages = 1ULL << get_order(size);
309
310 /* Unpin all pages we just unmapped to not leak any memory */
311 kvm_unpin_pages(kvm, pfn, unmap_pages);
312
313 gfn += unmap_pages;
314 }
315}
316
317void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
318{
319 kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
320}
321
322static int kvm_iommu_unmap_memslots(struct kvm *kvm)
323{
324 int idx;
325 struct kvm_memslots *slots;
326 struct kvm_memory_slot *memslot;
327
328 idx = srcu_read_lock(&kvm->srcu);
329 slots = kvm_memslots(kvm);
330
331 kvm_for_each_memslot(memslot, slots)
332 kvm_iommu_unmap_pages(kvm, memslot);
333
334 srcu_read_unlock(&kvm->srcu, idx);
335
336 if (kvm->arch.iommu_noncoherent)
337 kvm_arch_unregister_noncoherent_dma(kvm);
338
339 return 0;
340}
341
342int kvm_iommu_unmap_guest(struct kvm *kvm)
343{
344 struct iommu_domain *domain = kvm->arch.iommu_domain;
345
346 /* check if iommu exists and in use */
347 if (!domain)
348 return 0;
349
350 mutex_lock(&kvm->slots_lock);
351 kvm_iommu_unmap_memslots(kvm);
352 kvm->arch.iommu_domain = NULL;
353 kvm->arch.iommu_noncoherent = false;
354 mutex_unlock(&kvm->slots_lock);
355
356 iommu_domain_free(domain);
357 return 0;
358}
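The size-selection loops in kvm_iommu_map_pages() above are the subtle
part of the mapping path: start from the largest page size the host backs
the gfn with, then halve it until the mapping fits inside the memslot and
both the guest-physical and host-virtual addresses are aligned to it. A
standalone sketch of just that logic, with illustrative names:

#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12

static uint64_t fit_map_size(uint64_t page_size, uint64_t gfn,
			     uint64_t end_gfn, uint64_t hva)
{
	/* Do not let the mapping run past the end of the memslot. */
	while (gfn + (page_size >> SKETCH_PAGE_SHIFT) > end_gfn)
		page_size >>= 1;

	/* The guest-physical address must be aligned to the size... */
	while ((gfn << SKETCH_PAGE_SHIFT) & (page_size - 1))
		page_size >>= 1;

	/* ...and so must the host virtual address backing it. */
	while (hva & (page_size - 1))
		page_size >>= 1;

	return page_size;
}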
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
deleted file mode 100644
index 963b8995a9e8..000000000000
--- a/virt/kvm/irq_comm.c
+++ /dev/null
@@ -1,369 +0,0 @@
1/*
2 * irq_comm.c: Common API for in kernel interrupt controller
3 * Copyright (c) 2007, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Authors:
18 * Yaozu (Eddie) Dong <Eddie.dong@intel.com>
19 *
20 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
21 */
22
23#include <linux/kvm_host.h>
24#include <linux/slab.h>
25#include <linux/export.h>
26#include <trace/events/kvm.h>
27
28#include <asm/msidef.h>
29#ifdef CONFIG_IA64
30#include <asm/iosapic.h>
31#endif
32
33#include "irq.h"
34
35#include "ioapic.h"
36
37static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
38 struct kvm *kvm, int irq_source_id, int level,
39 bool line_status)
40{
41#ifdef CONFIG_X86
42 struct kvm_pic *pic = pic_irqchip(kvm);
43 return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
44#else
45 return -1;
46#endif
47}
48
49static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
50 struct kvm *kvm, int irq_source_id, int level,
51 bool line_status)
52{
53 struct kvm_ioapic *ioapic = kvm->arch.vioapic;
54 return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
55 line_status);
56}
57
58static inline bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
59{
60#ifdef CONFIG_IA64
61 return irq->delivery_mode ==
62 (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
63#else
64 return irq->delivery_mode == APIC_DM_LOWEST;
65#endif
66}
67
68int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
69 struct kvm_lapic_irq *irq, unsigned long *dest_map)
70{
71 int i, r = -1;
72 struct kvm_vcpu *vcpu, *lowest = NULL;
73
74 if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
75 kvm_is_dm_lowest_prio(irq)) {
76 printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
77 irq->delivery_mode = APIC_DM_FIXED;
78 }
79
80 if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
81 return r;
82
83 kvm_for_each_vcpu(i, vcpu, kvm) {
84 if (!kvm_apic_present(vcpu))
85 continue;
86
87 if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
88 irq->dest_id, irq->dest_mode))
89 continue;
90
91 if (!kvm_is_dm_lowest_prio(irq)) {
92 if (r < 0)
93 r = 0;
94 r += kvm_apic_set_irq(vcpu, irq, dest_map);
95 } else if (kvm_lapic_enabled(vcpu)) {
96 if (!lowest)
97 lowest = vcpu;
98 else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
99 lowest = vcpu;
100 }
101 }
102
103 if (lowest)
104 r = kvm_apic_set_irq(lowest, irq, dest_map);
105
106 return r;
107}
108
109static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
110 struct kvm_lapic_irq *irq)
111{
112 trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);
113
114 irq->dest_id = (e->msi.address_lo &
115 MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
116 irq->vector = (e->msi.data &
117 MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
118 irq->dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
119 irq->trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
120 irq->delivery_mode = e->msi.data & 0x700;
121 irq->level = 1;
122 irq->shorthand = 0;
123 /* TODO Deal with RH bit of MSI message address */
124}
125
126int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
127 struct kvm *kvm, int irq_source_id, int level, bool line_status)
128{
129 struct kvm_lapic_irq irq;
130
131 if (!level)
132 return -1;
133
134 kvm_set_msi_irq(e, &irq);
135
136 return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
137}
138
139
140static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
141 struct kvm *kvm)
142{
143 struct kvm_lapic_irq irq;
144 int r;
145
146 kvm_set_msi_irq(e, &irq);
147
148 if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
149 return r;
150 else
151 return -EWOULDBLOCK;
152}
153
154/*
 155 * Deliver an IRQ in an atomic context if we can, or return a failure
 156 * so the user can retry in a process context.
157 * Return value:
158 * -EWOULDBLOCK - Can't deliver in atomic context: retry in a process context.
159 * Other values - No need to retry.
160 */
161int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
162{
163 struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
164 struct kvm_kernel_irq_routing_entry *e;
165 int ret = -EINVAL;
166 int idx;
167
168 trace_kvm_set_irq(irq, level, irq_source_id);
169
170 /*
171 * Injection into either PIC or IOAPIC might need to scan all CPUs,
 172	 * which would need to be retried from thread context; when the same
 173	 * GSI is connected to both the PIC and the IOAPIC, we would have to
 174	 * report a partial failure here.
 175	 * Since there is no easy way to do this, we only support injecting
 176	 * MSIs, which are limited to a 1:1 GSI mapping.
177 */
178 idx = srcu_read_lock(&kvm->irq_srcu);
179 if (kvm_irq_map_gsi(kvm, entries, irq) > 0) {
180 e = &entries[0];
181 if (likely(e->type == KVM_IRQ_ROUTING_MSI))
182 ret = kvm_set_msi_inatomic(e, kvm);
183 else
184 ret = -EWOULDBLOCK;
185 }
186 srcu_read_unlock(&kvm->irq_srcu, idx);
187 return ret;
188}
189
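/*
 * A sketch of how a fast-path caller might use the helper above, falling
 * back to process context when delivery would block; schedule_retry_work()
 * is a hypothetical stand-in for the caller's actual deferral mechanism,
 * and this function is illustrative, not part of the original file.
 */
extern void schedule_retry_work(struct kvm *kvm, int irq_source_id, u32 gsi);

static void example_inject(struct kvm *kvm, int irq_source_id, u32 gsi)
{
	int ret = kvm_set_irq_inatomic(kvm, irq_source_id, gsi, 1);

	if (ret == -EWOULDBLOCK)
		schedule_retry_work(kvm, irq_source_id, gsi);
}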
190int kvm_request_irq_source_id(struct kvm *kvm)
191{
192 unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
193 int irq_source_id;
194
195 mutex_lock(&kvm->irq_lock);
196 irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);
197
198 if (irq_source_id >= BITS_PER_LONG) {
 199		printk(KERN_WARNING "kvm: exhausted allocatable IRQ sources!\n");
200 irq_source_id = -EFAULT;
201 goto unlock;
202 }
203
204 ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
205#ifdef CONFIG_X86
206 ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
207#endif
208 set_bit(irq_source_id, bitmap);
209unlock:
210 mutex_unlock(&kvm->irq_lock);
211
212 return irq_source_id;
213}
214
215void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
216{
217 ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
218#ifdef CONFIG_X86
219 ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
220#endif
221
222 mutex_lock(&kvm->irq_lock);
223 if (irq_source_id < 0 ||
224 irq_source_id >= BITS_PER_LONG) {
225 printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
226 goto unlock;
227 }
228 clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
229 if (!irqchip_in_kernel(kvm))
230 goto unlock;
231
232 kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
233#ifdef CONFIG_X86
234 kvm_pic_clear_all(pic_irqchip(kvm), irq_source_id);
235#endif
236unlock:
237 mutex_unlock(&kvm->irq_lock);
238}
239
240void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
241 struct kvm_irq_mask_notifier *kimn)
242{
243 mutex_lock(&kvm->irq_lock);
244 kimn->irq = irq;
245 hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list);
246 mutex_unlock(&kvm->irq_lock);
247}
248
249void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
250 struct kvm_irq_mask_notifier *kimn)
251{
252 mutex_lock(&kvm->irq_lock);
253 hlist_del_rcu(&kimn->link);
254 mutex_unlock(&kvm->irq_lock);
255 synchronize_srcu(&kvm->irq_srcu);
256}
257
258void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
259 bool mask)
260{
261 struct kvm_irq_mask_notifier *kimn;
262 int idx, gsi;
263
264 idx = srcu_read_lock(&kvm->irq_srcu);
265 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
266 if (gsi != -1)
267 hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
268 if (kimn->irq == gsi)
269 kimn->func(kimn, mask);
270 srcu_read_unlock(&kvm->irq_srcu, idx);
271}
272
273int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
274 const struct kvm_irq_routing_entry *ue)
275{
276 int r = -EINVAL;
277 int delta;
278 unsigned max_pin;
279
280 switch (ue->type) {
281 case KVM_IRQ_ROUTING_IRQCHIP:
282 delta = 0;
283 switch (ue->u.irqchip.irqchip) {
284 case KVM_IRQCHIP_PIC_MASTER:
285 e->set = kvm_set_pic_irq;
286 max_pin = PIC_NUM_PINS;
287 break;
288 case KVM_IRQCHIP_PIC_SLAVE:
289 e->set = kvm_set_pic_irq;
290 max_pin = PIC_NUM_PINS;
291 delta = 8;
292 break;
293 case KVM_IRQCHIP_IOAPIC:
294 max_pin = KVM_IOAPIC_NUM_PINS;
295 e->set = kvm_set_ioapic_irq;
296 break;
297 default:
298 goto out;
299 }
300 e->irqchip.irqchip = ue->u.irqchip.irqchip;
301 e->irqchip.pin = ue->u.irqchip.pin + delta;
302 if (e->irqchip.pin >= max_pin)
303 goto out;
304 break;
305 case KVM_IRQ_ROUTING_MSI:
306 e->set = kvm_set_msi;
307 e->msi.address_lo = ue->u.msi.address_lo;
308 e->msi.address_hi = ue->u.msi.address_hi;
309 e->msi.data = ue->u.msi.data;
310 break;
311 default:
312 goto out;
313 }
314
315 r = 0;
316out:
317 return r;
318}
319
320#define IOAPIC_ROUTING_ENTRY(irq) \
321 { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
322 .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
323#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)
324
325#ifdef CONFIG_X86
326# define PIC_ROUTING_ENTRY(irq) \
327 { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
328 .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
329# define ROUTING_ENTRY2(irq) \
330 IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
331#else
332# define ROUTING_ENTRY2(irq) \
333 IOAPIC_ROUTING_ENTRY(irq)
334#endif
335
336static const struct kvm_irq_routing_entry default_routing[] = {
337 ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
338 ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
339 ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
340 ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
341 ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
342 ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
343 ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
344 ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
345 ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
346 ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
347 ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
348 ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
349#ifdef CONFIG_IA64
350 ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
351 ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
352 ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
353 ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
354 ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
355 ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
356 ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
357 ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
358 ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
359 ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
360 ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
361 ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
362#endif
363};
364
365int kvm_setup_default_irq_routing(struct kvm *kvm)
366{
367 return kvm_set_irq_routing(kvm, default_routing,
368 ARRAY_SIZE(default_routing), 0);
369}
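kvm_set_msi_irq() above performs the inverse of building an MSI message:
it pulls the destination out of the address word and the vector and
delivery mode out of the data word, per the architectural MSI layout
(destination ID in address bits 12-19, vector in data bits 0-7, delivery
mode in data bits 8-10). A runnable sketch of that decode, with
illustrative names:

#include <assert.h>
#include <stdint.h>

struct msi_decoded {
	uint8_t dest_id, vector, delivery_mode;
};

static struct msi_decoded msi_decode(uint32_t address_lo, uint32_t data)
{
	struct msi_decoded d = {
		.dest_id       = (address_lo >> 12) & 0xff,
		.vector        = data & 0xff,
		.delivery_mode = (data >> 8) & 0x7,
	};
	return d;
}

int main(void)
{
	/* Address 0xfee03000: base 0xfee00000 with destination ID 3.
	 * Data 0x0041: fixed delivery mode, vector 0x41. */
	struct msi_decoded d = msi_decode(0xfee03000u, 0x0041u);

	assert(d.dest_id == 3 && d.vector == 0x41 && d.delivery_mode == 0);
	return 0;
}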
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3cee7b167052..f5283438ee05 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -124,15 +124,6 @@ int vcpu_load(struct kvm_vcpu *vcpu)
124 124
125 if (mutex_lock_killable(&vcpu->mutex)) 125 if (mutex_lock_killable(&vcpu->mutex))
126 return -EINTR; 126 return -EINTR;
127 if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
128 /* The thread running this VCPU changed. */
129 struct pid *oldpid = vcpu->pid;
130 struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
131 rcu_assign_pointer(vcpu->pid, newpid);
132 if (oldpid)
133 synchronize_rcu();
134 put_pid(oldpid);
135 }
136 cpu = get_cpu(); 127 cpu = get_cpu();
137 preempt_notifier_register(&vcpu->preempt_notifier); 128 preempt_notifier_register(&vcpu->preempt_notifier);
138 kvm_arch_vcpu_load(vcpu, cpu); 129 kvm_arch_vcpu_load(vcpu, cpu);
@@ -468,9 +459,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
468 if (r) 459 if (r)
469 goto out_err_no_disable; 460 goto out_err_no_disable;
470 461
471#ifdef CONFIG_HAVE_KVM_IRQCHIP
472 INIT_HLIST_HEAD(&kvm->mask_notifier_list);
473#endif
474#ifdef CONFIG_HAVE_KVM_IRQFD 462#ifdef CONFIG_HAVE_KVM_IRQFD
475 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); 463 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
476#endif 464#endif
@@ -668,48 +656,46 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
668 return 0; 656 return 0;
669} 657}
670 658
671static int cmp_memslot(const void *slot1, const void *slot2)
672{
673 struct kvm_memory_slot *s1, *s2;
674
675 s1 = (struct kvm_memory_slot *)slot1;
676 s2 = (struct kvm_memory_slot *)slot2;
677
678 if (s1->npages < s2->npages)
679 return 1;
680 if (s1->npages > s2->npages)
681 return -1;
682
683 return 0;
684}
685
686/* 659/*
687 * Sort the memslots based on size, so the larger slots 660 * Insert the memslot and re-sort the memslots based on their GFN,
688 * will get a better fit. 661 * so binary search can be used to look up a GFN.
662 * The sorting algorithm takes advantage of the array being
663 * initially sorted and the changed memslot's position being known.
689 */ 664 */
690static void sort_memslots(struct kvm_memslots *slots)
691{
692 int i;
693
694 sort(slots->memslots, KVM_MEM_SLOTS_NUM,
695 sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
696
697 for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
698 slots->id_to_index[slots->memslots[i].id] = i;
699}
700
701static void update_memslots(struct kvm_memslots *slots, 665static void update_memslots(struct kvm_memslots *slots,
702 struct kvm_memory_slot *new) 666 struct kvm_memory_slot *new)
703{ 667{
704 if (new) { 668 int id = new->id;
705 int id = new->id; 669 int i = slots->id_to_index[id];
706 struct kvm_memory_slot *old = id_to_memslot(slots, id); 670 struct kvm_memory_slot *mslots = slots->memslots;
707 unsigned long npages = old->npages;
708 671
709 *old = *new; 672 WARN_ON(mslots[i].id != id);
710 if (new->npages != npages) 673 if (!new->npages) {
711 sort_memslots(slots); 674 new->base_gfn = 0;
675 if (mslots[i].npages)
676 slots->used_slots--;
677 } else {
678 if (!mslots[i].npages)
679 slots->used_slots++;
712 } 680 }
681
682 while (i < KVM_MEM_SLOTS_NUM - 1 &&
683 new->base_gfn <= mslots[i + 1].base_gfn) {
684 if (!mslots[i + 1].npages)
685 break;
686 mslots[i] = mslots[i + 1];
687 slots->id_to_index[mslots[i].id] = i;
688 i++;
689 }
690 while (i > 0 &&
691 new->base_gfn > mslots[i - 1].base_gfn) {
692 mslots[i] = mslots[i - 1];
693 slots->id_to_index[mslots[i].id] = i;
694 i--;
695 }
696
697 mslots[i] = *new;
698 slots->id_to_index[mslots[i].id] = i;
713} 699}
714 700
715static int check_memory_region_flags(struct kvm_userspace_memory_region *mem) 701static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
@@ -727,7 +713,7 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
727} 713}
728 714
729static struct kvm_memslots *install_new_memslots(struct kvm *kvm, 715static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
730 struct kvm_memslots *slots, struct kvm_memory_slot *new) 716 struct kvm_memslots *slots)
731{ 717{
732 struct kvm_memslots *old_memslots = kvm->memslots; 718 struct kvm_memslots *old_memslots = kvm->memslots;
733 719
@@ -738,7 +724,6 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
738 WARN_ON(old_memslots->generation & 1); 724 WARN_ON(old_memslots->generation & 1);
739 slots->generation = old_memslots->generation + 1; 725 slots->generation = old_memslots->generation + 1;
740 726
741 update_memslots(slots, new);
742 rcu_assign_pointer(kvm->memslots, slots); 727 rcu_assign_pointer(kvm->memslots, slots);
743 synchronize_srcu_expedited(&kvm->srcu); 728 synchronize_srcu_expedited(&kvm->srcu);
744 729
@@ -760,7 +745,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
760 * 745 *
761 * Discontiguous memory is allowed, mostly for framebuffers. 746 * Discontiguous memory is allowed, mostly for framebuffers.
762 * 747 *
763 * Must be called holding mmap_sem for write. 748 * Must be called holding kvm->slots_lock for write.
764 */ 749 */
765int __kvm_set_memory_region(struct kvm *kvm, 750int __kvm_set_memory_region(struct kvm *kvm,
766 struct kvm_userspace_memory_region *mem) 751 struct kvm_userspace_memory_region *mem)
@@ -866,15 +851,16 @@ int __kvm_set_memory_region(struct kvm *kvm,
866 goto out_free; 851 goto out_free;
867 } 852 }
868 853
854 slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
855 GFP_KERNEL);
856 if (!slots)
857 goto out_free;
858
869 if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { 859 if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
870 slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
871 GFP_KERNEL);
872 if (!slots)
873 goto out_free;
874 slot = id_to_memslot(slots, mem->slot); 860 slot = id_to_memslot(slots, mem->slot);
875 slot->flags |= KVM_MEMSLOT_INVALID; 861 slot->flags |= KVM_MEMSLOT_INVALID;
876 862
877 old_memslots = install_new_memslots(kvm, slots, NULL); 863 old_memslots = install_new_memslots(kvm, slots);
878 864
879 /* slot was deleted or moved, clear iommu mapping */ 865 /* slot was deleted or moved, clear iommu mapping */
880 kvm_iommu_unmap_pages(kvm, &old); 866 kvm_iommu_unmap_pages(kvm, &old);
@@ -886,6 +872,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
886 * - kvm_is_visible_gfn (mmu_check_roots) 872 * - kvm_is_visible_gfn (mmu_check_roots)
887 */ 873 */
888 kvm_arch_flush_shadow_memslot(kvm, slot); 874 kvm_arch_flush_shadow_memslot(kvm, slot);
875
876 /*
877 * We can re-use the old_memslots from above, the only difference
878 * from the currently installed memslots is the invalid flag. This
879 * will get overwritten by update_memslots anyway.
880 */
889 slots = old_memslots; 881 slots = old_memslots;
890 } 882 }
891 883
@@ -893,26 +885,14 @@ int __kvm_set_memory_region(struct kvm *kvm,
893 if (r) 885 if (r)
894 goto out_slots; 886 goto out_slots;
895 887
896 r = -ENOMEM;
897 /*
898 * We can re-use the old_memslots from above, the only difference
899 * from the currently installed memslots is the invalid flag. This
900 * will get overwritten by update_memslots anyway.
901 */
902 if (!slots) {
903 slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
904 GFP_KERNEL);
905 if (!slots)
906 goto out_free;
907 }
908
909 /* actual memory is freed via old in kvm_free_physmem_slot below */ 888 /* actual memory is freed via old in kvm_free_physmem_slot below */
910 if (change == KVM_MR_DELETE) { 889 if (change == KVM_MR_DELETE) {
911 new.dirty_bitmap = NULL; 890 new.dirty_bitmap = NULL;
912 memset(&new.arch, 0, sizeof(new.arch)); 891 memset(&new.arch, 0, sizeof(new.arch));
913 } 892 }
914 893
915 old_memslots = install_new_memslots(kvm, slots, &new); 894 update_memslots(slots, &new);
895 old_memslots = install_new_memslots(kvm, slots);
916 896
917 kvm_arch_commit_memory_region(kvm, mem, &old, change); 897 kvm_arch_commit_memory_region(kvm, mem, &old, change);
918 898
@@ -1799,10 +1779,6 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target)
1799 rcu_read_unlock(); 1779 rcu_read_unlock();
1800 if (!task) 1780 if (!task)
1801 return ret; 1781 return ret;
1802 if (task->flags & PF_VCPU) {
1803 put_task_struct(task);
1804 return ret;
1805 }
1806 ret = yield_to(task, 1); 1782 ret = yield_to(task, 1);
1807 put_task_struct(task); 1783 put_task_struct(task);
1808 1784
@@ -2065,6 +2041,15 @@ static long kvm_vcpu_ioctl(struct file *filp,
2065 r = -EINVAL; 2041 r = -EINVAL;
2066 if (arg) 2042 if (arg)
2067 goto out; 2043 goto out;
2044 if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
2045 /* The thread running this VCPU changed. */
2046 struct pid *oldpid = vcpu->pid;
2047 struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
2048 rcu_assign_pointer(vcpu->pid, newpid);
2049 if (oldpid)
2050 synchronize_rcu();
2051 put_pid(oldpid);
2052 }
2068 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); 2053 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
2069 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 2054 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
2070 break; 2055 break;
@@ -2599,8 +2584,6 @@ static long kvm_vm_ioctl(struct file *filp,
2599 break; 2584 break;
2600 default: 2585 default:
2601 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 2586 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
2602 if (r == -ENOTTY)
2603 r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
2604 } 2587 }
2605out: 2588out:
2606 return r; 2589 return r;
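The rewritten update_memslots() above keeps the memslots array sorted by
base_gfn in descending order (slots with npages == 0 sink toward the
tail), shifting only the changed slot into place rather than re-sorting,
and that ordering is what makes a binary-search GFN lookup possible. A
minimal sketch of such a lookup over that invariant, with illustrative
type and function names:

#include <stddef.h>
#include <stdint.h>

typedef uint64_t gfn_t;

struct slot {
	gfn_t base_gfn;
	unsigned long npages;
};

/* slots[] is sorted by base_gfn, largest first; "used" counts the
 * non-empty entries at the front of the array. */
static struct slot *find_slot(struct slot *slots, int used, gfn_t gfn)
{
	int lo = 0, hi = used;

	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;

		if (gfn >= slots[mid].base_gfn)
			hi = mid;	/* answer is at mid or to its left */
		else
			lo = mid + 1;
	}

	if (lo < used && gfn >= slots[lo].base_gfn &&
	    gfn < slots[lo].base_gfn + slots[lo].npages)
		return &slots[lo];

	return NULL;	/* gfn falls in no slot */
}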