author     Linus Torvalds <torvalds@linux-foundation.org>  2014-12-18 19:05:28 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-12-18 19:05:28 -0500
commit     66dcff86ba40eebb5133cccf450878f2bba102ef (patch)
tree       e7eb49ad9316989a529b00303d2dd2cffa61a7f5 /virt/kvm/arm
parent     91ed9e8a32d9a76adc59c83f8b40024076cf8a02 (diff)
parent     2c4aa55a6af070262cca425745e8e54310e96b8d (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM update from Paolo Bonzini:
 "3.19 changes for KVM:

   - spring cleaning: removed support for IA64, and for hardware-
     assisted virtualization on the PPC970

   - ARM, PPC, s390 all had only small fixes

  For x86:

   - small performance improvements (though only on weird guests)

   - usual round of hardware-compliancy fixes from Nadav

   - APICv fixes

   - XSAVES support for hosts and guests.  XSAVES hosts were broken
     because the (non-KVM) XSAVES patches inadvertently changed the KVM
     userspace ABI whenever XSAVES was enabled; hence, this part is
     going to stable.  Guest support is just a matter of exposing the
     feature and CPUID leaves support"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (179 commits)
  KVM: move APIC types to arch/x86/
  KVM: PPC: Book3S: Enable in-kernel XICS emulation by default
  KVM: PPC: Book3S HV: Improve H_CONFER implementation
  KVM: PPC: Book3S HV: Fix endianness of instruction obtained from HEIR register
  KVM: PPC: Book3S HV: Remove code for PPC970 processors
  KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions
  KVM: PPC: Book3S HV: Simplify locking around stolen time calculations
  arch: powerpc: kvm: book3s_paired_singles.c: Remove unused function
  arch: powerpc: kvm: book3s_pr.c: Remove unused function
  arch: powerpc: kvm: book3s.c: Remove some unused functions
  arch: powerpc: kvm: book3s_32_mmu.c: Remove unused function
  KVM: PPC: Book3S HV: Check wait conditions before sleeping in kvmppc_vcore_blocked
  KVM: PPC: Book3S HV: ptes are big endian
  KVM: PPC: Book3S HV: Fix inaccuracies in ICP emulation for H_IPI
  KVM: PPC: Book3S HV: Fix KSM memory corruption
  KVM: PPC: Book3S HV: Fix an issue where guest is paused on receiving HMI
  KVM: PPC: Book3S HV: Fix computation of tlbie operand
  KVM: PPC: Book3S HV: Add missing HPTE unlock
  KVM: PPC: BookE: Improve irq inject tracepoint
  arm/arm64: KVM: Require in-kernel vgic for the arch timers
  ...
Diffstat (limited to 'virt/kvm/arm')
-rw-r--r--  virt/kvm/arm/arch_timer.c |  30
-rw-r--r--  virt/kvm/arm/vgic.c       | 116
2 files changed, 82 insertions(+), 64 deletions(-)
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 22fa819a9b6a..1c0772b340d8 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -61,12 +61,14 @@ static void timer_disarm(struct arch_timer_cpu *timer)
 
 static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
 {
+	int ret;
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 
 	timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
-	kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
-			    timer->irq->irq,
-			    timer->irq->level);
+	ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+				  timer->irq->irq,
+				  timer->irq->level);
+	WARN_ON(ret);
 }
 
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
@@ -307,12 +309,24 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
 	timer_disarm(timer);
 }
 
-int kvm_timer_init(struct kvm *kvm)
+void kvm_timer_enable(struct kvm *kvm)
 {
-	if (timecounter && wqueue) {
-		kvm->arch.timer.cntvoff = kvm_phys_timer_read();
+	if (kvm->arch.timer.enabled)
+		return;
+
+	/*
+	 * There is a potential race here between VCPUs starting for the first
+	 * time, which may be enabling the timer multiple times.  That doesn't
+	 * hurt though, because we're just setting a variable to the same
+	 * variable that it already was.  The important thing is that all
+	 * VCPUs have the enabled variable set, before entering the guest, if
+	 * the arch timers are enabled.
+	 */
+	if (timecounter && wqueue)
 		kvm->arch.timer.enabled = 1;
-	}
+}
 
-	return 0;
+void kvm_timer_init(struct kvm *kvm)
+{
+	kvm->arch.timer.cntvoff = kvm_phys_timer_read();
 }
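Note (not part of the commit): a minimal sketch of how the split between kvm_timer_init() and kvm_timer_enable() might be used; first_run_init() is a hypothetical caller name for illustration only, since the real call sites live outside virt/kvm/arm and are not shown in this diff.

/*
 * Sketch only, not from this patch.  kvm_timer_init() now just records the
 * per-VM counter offset once, while kvm_timer_enable() may be called from
 * every VCPU's first run: it returns early once kvm->arch.timer.enabled is
 * set, and concurrent callers only ever store the same value, so the race
 * between VCPUs starting at the same time is benign.
 */
static int first_run_init(struct kvm_vcpu *vcpu)
{
	/* idempotent; safe to repeat on each VCPU's first entry */
	kvm_timer_enable(vcpu->kvm);
	return 0;
}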
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index aacdb59f30de..03affc7bf453 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -91,6 +91,7 @@
 #define ACCESS_WRITE_VALUE	(3 << 1)
 #define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))
 
+static int vgic_init(struct kvm *kvm);
 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
 static void vgic_update_state(struct kvm *kvm);
@@ -1607,7 +1608,7 @@ static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
 	}
 }
 
-static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
+static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 				    unsigned int irq_num, bool level)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
@@ -1643,9 +1644,10 @@ static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 			vgic_dist_irq_clear_level(vcpu, irq_num);
 			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
 				vgic_dist_irq_clear_pending(vcpu, irq_num);
-		} else {
-			vgic_dist_irq_clear_pending(vcpu, irq_num);
 		}
+
+		ret = false;
+		goto out;
 	}
 
 	enabled = vgic_irq_is_enabled(vcpu, irq_num);
@@ -1672,7 +1674,7 @@ static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 out:
 	spin_unlock(&dist->lock);
 
-	return ret;
+	return ret ? cpuid : -EINVAL;
 }
 
 /**
@@ -1692,11 +1694,26 @@ out:
 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
 			bool level)
 {
-	if (likely(vgic_initialized(kvm)) &&
-	    vgic_update_irq_pending(kvm, cpuid, irq_num, level))
-		vgic_kick_vcpus(kvm);
+	int ret = 0;
+	int vcpu_id;
 
-	return 0;
+	if (unlikely(!vgic_initialized(kvm))) {
+		mutex_lock(&kvm->lock);
+		ret = vgic_init(kvm);
+		mutex_unlock(&kvm->lock);
+
+		if (ret)
+			goto out;
+	}
+
+	vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
+	if (vcpu_id >= 0) {
+		/* kick the specified vcpu */
+		kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id));
+	}
+
+out:
+	return ret;
 }
 
 static irqreturn_t vgic_maintenance_handler(int irq, void *data)
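Note (not part of the commit): with kvm_vgic_inject_irq() now returning an error, in-kernel users are expected to check the result; a minimal sketch of the calling pattern, mirroring the arch_timer.c hunk above. example_inject() is a hypothetical helper used for illustration only.

/*
 * Sketch of the new calling convention: the vgic is initialized on demand
 * under kvm->lock inside kvm_vgic_inject_irq(), and a non-zero return now
 * means the on-demand init (or injection) failed rather than "no kick
 * needed", so callers should not silently ignore it.
 */
static void example_inject(struct kvm_vcpu *vcpu, unsigned int irq, bool level)
{
	int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, irq, level);

	WARN_ON(ret);	/* same treatment as the timer code in this series */
}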
@@ -1726,39 +1743,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
 
 	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
 	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
-	vgic_cpu->vgic_irq_lr_map = kzalloc(nr_irqs, GFP_KERNEL);
+	vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
 
 	if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
 		kvm_vgic_vcpu_destroy(vcpu);
 		return -ENOMEM;
 	}
 
-	return 0;
-}
-
-/**
- * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
- * @vcpu: pointer to the vcpu struct
- *
- * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
- * this vcpu and enable the VGIC for this VCPU
- */
-static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
-{
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-	int i;
-
-	for (i = 0; i < dist->nr_irqs; i++) {
-		if (i < VGIC_NR_PPIS)
-			vgic_bitmap_set_irq_val(&dist->irq_enabled,
-						vcpu->vcpu_id, i, 1);
-		if (i < VGIC_NR_PRIVATE_IRQS)
-			vgic_bitmap_set_irq_val(&dist->irq_cfg,
-						vcpu->vcpu_id, i, VGIC_CFG_EDGE);
-
-		vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
-	}
+	memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);
 
 	/*
 	 * Store the number of LRs per vcpu, so we don't have to go
@@ -1767,7 +1759,7 @@ static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	 */
 	vgic_cpu->nr_lr = vgic->nr_lr;
 
-	vgic_enable(vcpu);
+	return 0;
 }
 
 void kvm_vgic_destroy(struct kvm *kvm)
@@ -1798,20 +1790,21 @@ void kvm_vgic_destroy(struct kvm *kvm)
1798 dist->irq_spi_cpu = NULL; 1790 dist->irq_spi_cpu = NULL;
1799 dist->irq_spi_target = NULL; 1791 dist->irq_spi_target = NULL;
1800 dist->irq_pending_on_cpu = NULL; 1792 dist->irq_pending_on_cpu = NULL;
1793 dist->nr_cpus = 0;
1801} 1794}
1802 1795
1803/* 1796/*
1804 * Allocate and initialize the various data structures. Must be called 1797 * Allocate and initialize the various data structures. Must be called
1805 * with kvm->lock held! 1798 * with kvm->lock held!
1806 */ 1799 */
1807static int vgic_init_maps(struct kvm *kvm) 1800static int vgic_init(struct kvm *kvm)
1808{ 1801{
1809 struct vgic_dist *dist = &kvm->arch.vgic; 1802 struct vgic_dist *dist = &kvm->arch.vgic;
1810 struct kvm_vcpu *vcpu; 1803 struct kvm_vcpu *vcpu;
1811 int nr_cpus, nr_irqs; 1804 int nr_cpus, nr_irqs;
1812 int ret, i; 1805 int ret, i, vcpu_id;
1813 1806
1814 if (dist->nr_cpus) /* Already allocated */ 1807 if (vgic_initialized(kvm))
1815 return 0; 1808 return 0;
1816 1809
1817 nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus); 1810 nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
@@ -1859,16 +1852,28 @@ static int vgic_init_maps(struct kvm *kvm)
 	if (ret)
 		goto out;
 
-	kvm_for_each_vcpu(i, vcpu, kvm) {
+	for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
+		vgic_set_target_reg(kvm, 0, i);
+
+	kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
 		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
 		if (ret) {
 			kvm_err("VGIC: Failed to allocate vcpu memory\n");
 			break;
 		}
-	}
 
-	for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
-		vgic_set_target_reg(kvm, 0, i);
+		for (i = 0; i < dist->nr_irqs; i++) {
+			if (i < VGIC_NR_PPIS)
+				vgic_bitmap_set_irq_val(&dist->irq_enabled,
+							vcpu->vcpu_id, i, 1);
+			if (i < VGIC_NR_PRIVATE_IRQS)
+				vgic_bitmap_set_irq_val(&dist->irq_cfg,
+							vcpu->vcpu_id, i,
+							VGIC_CFG_EDGE);
+		}
+
+		vgic_enable(vcpu);
+	}
 
 out:
 	if (ret)
@@ -1878,25 +1883,23 @@ out:
1878} 1883}
1879 1884
1880/** 1885/**
1881 * kvm_vgic_init - Initialize global VGIC state before running any VCPUs 1886 * kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs
1882 * @kvm: pointer to the kvm struct 1887 * @kvm: pointer to the kvm struct
1883 * 1888 *
1884 * Map the virtual CPU interface into the VM before running any VCPUs. We 1889 * Map the virtual CPU interface into the VM before running any VCPUs. We
1885 * can't do this at creation time, because user space must first set the 1890 * can't do this at creation time, because user space must first set the
1886 * virtual CPU interface address in the guest physical address space. Also 1891 * virtual CPU interface address in the guest physical address space.
1887 * initialize the ITARGETSRn regs to 0 on the emulated distributor.
1888 */ 1892 */
1889int kvm_vgic_init(struct kvm *kvm) 1893int kvm_vgic_map_resources(struct kvm *kvm)
1890{ 1894{
1891 struct kvm_vcpu *vcpu; 1895 int ret = 0;
1892 int ret = 0, i;
1893 1896
1894 if (!irqchip_in_kernel(kvm)) 1897 if (!irqchip_in_kernel(kvm))
1895 return 0; 1898 return 0;
1896 1899
1897 mutex_lock(&kvm->lock); 1900 mutex_lock(&kvm->lock);
1898 1901
1899 if (vgic_initialized(kvm)) 1902 if (vgic_ready(kvm))
1900 goto out; 1903 goto out;
1901 1904
1902 if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) || 1905 if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
@@ -1906,7 +1909,11 @@ int kvm_vgic_init(struct kvm *kvm)
 		goto out;
 	}
 
-	ret = vgic_init_maps(kvm);
+	/*
+	 * Initialize the vgic if this hasn't already been done on demand by
+	 * accessing the vgic state from userspace.
+	 */
+	ret = vgic_init(kvm);
 	if (ret) {
 		kvm_err("Unable to allocate maps\n");
 		goto out;
@@ -1920,9 +1927,6 @@ int kvm_vgic_init(struct kvm *kvm)
 		goto out;
 	}
 
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		kvm_vgic_vcpu_init(vcpu);
-
 	kvm->arch.vgic.ready = true;
 out:
 	if (ret)
@@ -2167,7 +2171,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 
 	mutex_lock(&dev->kvm->lock);
 
-	ret = vgic_init_maps(dev->kvm);
+	ret = vgic_init(dev->kvm);
 	if (ret)
 		goto out;
 
@@ -2289,7 +2293,7 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 
 	mutex_lock(&dev->kvm->lock);
 
-	if (vgic_initialized(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
+	if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
 		ret = -EBUSY;
 	else
 		dev->kvm->arch.vgic.nr_irqs = val;