aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRadim Krčmář <rkrcmar@redhat.com>2017-04-05 10:27:47 -0400
committerRadim Krčmář <rkrcmar@redhat.com>2017-04-05 10:27:47 -0400
commit6fd6410311d618d80748b0a5dd3e8d6cd17662bc (patch)
tree5bc09692135e712549f8184f35bc5501b3bd2f7f
parent1fb883bb827ee8efc1cc9ea0154f953f8a219d38 (diff)
parent6d56111c92d247bb64301029fe88365aa4caf16e (diff)
Merge tag 'kvm-arm-for-v4.11-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm
From: Christoffer Dall <cdall@linaro.org>

KVM/ARM Fixes for v4.11-rc6

Fixes include:
- Fix a problem with GICv3 userspace save/restore
- Clarify GICv2 userspace save/restore ABI
- Be more careful in clearing GIC LRs
- Add missing synchronization primitive to our MMU handling code
-rw-r--r--Documentation/virtual/kvm/devices/arm-vgic.txt6
-rw-r--r--arch/arm/kvm/arm.c3
-rw-r--r--arch/arm/kvm/mmu.c23
-rw-r--r--include/kvm/arm_vgic.h1
-rw-r--r--include/linux/irqchip/arm-gic.h3
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c19
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c20
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c23
-rw-r--r--virt/kvm/arm/vgic/vgic.h11
9 files changed, 99 insertions, 10 deletions
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
index 76e61c883347..b2f60ca8b60c 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
@@ -83,6 +83,12 @@ Groups:
83 83
84 Bits for undefined preemption levels are RAZ/WI. 84 Bits for undefined preemption levels are RAZ/WI.
85 85
86 For historical reasons and to provide ABI compatibility with userspace we
87 export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask
88 field in the lower 5 bits of a word, meaning that userspace must always
89 use the lower 5 bits to communicate with the KVM device and must shift the
90 value left by 3 places to obtain the actual priority mask level.
91
86 Limitations: 92 Limitations:
87 - Priorities are not implemented, and registers are RAZ/WI 93 - Priorities are not implemented, and registers are RAZ/WI
88 - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2. 94 - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 96dba7cd8be7..314eb6abe1ff 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1124,6 +1124,9 @@ static void cpu_hyp_reinit(void)
1124 if (__hyp_get_vectors() == hyp_default_vectors) 1124 if (__hyp_get_vectors() == hyp_default_vectors)
1125 cpu_init_hyp_mode(NULL); 1125 cpu_init_hyp_mode(NULL);
1126 } 1126 }
1127
1128 if (vgic_present)
1129 kvm_vgic_init_cpu_hardware();
1127} 1130}
1128 1131
1129static void cpu_hyp_reset(void) 1132static void cpu_hyp_reset(void)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 962616fd4ddd..582a972371cf 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
292 phys_addr_t addr = start, end = start + size; 292 phys_addr_t addr = start, end = start + size;
293 phys_addr_t next; 293 phys_addr_t next;
294 294
295 assert_spin_locked(&kvm->mmu_lock);
295 pgd = kvm->arch.pgd + stage2_pgd_index(addr); 296 pgd = kvm->arch.pgd + stage2_pgd_index(addr);
296 do { 297 do {
297 next = stage2_pgd_addr_end(addr, end); 298 next = stage2_pgd_addr_end(addr, end);
298 if (!stage2_pgd_none(*pgd)) 299 if (!stage2_pgd_none(*pgd))
299 unmap_stage2_puds(kvm, pgd, addr, next); 300 unmap_stage2_puds(kvm, pgd, addr, next);
301 /*
302 * If the range is too large, release the kvm->mmu_lock
303 * to prevent starvation and lockup detector warnings.
304 */
305 if (next != end)
306 cond_resched_lock(&kvm->mmu_lock);
300 } while (pgd++, addr = next, addr != end); 307 } while (pgd++, addr = next, addr != end);
301} 308}
302 309
@@ -803,6 +810,7 @@ void stage2_unmap_vm(struct kvm *kvm)
803 int idx; 810 int idx;
804 811
805 idx = srcu_read_lock(&kvm->srcu); 812 idx = srcu_read_lock(&kvm->srcu);
813 down_read(&current->mm->mmap_sem);
806 spin_lock(&kvm->mmu_lock); 814 spin_lock(&kvm->mmu_lock);
807 815
808 slots = kvm_memslots(kvm); 816 slots = kvm_memslots(kvm);
@@ -810,6 +818,7 @@ void stage2_unmap_vm(struct kvm *kvm)
810 stage2_unmap_memslot(kvm, memslot); 818 stage2_unmap_memslot(kvm, memslot);
811 819
812 spin_unlock(&kvm->mmu_lock); 820 spin_unlock(&kvm->mmu_lock);
821 up_read(&current->mm->mmap_sem);
813 srcu_read_unlock(&kvm->srcu, idx); 822 srcu_read_unlock(&kvm->srcu, idx);
814} 823}
815 824
@@ -829,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
829 if (kvm->arch.pgd == NULL) 838 if (kvm->arch.pgd == NULL)
830 return; 839 return;
831 840
841 spin_lock(&kvm->mmu_lock);
832 unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); 842 unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
843 spin_unlock(&kvm->mmu_lock);
844
833 /* Free the HW pgd, one page at a time */ 845 /* Free the HW pgd, one page at a time */
834 free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE); 846 free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
835 kvm->arch.pgd = NULL; 847 kvm->arch.pgd = NULL;
@@ -1801,6 +1813,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
1801 (KVM_PHYS_SIZE >> PAGE_SHIFT)) 1813 (KVM_PHYS_SIZE >> PAGE_SHIFT))
1802 return -EFAULT; 1814 return -EFAULT;
1803 1815
1816 down_read(&current->mm->mmap_sem);
1804 /* 1817 /*
1805 * A memory region could potentially cover multiple VMAs, and any holes 1818 * A memory region could potentially cover multiple VMAs, and any holes
1806 * between them, so iterate over all of them to find out if we can map 1819 * between them, so iterate over all of them to find out if we can map
@@ -1844,8 +1857,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
1844 pa += vm_start - vma->vm_start; 1857 pa += vm_start - vma->vm_start;
1845 1858
1846 /* IO region dirty page logging not allowed */ 1859 /* IO region dirty page logging not allowed */
1847 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) 1860 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1848 return -EINVAL; 1861 ret = -EINVAL;
1862 goto out;
1863 }
1849 1864
1850 ret = kvm_phys_addr_ioremap(kvm, gpa, pa, 1865 ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
1851 vm_end - vm_start, 1866 vm_end - vm_start,
@@ -1857,7 +1872,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
1857 } while (hva < reg_end); 1872 } while (hva < reg_end);
1858 1873
1859 if (change == KVM_MR_FLAGS_ONLY) 1874 if (change == KVM_MR_FLAGS_ONLY)
1860 return ret; 1875 goto out;
1861 1876
1862 spin_lock(&kvm->mmu_lock); 1877 spin_lock(&kvm->mmu_lock);
1863 if (ret) 1878 if (ret)
@@ -1865,6 +1880,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
1865 else 1880 else
1866 stage2_flush_memslot(kvm, memslot); 1881 stage2_flush_memslot(kvm, memslot);
1867 spin_unlock(&kvm->mmu_lock); 1882 spin_unlock(&kvm->mmu_lock);
1883out:
1884 up_read(&current->mm->mmap_sem);
1868 return ret; 1885 return ret;
1869} 1886}
1870 1887
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index b72dd2ad5f44..c0b3d999c266 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -295,6 +295,7 @@ void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
295void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu); 295void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
296int kvm_vgic_map_resources(struct kvm *kvm); 296int kvm_vgic_map_resources(struct kvm *kvm);
297int kvm_vgic_hyp_init(void); 297int kvm_vgic_hyp_init(void);
298void kvm_vgic_init_cpu_hardware(void);
298 299
299int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, 300int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
300 bool level); 301 bool level);
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index eafc965b3eb8..dc30f3d057eb 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -96,6 +96,9 @@
96#define GICH_MISR_EOI (1 << 0) 96#define GICH_MISR_EOI (1 << 0)
97#define GICH_MISR_U (1 << 1) 97#define GICH_MISR_U (1 << 1)
98 98
99#define GICV_PMR_PRIORITY_SHIFT 3
100#define GICV_PMR_PRIORITY_MASK (0x1f << GICV_PMR_PRIORITY_SHIFT)
101
99#ifndef __ASSEMBLY__ 102#ifndef __ASSEMBLY__
100 103
101#include <linux/irqdomain.h> 104#include <linux/irqdomain.h>
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 276139a24e6f..702f8108608d 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -392,6 +392,25 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
392} 392}
393 393
394/** 394/**
395 * kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware
396 *
397 * For a specific CPU, initialize the GIC VE hardware.
398 */
399void kvm_vgic_init_cpu_hardware(void)
400{
401 BUG_ON(preemptible());
402
403 /*
404	 * We want to make sure the list registers start out clear so that we
405	 * only have to program the used registers.
406 */
407 if (kvm_vgic_global_state.type == VGIC_V2)
408 vgic_v2_init_lrs();
409 else
410 kvm_call_hyp(__vgic_v3_init_lrs);
411}
412
413/**
395 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable 414 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
396 * according to the host GIC model. Accordingly calls either 415 * according to the host GIC model. Accordingly calls either
397 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be 416 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index a3ad7ff95c9b..0a4283ed9aa7 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -229,7 +229,15 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
229 val = vmcr.ctlr; 229 val = vmcr.ctlr;
230 break; 230 break;
231 case GIC_CPU_PRIMASK: 231 case GIC_CPU_PRIMASK:
232 val = vmcr.pmr; 232 /*
233 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
234	 * PMR field as GICH_VMCR.VMPriMask rather than
235 * GICC_PMR.Priority, so we expose the upper five bits of
236 * priority mask to userspace using the lower bits in the
237 * unsigned long.
238 */
239 val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
240 GICV_PMR_PRIORITY_SHIFT;
233 break; 241 break;
234 case GIC_CPU_BINPOINT: 242 case GIC_CPU_BINPOINT:
235 val = vmcr.bpr; 243 val = vmcr.bpr;
@@ -262,7 +270,15 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
262 vmcr.ctlr = val; 270 vmcr.ctlr = val;
263 break; 271 break;
264 case GIC_CPU_PRIMASK: 272 case GIC_CPU_PRIMASK:
265 vmcr.pmr = val; 273 /*
274 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
275	 * PMR field as GICH_VMCR.VMPriMask rather than
276 * GICC_PMR.Priority, so we expose the upper five bits of
277 * priority mask to userspace using the lower bits in the
278 * unsigned long.
279 */
280 vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
281 GICV_PMR_PRIORITY_MASK;
266 break; 282 break;
267 case GIC_CPU_BINPOINT: 283 case GIC_CPU_BINPOINT:
268 vmcr.bpr = val; 284 vmcr.bpr = val;
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index b834ecdf3225..b637d9c7afe3 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -36,6 +36,21 @@ static unsigned long *u64_to_bitmask(u64 *val)
36 return (unsigned long *)val; 36 return (unsigned long *)val;
37} 37}
38 38
39static inline void vgic_v2_write_lr(int lr, u32 val)
40{
41 void __iomem *base = kvm_vgic_global_state.vctrl_base;
42
43 writel_relaxed(val, base + GICH_LR0 + (lr * 4));
44}
45
46void vgic_v2_init_lrs(void)
47{
48 int i;
49
50 for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
51 vgic_v2_write_lr(i, 0);
52}
53
39void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu) 54void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
40{ 55{
41 struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; 56 struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -191,8 +206,8 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
191 GICH_VMCR_ALIAS_BINPOINT_MASK; 206 GICH_VMCR_ALIAS_BINPOINT_MASK;
192 vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & 207 vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
193 GICH_VMCR_BINPOINT_MASK; 208 GICH_VMCR_BINPOINT_MASK;
194 vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & 209 vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
195 GICH_VMCR_PRIMASK_MASK; 210 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
196 211
197 vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr; 212 vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
198} 213}
@@ -207,8 +222,8 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
207 GICH_VMCR_ALIAS_BINPOINT_SHIFT; 222 GICH_VMCR_ALIAS_BINPOINT_SHIFT;
208 vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> 223 vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
209 GICH_VMCR_BINPOINT_SHIFT; 224 GICH_VMCR_BINPOINT_SHIFT;
210 vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> 225 vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
211 GICH_VMCR_PRIMASK_SHIFT; 226 GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
212} 227}
213 228
214void vgic_v2_enable(struct kvm_vcpu *vcpu) 229void vgic_v2_enable(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index db28f7cadab2..6cf557e9f718 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -81,11 +81,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
81 return irq->pending_latch || irq->line_level; 81 return irq->pending_latch || irq->line_level;
82} 82}
83 83
84/*
85 * This struct provides an intermediate representation of the fields contained
86 * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
87 * state to userspace can generate either GICv2 or GICv3 CPU interface
88 * registers regardless of the hardware backed GIC used.
89 */
84struct vgic_vmcr { 90struct vgic_vmcr {
85 u32 ctlr; 91 u32 ctlr;
86 u32 abpr; 92 u32 abpr;
87 u32 bpr; 93 u32 bpr;
88 u32 pmr; 94 u32 pmr; /* Priority mask field in the GICC_PMR and
95 * ICC_PMR_EL1 priority field format */
89 /* Below member variable are valid only for GICv3 */ 96 /* Below member variable are valid only for GICv3 */
90 u32 grpen0; 97 u32 grpen0;
91 u32 grpen1; 98 u32 grpen1;
@@ -130,6 +137,8 @@ int vgic_v2_map_resources(struct kvm *kvm);
130int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, 137int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
131 enum vgic_type); 138 enum vgic_type);
132 139
140void vgic_v2_init_lrs(void);
141
133static inline void vgic_get_irq_kref(struct vgic_irq *irq) 142static inline void vgic_get_irq_kref(struct vgic_irq *irq)
134{ 143{
135 if (irq->intid < VGIC_MIN_LPI) 144 if (irq->intid < VGIC_MIN_LPI)