 Documentation/admin-guide/kernel-parameters.txt    |   4
 Documentation/virtual/kvm/devices/arm-vgic-its.txt |   2
 arch/arm/kvm/Kconfig                               |   5
 arch/arm/kvm/Makefile                              |   1
 arch/arm64/kvm/Kconfig                             |   3
 arch/arm64/kvm/Makefile                            |   1
 include/kvm/arm_vgic.h                             |  41
 virt/kvm/arm/arch_timer.c                          |  24
 virt/kvm/arm/arm.c                                 |  48
 virt/kvm/arm/hyp/vgic-v3-sr.c                      |   9
 virt/kvm/arm/vgic/vgic-init.c                      |   7
 virt/kvm/arm/vgic/vgic-its.c                       | 204
 virt/kvm/arm/vgic/vgic-mmio-v3.c                   |   5
 virt/kvm/arm/vgic/vgic-v3.c                        |  14
 virt/kvm/arm/vgic/vgic-v4.c                        | 364
 virt/kvm/arm/vgic/vgic.c                           |  67
 virt/kvm/arm/vgic/vgic.h                           |  10
 17 files changed, 695 insertions(+), 114 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 62436bd5f34a..1b321e4484e6 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1890,6 +1890,10 @@
 			[KVM,ARM] Trap guest accesses to GICv3 common
 			system registers
 
+	kvm-arm.vgic_v4_enable=
+			[KVM,ARM] Allow use of GICv4 for direct injection of
+			LPIs.
+
 	kvm-intel.ept=	[KVM,Intel] Disable extended page tables
 			(virtualized MMU) support on capable Intel chips.
 			Default is 1 (enabled)
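
Note: the new option is parsed with strtobool() (see the early_param() hook added to vgic-v3.c further down), so booting with "kvm-arm.vgic_v4_enable=1" opts in; direct VLPI injection stays disabled by default even on GICv4-capable hardware.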
diff --git a/Documentation/virtual/kvm/devices/arm-vgic-its.txt b/Documentation/virtual/kvm/devices/arm-vgic-its.txt
index 8d5830eab26a..4f0c9fc40365 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic-its.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic-its.txt
@@ -64,6 +64,8 @@ Groups:
   -EINVAL: Inconsistent restored data
   -EFAULT: Invalid guest ram access
   -EBUSY:  One or more VCPUS are running
+  -EACCES: The virtual ITS is backed by a physical GICv4 ITS, and the
+	    state is not available
 
 KVM_DEV_ARM_VGIC_GRP_ITS_REGS
   Attributes:
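
The new error code surfaces in userspace when a VMM tries to save the ITS tables, typically on migration. A minimal sketch of the userspace side, assuming an arm64 VMM where its_fd is a KVM_DEV_TYPE_ARM_VGIC_ITS device fd; save_its_tables() itself is a hypothetical helper, but the control attribute it uses already exists:

#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int save_its_tables(int its_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
		.attr  = KVM_DEV_ARM_ITS_SAVE_TABLES,
	};

	if (ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr) == 0)
		return 0;

	/* New with this patch: HW-forwarded VLPIs cannot be saved. */
	if (errno == EACCES)
		fprintf(stderr, "vITS state is held by the GICv4 ITS\n");

	return -errno;
}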
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index f24628db5409..e2bd35b6780c 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -4,6 +4,7 @@
 #
 
 source "virt/kvm/Kconfig"
+source "virt/lib/Kconfig"
 
 menuconfig VIRTUALIZATION
 	bool "Virtualization"
@@ -23,6 +24,8 @@ config KVM
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	select ARM_GIC
+	select ARM_GIC_V3
+	select ARM_GIC_V3_ITS
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select HAVE_KVM_ARCH_TLB_FLUSH_ALL
 	select KVM_MMIO
@@ -36,6 +39,8 @@ config KVM
 	select HAVE_KVM_IRQCHIP
 	select HAVE_KVM_IRQ_ROUTING
 	select HAVE_KVM_MSI
+	select IRQ_BYPASS_MANAGER
+	select HAVE_KVM_IRQ_BYPASS
 	depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
 	---help---
 	  Support hosting virtualized guest machines.
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index f550abd64a25..48de846f2246 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -32,6 +32,7 @@ obj-y += $(KVM)/arm/vgic/vgic-init.o
 obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
 obj-y += $(KVM)/arm/vgic/vgic-v2.o
 obj-y += $(KVM)/arm/vgic/vgic-v3.o
+obj-y += $(KVM)/arm/vgic/vgic-v4.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 13f81f971390..2257dfcc44cc 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -4,6 +4,7 @@
 #
 
 source "virt/kvm/Kconfig"
+source "virt/lib/Kconfig"
 
 menuconfig VIRTUALIZATION
 	bool "Virtualization"
@@ -36,6 +37,8 @@ config KVM
 	select HAVE_KVM_MSI
 	select HAVE_KVM_IRQCHIP
 	select HAVE_KVM_IRQ_ROUTING
+	select IRQ_BYPASS_MANAGER
+	select HAVE_KVM_IRQ_BYPASS
 	---help---
 	  Support hosting virtualized guest machines.
 	  We don't support KVM with 16K page tables yet, due to the multiple
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 861acbbac385..87c4f7ae24de 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -27,6 +27,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-irqfd.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v2.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v4.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v2.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v3.o
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 34dba516ef24..8c896540a72c 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -26,6 +26,8 @@
 #include <linux/list.h>
 #include <linux/jump_label.h>
 
+#include <linux/irqchip/arm-gic-v4.h>
+
 #define VGIC_V3_MAX_CPUS	255
 #define VGIC_V2_MAX_CPUS	8
 #define VGIC_NR_IRQS_LEGACY	256
@@ -73,6 +75,9 @@ struct vgic_global {
 	/* Only needed for the legacy KVM_CREATE_IRQCHIP */
 	bool			can_emulate_gicv2;
 
+	/* Hardware has GICv4? */
+	bool			has_gicv4;
+
 	/* GIC system register CPU interface */
 	struct static_key_false gicv3_cpuif;
 
@@ -116,6 +121,7 @@ struct vgic_irq {
 	bool hw;			/* Tied to HW IRQ */
 	struct kref refcount;		/* Used for LPIs */
 	u32 hwintid;			/* HW INTID number */
+	unsigned int host_irq;		/* linux irq corresponding to hwintid */
 	union {
 		u8 targets;			/* GICv2 target VCPUs mask */
 		u32 mpidr;			/* GICv3 target VCPU */
@@ -232,6 +238,15 @@ struct vgic_dist {
 
 	/* used by vgic-debug */
 	struct vgic_state_iter *iter;
+
+	/*
+	 * GICv4 ITS per-VM data, containing the IRQ domain, the VPE
+	 * array, the property table pointer as well as allocation
+	 * data. This essentially ties the Linux IRQ core and ITS
+	 * together, and avoids leaking KVM's data structures anywhere
+	 * else.
+	 */
+	struct its_vm		its_vm;
 };
 
 struct vgic_v2_cpu_if {
@@ -250,6 +265,14 @@ struct vgic_v3_cpu_if {
 	u32		vgic_ap0r[4];
 	u32		vgic_ap1r[4];
 	u64		vgic_lr[VGIC_V3_MAX_LRS];
+
+	/*
+	 * GICv4 ITS per-VPE data, containing the doorbell IRQ, the
+	 * pending table pointer, the its_vm pointer and a few other
+	 * HW specific things. As for the its_vm structure, this is
+	 * linking the Linux IRQ subsystem and the ITS together.
+	 */
+	struct its_vpe	its_vpe;
 };
 
 struct vgic_cpu {
@@ -307,9 +330,10 @@ void kvm_vgic_init_cpu_hardware(void);
 
 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
 			bool level, void *owner);
-int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq);
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
+			  u32 vintid);
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
 
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
 
@@ -349,4 +373,15 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm);
 
 int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner);
 
+struct kvm_kernel_irq_routing_entry;
+
+int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
+			       struct kvm_kernel_irq_routing_entry *irq_entry);
+
+int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
+				 struct kvm_kernel_irq_routing_entry *irq_entry);
+
+void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu);
+void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu);
+
 #endif /* __KVM_ARM_VGIC_H */
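
Worth noting the distinction between the two identifiers a hardware-mapped vgic_irq now carries: hwintid is the physical INTID on the GIC, while the new host_irq field is the Linux interrupt number attached to it. The GICv4 hooks need the latter, since the ITS glue (its_map_vlpi(), its_prop_update_vlpi(), irq_set_irqchip_state()) operates on Linux irqs, not raw INTIDs.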
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 4db54ff08d9e..4151250ce8da 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -817,9 +817,6 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-	struct irq_desc *desc;
-	struct irq_data *data;
-	int phys_irq;
 	int ret;
 
 	if (timer->enabled)
@@ -837,26 +834,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
-	/*
-	 * Find the physical IRQ number corresponding to the host_vtimer_irq
-	 */
-	desc = irq_to_desc(host_vtimer_irq);
-	if (!desc) {
-		kvm_err("%s: no interrupt descriptor\n", __func__);
-		return -EINVAL;
-	}
-
-	data = irq_desc_get_irq_data(desc);
-	while (data->parent_data)
-		data = data->parent_data;
-
-	phys_irq = data->hwirq;
-
-	/*
-	 * Tell the VGIC that the virtual interrupt is tied to a
-	 * physical interrupt. We do that once per VCPU.
-	 */
-	ret = kvm_vgic_map_phys_irq(vcpu, vtimer->irq.irq, phys_irq);
+	ret = kvm_vgic_map_phys_irq(vcpu, host_vtimer_irq, vtimer->irq.irq);
 	if (ret)
 		return ret;
 
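
Note the argument flip in the kvm_vgic_map_phys_irq() call: the old signature took (virtual INTID, physical INTID), the new one takes (host Linux irq, virtual INTID). The irq_to_desc()/parent_data walk that used to live here moves into the vgic (see kvm_vgic_map_irq() in vgic.c below), which now resolves the physical INTID itself.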
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 772bf74ac2e9..a6524ff27de4 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -27,6 +27,8 @@
 #include <linux/mman.h>
 #include <linux/sched.h>
 #include <linux/kvm.h>
+#include <linux/kvm_irqfd.h>
+#include <linux/irqbypass.h>
 #include <trace/events/kvm.h>
 #include <kvm/arm_pmu.h>
 
@@ -175,6 +177,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	int i;
 
+	kvm_vgic_destroy(kvm);
+
 	free_percpu(kvm->arch.last_vcpu_ran);
 	kvm->arch.last_vcpu_ran = NULL;
 
@@ -184,8 +188,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 			kvm->vcpus[i] = NULL;
 		}
 	}
-
-	kvm_vgic_destroy(kvm);
 }
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -313,11 +315,13 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
 	kvm_timer_schedule(vcpu);
+	kvm_vgic_v4_enable_doorbell(vcpu);
 }
 
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
 	kvm_timer_unschedule(vcpu);
+	kvm_vgic_v4_disable_doorbell(vcpu);
 }
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -1450,6 +1454,46 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
 	return NULL;
 }
 
+bool kvm_arch_has_irq_bypass(void)
+{
+	return true;
+}
+
+int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
+				      struct irq_bypass_producer *prod)
+{
+	struct kvm_kernel_irqfd *irqfd =
+		container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+	return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
+					  &irqfd->irq_entry);
+}
+void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
+				      struct irq_bypass_producer *prod)
+{
+	struct kvm_kernel_irqfd *irqfd =
+		container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+	kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
+				     &irqfd->irq_entry);
+}
+
+void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
+{
+	struct kvm_kernel_irqfd *irqfd =
+		container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+	kvm_arm_halt_guest(irqfd->kvm);
+}
+
+void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
+{
+	struct kvm_kernel_irqfd *irqfd =
+		container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+	kvm_arm_resume_guest(irqfd->kvm);
+}
+
 /**
  * Initialize Hyp-mode and memory mappings on all CPUs.
  */
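
These callbacks are driven by the irqbypass manager (virt/lib/irqbypass.c, hence the new Kconfig selects above): it pairs a producer and KVM's irqfd consumer when both register the same token, which is the eventfd context. A rough sketch of the producer side, as a VFIO-like driver might register it; register_msi_bypass() and its arguments are illustrative, not part of this patch:

#include <linux/eventfd.h>
#include <linux/irqbypass.h>

static struct irq_bypass_producer prod;

/* Illustrative only: publish a host MSI as a bypass producer. */
static int register_msi_bypass(struct eventfd_ctx *trigger, unsigned int irq)
{
	prod.token = trigger;	/* must match the irqfd consumer's token */
	prod.irq = irq;		/* host Linux irq backing the MSI */
	return irq_bypass_register_producer(&prod);
}

When the tokens match, the manager invokes kvm_arch_irq_bypass_add_producer() above, which attempts the VLPI forwarding.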
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 91728faa13fd..f5c3d6d7019e 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -258,7 +258,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 			cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
 		}
 	} else {
-		if (static_branch_unlikely(&vgic_v3_cpuif_trap))
+		if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
+		    cpu_if->its_vpe.its_vm)
 			write_gicreg(0, ICH_HCR_EL2);
 
 		cpu_if->vgic_elrsr = 0xffff;
@@ -337,9 +338,11 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	/*
 	 * If we need to trap system registers, we must write
 	 * ICH_HCR_EL2 anyway, even if no interrupts are being
-	 * injected,
+	 * injected. Same thing if GICv4 is used, as VLPI
+	 * delivery is gated by ICH_HCR_EL2.En.
 	 */
-	if (static_branch_unlikely(&vgic_v3_cpuif_trap))
+	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
+	    cpu_if->its_vpe.its_vm)
 		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
 }
 
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 5801261f3add..62310122ee78 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -285,6 +285,10 @@ int vgic_init(struct kvm *kvm)
 	if (ret)
 		goto out;
 
+	ret = vgic_v4_init(kvm);
+	if (ret)
+		goto out;
+
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_vgic_vcpu_enable(vcpu);
 
@@ -320,6 +324,9 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
 
 	kfree(dist->spis);
 	dist->nr_spis = 0;
+
+	if (vgic_supports_direct_msis(kvm))
+		vgic_v4_teardown(kvm);
 }
 
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index d2a99ab0ade7..1f761a9991e7 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -38,7 +38,7 @@ static int vgic_its_save_tables_v0(struct vgic_its *its);
 static int vgic_its_restore_tables_v0(struct vgic_its *its);
 static int vgic_its_commit_v0(struct vgic_its *its);
 static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
-			     struct kvm_vcpu *filter_vcpu);
+			     struct kvm_vcpu *filter_vcpu, bool needs_inv);
 
 /*
  * Creates a new (reference to a) struct vgic_irq for a given LPI.
@@ -106,7 +106,7 @@ out_unlock:
 	 * However we only have those structs for mapped IRQs, so we read in
 	 * the respective config data from memory here upon mapping the LPI.
 	 */
-	ret = update_lpi_config(kvm, irq, NULL);
+	ret = update_lpi_config(kvm, irq, NULL, false);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -273,7 +273,7 @@ static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
  * VCPU. Unconditionally applies if filter_vcpu is NULL.
  */
 static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
-			     struct kvm_vcpu *filter_vcpu)
+			     struct kvm_vcpu *filter_vcpu, bool needs_inv)
 {
 	u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
 	u8 prop;
@@ -292,11 +292,17 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 		irq->priority = LPI_PROP_PRIORITY(prop);
 		irq->enabled = LPI_PROP_ENABLE_BIT(prop);
 
-		vgic_queue_irq_unlock(kvm, irq, flags);
-	} else {
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		if (!irq->hw) {
+			vgic_queue_irq_unlock(kvm, irq, flags);
+			return 0;
+		}
 	}
 
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+	if (irq->hw)
+		return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
+
 	return 0;
 }
 
@@ -336,6 +342,29 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 	return i;
 }
 
+static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
+{
+	int ret = 0;
+
+	spin_lock(&irq->irq_lock);
+	irq->target_vcpu = vcpu;
+	spin_unlock(&irq->irq_lock);
+
+	if (irq->hw) {
+		struct its_vlpi_map map;
+
+		ret = its_get_vlpi(irq->host_irq, &map);
+		if (ret)
+			return ret;
+
+		map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+
+		ret = its_map_vlpi(irq->host_irq, &map);
+	}
+
+	return ret;
+}
+
 /*
  * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
  * is targeting) to the VGIC's view, which deals with target VCPUs.
@@ -350,10 +379,7 @@ static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
 		return;
 
 	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
-
-	spin_lock(&ite->irq->irq_lock);
-	ite->irq->target_vcpu = vcpu;
-	spin_unlock(&ite->irq->irq_lock);
+	update_affinity(ite->irq, vcpu);
 }
 
 /*
@@ -505,19 +531,11 @@ static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
 	return 0;
 }
 
-/*
- * Find the target VCPU and the LPI number for a given devid/eventid pair
- * and make this IRQ pending, possibly injecting it.
- * Must be called with the its_lock mutex held.
- * Returns 0 on success, a positive error value for any ITS mapping
- * related errors and negative error values for generic errors.
- */
-static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
-				u32 devid, u32 eventid)
+int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
+			 u32 devid, u32 eventid, struct vgic_irq **irq)
 {
 	struct kvm_vcpu *vcpu;
 	struct its_ite *ite;
-	unsigned long flags;
 
 	if (!its->enabled)
 		return -EBUSY;
@@ -533,26 +551,65 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
 	if (!vcpu->arch.vgic_cpu.lpis_enabled)
 		return -EBUSY;
 
-	spin_lock_irqsave(&ite->irq->irq_lock, flags);
-	ite->irq->pending_latch = true;
-	vgic_queue_irq_unlock(kvm, ite->irq, flags);
-
+	*irq = ite->irq;
 	return 0;
 }
 
-static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
+struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
 {
+	u64 address;
+	struct kvm_io_device *kvm_io_dev;
 	struct vgic_io_device *iodev;
 
-	if (dev->ops != &kvm_io_gic_ops)
-		return NULL;
+	if (!vgic_has_its(kvm))
+		return ERR_PTR(-ENODEV);
 
-	iodev = container_of(dev, struct vgic_io_device, dev);
+	if (!(msi->flags & KVM_MSI_VALID_DEVID))
+		return ERR_PTR(-EINVAL);
 
+	address = (u64)msi->address_hi << 32 | msi->address_lo;
+
+	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
+	if (!kvm_io_dev)
+		return ERR_PTR(-EINVAL);
+
+	if (kvm_io_dev->ops != &kvm_io_gic_ops)
+		return ERR_PTR(-EINVAL);
+
+	iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
 	if (iodev->iodev_type != IODEV_ITS)
-		return NULL;
+		return ERR_PTR(-EINVAL);
+
+	return iodev->its;
+}
+
+/*
+ * Find the target VCPU and the LPI number for a given devid/eventid pair
+ * and make this IRQ pending, possibly injecting it.
+ * Must be called with the its_lock mutex held.
+ * Returns 0 on success, a positive error value for any ITS mapping
+ * related errors and negative error values for generic errors.
+ */
+static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
+				u32 devid, u32 eventid)
+{
+	struct vgic_irq *irq = NULL;
+	unsigned long flags;
+	int err;
+
+	err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
+	if (err)
+		return err;
+
+	if (irq->hw)
+		return irq_set_irqchip_state(irq->host_irq,
+					     IRQCHIP_STATE_PENDING, true);
+
+	spin_lock_irqsave(&irq->irq_lock, flags);
+	irq->pending_latch = true;
+	vgic_queue_irq_unlock(kvm, irq, flags);
 
-	return iodev;
+	return 0;
 }
 
 /*
@@ -563,30 +620,16 @@ static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
  */
 int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 {
-	u64 address;
-	struct kvm_io_device *kvm_io_dev;
-	struct vgic_io_device *iodev;
+	struct vgic_its *its;
 	int ret;
 
-	if (!vgic_has_its(kvm))
-		return -ENODEV;
-
-	if (!(msi->flags & KVM_MSI_VALID_DEVID))
-		return -EINVAL;
+	its = vgic_msi_to_its(kvm, msi);
+	if (IS_ERR(its))
+		return PTR_ERR(its);
 
-	address = (u64)msi->address_hi << 32 | msi->address_lo;
-
-	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
-	if (!kvm_io_dev)
-		return -EINVAL;
-
-	iodev = vgic_get_its_iodev(kvm_io_dev);
-	if (!iodev)
-		return -EINVAL;
-
-	mutex_lock(&iodev->its->its_lock);
-	ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
-	mutex_unlock(&iodev->its->its_lock);
+	mutex_lock(&its->its_lock);
+	ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
+	mutex_unlock(&its->its_lock);
 
 	if (ret < 0)
 		return ret;
@@ -608,8 +651,12 @@ static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
 	list_del(&ite->ite_list);
 
 	/* This put matches the get in vgic_add_lpi. */
-	if (ite->irq)
+	if (ite->irq) {
+		if (ite->irq->hw)
+			WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+
 		vgic_put_irq(kvm, ite->irq);
+	}
 
 	kfree(ite);
 }
@@ -683,11 +730,7 @@ static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
 	ite->collection = collection;
 	vcpu = kvm_get_vcpu(kvm, collection->target_addr);
 
-	spin_lock(&ite->irq->irq_lock);
-	ite->irq->target_vcpu = vcpu;
-	spin_unlock(&ite->irq->irq_lock);
-
-	return 0;
+	return update_affinity(ite->irq, vcpu);
 }
 
 /*
@@ -1054,6 +1097,10 @@ static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
 
 	ite->irq->pending_latch = false;
 
+	if (ite->irq->hw)
+		return irq_set_irqchip_state(ite->irq->host_irq,
+					     IRQCHIP_STATE_PENDING, false);
+
 	return 0;
 }
 
@@ -1073,7 +1120,7 @@ static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
 	if (!ite)
 		return E_ITS_INV_UNMAPPED_INTERRUPT;
 
-	return update_lpi_config(kvm, ite->irq, NULL);
+	return update_lpi_config(kvm, ite->irq, NULL, true);
 }
 
 /*
@@ -1108,12 +1155,15 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
 		irq = vgic_get_irq(kvm, NULL, intids[i]);
 		if (!irq)
 			continue;
-		update_lpi_config(kvm, irq, vcpu);
+		update_lpi_config(kvm, irq, vcpu, false);
 		vgic_put_irq(kvm, irq);
 	}
 
 	kfree(intids);
 
+	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
+		its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
+
 	return 0;
 }
 
@@ -1128,11 +1178,12 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
 static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
 				      u64 *its_cmd)
 {
-	struct vgic_dist *dist = &kvm->arch.vgic;
 	u32 target1_addr = its_cmd_get_target_addr(its_cmd);
 	u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
 	struct kvm_vcpu *vcpu1, *vcpu2;
 	struct vgic_irq *irq;
+	u32 *intids;
+	int irq_count, i;
 
 	if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
 	    target2_addr >= atomic_read(&kvm->online_vcpus))
@@ -1144,19 +1195,19 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
 	vcpu1 = kvm_get_vcpu(kvm, target1_addr);
 	vcpu2 = kvm_get_vcpu(kvm, target2_addr);
 
-	spin_lock(&dist->lpi_list_lock);
+	irq_count = vgic_copy_lpi_list(vcpu1, &intids);
+	if (irq_count < 0)
+		return irq_count;
 
-	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
-		spin_lock(&irq->irq_lock);
+	for (i = 0; i < irq_count; i++) {
+		irq = vgic_get_irq(kvm, NULL, intids[i]);
 
-		if (irq->target_vcpu == vcpu1)
-			irq->target_vcpu = vcpu2;
+		update_affinity(irq, vcpu2);
 
-		spin_unlock(&irq->irq_lock);
+		vgic_put_irq(kvm, irq);
 	}
 
-	spin_unlock(&dist->lpi_list_lock);
-
+	kfree(intids);
 	return 0;
 }
 
@@ -1634,6 +1685,14 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
 	if (!its)
 		return -ENOMEM;
 
+	if (vgic_initialized(dev->kvm)) {
+		int ret = vgic_v4_init(dev->kvm);
+		if (ret < 0) {
+			kfree(its);
+			return ret;
+		}
+	}
+
 	mutex_init(&its->its_lock);
 	mutex_init(&its->cmd_lock);
 
@@ -1946,6 +2005,15 @@ static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
 	list_for_each_entry(ite, &device->itt_head, ite_list) {
 		gpa_t gpa = base + ite->event_id * ite_esz;
 
+		/*
+		 * If an LPI carries the HW bit, this means that this
+		 * interrupt is controlled by GICv4, and we do not
+		 * have direct access to that state. Let's simply fail
+		 * the save operation...
+		 */
+		if (ite->irq->hw)
+			return -EACCES;
+
 		ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
 		if (ret)
 			return ret;
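
The MOVALL rework above is more than cosmetic: update_affinity() may now call into the ITS driver (its_get_vlpi()/its_map_vlpi()), which is not something that can safely happen under the lpi_list spinlock the old code held while walking every LPI. Hence the switch to vgic_copy_lpi_list(), which snapshots the INTIDs under the lock and then performs the updates on references taken via vgic_get_irq(). Note also that the vgic_its_resolve_lpi()/vgic_msi_to_its() split factored out here is reused by the forwarding code in the new vgic-v4.c below.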
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 83786108829e..671fe81f8e1d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -54,6 +54,11 @@ bool vgic_has_its(struct kvm *kvm)
 	return dist->has_its;
 }
 
+bool vgic_supports_direct_msis(struct kvm *kvm)
+{
+	return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
+}
+
 static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
 					    gpa_t addr, unsigned int len)
 {
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 863351c090d8..2f05f732d3fd 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -24,6 +24,7 @@
 static bool group0_trap;
 static bool group1_trap;
 static bool common_trap;
+static bool gicv4_enable;
 
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
 {
@@ -461,6 +462,12 @@ static int __init early_common_trap_cfg(char *buf)
 }
 early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);
 
+static int __init early_gicv4_enable(char *buf)
+{
+	return strtobool(buf, &gicv4_enable);
+}
+early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
+
 /**
  * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
  * @node:	pointer to the DT node
@@ -480,6 +487,13 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
 		kvm_vgic_global_state.can_emulate_gicv2 = false;
 	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;
 
+	/* GICv4 support? */
+	if (info->has_v4) {
+		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
+		kvm_info("GICv4 support %sabled\n",
+			 gicv4_enable ? "en" : "dis");
+	}
+
 	if (!info->vcpu.start) {
 		kvm_info("GICv3: no GICV resource entry\n");
 		kvm_vgic_global_state.vcpu_base = 0;
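
So two conditions must hold for has_gicv4 to be set: the GIC driver must have probed a v4-capable ITS (info->has_v4) and the administrator must have opted in with kvm-arm.vgic_v4_enable=1. This in turn gates vgic_supports_direct_msis() in vgic-mmio-v3.c above, which additionally requires the VM to actually have a vITS.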
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
new file mode 100644
index 000000000000..53c324aa44ef
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-v4.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2017 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kvm_host.h>
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include "vgic.h"
+
+/*
+ * How KVM uses GICv4 (insert rude comments here):
+ *
+ * The vgic-v4 layer acts as a bridge between several entities:
+ * - The GICv4 ITS representation offered by the ITS driver
+ * - VFIO, which is in charge of the PCI endpoint
+ * - The virtual ITS, which is the only thing the guest sees
+ *
+ * The configuration of VLPIs is triggered by a callback from VFIO,
+ * instructing KVM that a PCI device has been configured to deliver
+ * MSIs to a vITS.
+ *
+ * kvm_vgic_v4_set_forwarding() is thus called with the routing entry,
+ * and this is used to find the corresponding vITS data structures
+ * (ITS instance, device, event and irq) using a process that is
+ * extremely similar to the injection of an MSI.
+ *
+ * At this stage, we can link the guest's view of an LPI (uniquely
+ * identified by the routing entry) and the host irq, using the GICv4
+ * driver mapping operation. Should the mapping succeed, we've then
+ * successfully upgraded the guest's LPI to a VLPI. We can then start
+ * with updating GICv4's view of the property table and generating an
+ * INValidation in order to kickstart the delivery of this VLPI to the
+ * guest directly, without software intervention. Well, almost.
+ *
+ * When the PCI endpoint is deconfigured, this operation is reversed
+ * with VFIO calling kvm_vgic_v4_unset_forwarding().
+ *
+ * Once the VLPI has been mapped, it needs to follow any change the
+ * guest performs on its LPI through the vITS. For that, a number of
+ * command handlers have hooks to communicate these changes to the HW:
+ * - Any invalidation triggers a call to its_prop_update_vlpi()
+ * - The INT command results in an irq_set_irqchip_state(), which
+ *   generates an INT on the corresponding VLPI.
+ * - The CLEAR command results in an irq_set_irqchip_state(), which
+ *   generates a CLEAR on the corresponding VLPI.
+ * - DISCARD translates into an unmap, similar to a call to
+ *   kvm_vgic_v4_unset_forwarding().
+ * - MOVI is translated by an update of the existing mapping, changing
+ *   the target vcpu, resulting in a VMOVI being generated.
+ * - MOVALL is translated by a string of mapping updates (similar to
+ *   the handling of MOVI). MOVALL is horrible.
+ *
+ * Note that a DISCARD/MAPTI sequence emitted from the guest without
+ * reprogramming the PCI endpoint after MAPTI does not result in a
+ * VLPI being mapped, as there is no callback from VFIO (the guest
+ * will get the interrupt via the normal SW injection). Fixing this is
+ * not trivial, and requires some horrible messing with the VFIO
+ * internals. Not fun. Don't do that.
+ *
+ * Then there is the scheduling. Each time a vcpu is about to run on a
+ * physical CPU, KVM must tell the corresponding redistributor about
+ * it. And if we've migrated our vcpu from one CPU to another, we must
+ * tell the ITS (so that the messages reach the right redistributor).
+ * This is done in two steps: first issue an irq_set_affinity() on the
+ * irq corresponding to the vcpu, then call its_schedule_vpe(). You
+ * must be in a non-preemptible context. On exit, another call to
+ * its_schedule_vpe() tells the redistributor that we're done with the
+ * vcpu.
+ *
+ * Finally, the doorbell handling: Each vcpu is allocated an interrupt
+ * which will fire each time a VLPI is made pending whilst the vcpu is
+ * not running. Each time the vcpu gets blocked, the doorbell
+ * interrupt gets enabled. When the vcpu is unblocked (for whatever
+ * reason), the doorbell interrupt is disabled.
+ */
+
+#define DB_IRQ_FLAGS	(IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)
+
+static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
+{
+	struct kvm_vcpu *vcpu = info;
+
+	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
+	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
+	kvm_vcpu_kick(vcpu);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * vgic_v4_init - Initialize the GICv4 data structures
+ * @kvm:	Pointer to the VM being initialized
+ *
+ * We may be called each time a vITS is created, or when the
+ * vgic is initialized. This relies on kvm->lock to be
+ * held. In both cases, the number of vcpus should now be
+ * fixed.
+ */
+int vgic_v4_init(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct kvm_vcpu *vcpu;
+	int i, nr_vcpus, ret;
+
+	if (!vgic_supports_direct_msis(kvm))
+		return 0; /* Nothing to see here... move along. */
+
+	if (dist->its_vm.vpes)
+		return 0;
+
+	nr_vcpus = atomic_read(&kvm->online_vcpus);
+
+	dist->its_vm.vpes = kzalloc(sizeof(*dist->its_vm.vpes) * nr_vcpus,
+				    GFP_KERNEL);
+	if (!dist->its_vm.vpes)
+		return -ENOMEM;
+
+	dist->its_vm.nr_vpes = nr_vcpus;
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+
+	ret = its_alloc_vcpu_irqs(&dist->its_vm);
+	if (ret < 0) {
+		kvm_err("VPE IRQ allocation failure\n");
+		kfree(dist->its_vm.vpes);
+		dist->its_vm.nr_vpes = 0;
+		dist->its_vm.vpes = NULL;
+		return ret;
+	}
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		int irq = dist->its_vm.vpes[i]->irq;
+
+		/*
+		 * Don't automatically enable the doorbell, as we're
+		 * flipping it back and forth when the vcpu gets
+		 * blocked. Also disable the lazy disabling, as the
+		 * doorbell could kick us out of the guest too
+		 * early...
+		 */
+		irq_set_status_flags(irq, DB_IRQ_FLAGS);
+		ret = request_irq(irq, vgic_v4_doorbell_handler,
+				  0, "vcpu", vcpu);
+		if (ret) {
+			kvm_err("failed to allocate vcpu IRQ%d\n", irq);
+			/*
+			 * Trick: adjust the number of vpes so we know
+			 * how many to nuke on teardown...
+			 */
+			dist->its_vm.nr_vpes = i;
+			break;
+		}
+	}
+
+	if (ret)
+		vgic_v4_teardown(kvm);
+
+	return ret;
+}
+
+/**
+ * vgic_v4_teardown - Free the GICv4 data structures
+ * @kvm:	Pointer to the VM being destroyed
+ *
+ * Relies on kvm->lock to be held.
+ */
+void vgic_v4_teardown(struct kvm *kvm)
+{
+	struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
+	int i;
+
+	if (!its_vm->vpes)
+		return;
+
+	for (i = 0; i < its_vm->nr_vpes; i++) {
+		struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
+		int irq = its_vm->vpes[i]->irq;
+
+		irq_clear_status_flags(irq, DB_IRQ_FLAGS);
+		free_irq(irq, vcpu);
+	}
+
+	its_free_vcpu_irqs(its_vm);
+	kfree(its_vm->vpes);
+	its_vm->nr_vpes = 0;
+	its_vm->vpes = NULL;
+}
+
+int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+	if (!vgic_supports_direct_msis(vcpu->kvm))
+		return 0;
+
+	return its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, false);
+}
+
+int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+	int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
+	int err;
+
+	if (!vgic_supports_direct_msis(vcpu->kvm))
+		return 0;
+
+	/*
+	 * Before making the VPE resident, make sure the redistributor
+	 * corresponding to our current CPU expects us here. See the
+	 * doc in drivers/irqchip/irq-gic-v4.c to understand how this
+	 * turns into a VMOVP command at the ITS level.
+	 */
+	err = irq_set_affinity(irq, cpumask_of(smp_processor_id()));
+	if (err)
+		return err;
+
+	err = its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, true);
+	if (err)
+		return err;
+
+	/*
+	 * Now that the VPE is resident, let's get rid of a potential
+	 * doorbell interrupt that would still be pending.
+	 */
+	err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
+
+	return err;
+}
+
+static struct vgic_its *vgic_get_its(struct kvm *kvm,
+				     struct kvm_kernel_irq_routing_entry *irq_entry)
+{
+	struct kvm_msi msi = (struct kvm_msi) {
+		.address_lo	= irq_entry->msi.address_lo,
+		.address_hi	= irq_entry->msi.address_hi,
+		.data		= irq_entry->msi.data,
+		.flags		= irq_entry->msi.flags,
+		.devid		= irq_entry->msi.devid,
+	};
+
+	return vgic_msi_to_its(kvm, &msi);
+}
+
+int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
+			       struct kvm_kernel_irq_routing_entry *irq_entry)
+{
+	struct vgic_its *its;
+	struct vgic_irq *irq;
+	struct its_vlpi_map map;
+	int ret;
+
+	if (!vgic_supports_direct_msis(kvm))
+		return 0;
+
+	/*
+	 * Get the ITS, and escape early on error (not a valid
+	 * doorbell for any of our vITSs).
+	 */
+	its = vgic_get_its(kvm, irq_entry);
+	if (IS_ERR(its))
+		return 0;
+
+	mutex_lock(&its->its_lock);
+
+	/* Perform the actual DevID/EventID -> LPI translation. */
+	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
+				   irq_entry->msi.data, &irq);
+	if (ret)
+		goto out;
+
+	/*
+	 * Emit the mapping request. If it fails, the ITS probably
+	 * isn't v4 compatible, so let's silently bail out. Holding
+	 * the ITS lock should ensure that nothing can modify the
+	 * target vcpu.
+	 */
+	map = (struct its_vlpi_map) {
+		.vm		= &kvm->arch.vgic.its_vm,
+		.vpe		= &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
+		.vintid		= irq->intid,
+		.properties	= ((irq->priority & 0xfc) |
+				   (irq->enabled ? LPI_PROP_ENABLED : 0) |
+				   LPI_PROP_GROUP1),
+		.db_enabled	= true,
+	};
+
+	ret = its_map_vlpi(virq, &map);
+	if (ret)
+		goto out;
+
+	irq->hw		= true;
+	irq->host_irq	= virq;
+
+out:
+	mutex_unlock(&its->its_lock);
+	return ret;
+}
+
+int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
+				 struct kvm_kernel_irq_routing_entry *irq_entry)
+{
+	struct vgic_its *its;
+	struct vgic_irq *irq;
+	int ret;
+
+	if (!vgic_supports_direct_msis(kvm))
+		return 0;
+
+	/*
+	 * Get the ITS, and escape early on error (not a valid
+	 * doorbell for any of our vITSs).
+	 */
+	its = vgic_get_its(kvm, irq_entry);
+	if (IS_ERR(its))
+		return 0;
+
+	mutex_lock(&its->its_lock);
+
+	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
+				   irq_entry->msi.data, &irq);
+	if (ret)
+		goto out;
+
+	WARN_ON(!(irq->hw && irq->host_irq == virq));
+	irq->hw = false;
+	ret = its_unmap_vlpi(virq);
+
+out:
+	mutex_unlock(&its->its_lock);
+	return ret;
+}
+
+void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu)
+{
+	if (vgic_supports_direct_msis(vcpu->kvm)) {
+		int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
+		if (irq)
+			enable_irq(irq);
+	}
+}
+
+void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu)
+{
+	if (vgic_supports_direct_msis(vcpu->kvm)) {
+		int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
+		if (irq)
+			disable_irq(irq);
+	}
+}
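
To make the scheduling description above concrete, here is a compressed sketch of where the new hooks sit in a vcpu's run cycle. It only illustrates the ordering wired up elsewhere in this patch (arm.c and vgic.c); run_vcpu_once() itself is not a real function:

#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>

static void run_vcpu_once(struct kvm_vcpu *vcpu)
{
	/* calls vgic_v4_flush_hwstate(): VMOVP to this CPU's
	 * redistributor, make the VPE resident, clear any stale
	 * pending doorbell. */
	kvm_vgic_flush_hwstate(vcpu);

	/* ... guest runs; VLPIs are delivered in HW, no LRs involved ... */

	/* calls vgic_v4_sync_hwstate(): make the VPE non-resident. */
	kvm_vgic_sync_hwstate(vcpu);
}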
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index e54ef2fdf73d..b168a328a9e0 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -17,6 +17,8 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/list_sort.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
 
 #include "vgic.h"
 
@@ -409,25 +411,56 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
 	return 0;
 }
 
-int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
+/* @irq->irq_lock must be held */
+static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+			    unsigned int host_irq)
 {
-	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+	struct irq_desc *desc;
+	struct irq_data *data;
+
+	/*
+	 * Find the physical IRQ number corresponding to @host_irq
+	 */
+	desc = irq_to_desc(host_irq);
+	if (!desc) {
+		kvm_err("%s: no interrupt descriptor\n", __func__);
+		return -EINVAL;
+	}
+	data = irq_desc_get_irq_data(desc);
+	while (data->parent_data)
+		data = data->parent_data;
+
+	irq->hw = true;
+	irq->host_irq = host_irq;
+	irq->hwintid = data->hwirq;
+	return 0;
+}
+
+/* @irq->irq_lock must be held */
+static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
+{
+	irq->hw = false;
+	irq->hwintid = 0;
+}
+
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
+			  u32 vintid)
+{
+	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
 	unsigned long flags;
+	int ret;
 
 	BUG_ON(!irq);
 
 	spin_lock_irqsave(&irq->irq_lock, flags);
-
-	irq->hw = true;
-	irq->hwintid = phys_irq;
-
+	ret = kvm_vgic_map_irq(vcpu, irq, host_irq);
 	spin_unlock_irqrestore(&irq->irq_lock, flags);
 	vgic_put_irq(vcpu->kvm, irq);
 
-	return 0;
+	return ret;
 }
 
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
 	struct vgic_irq *irq;
 	unsigned long flags;
@@ -435,14 +468,11 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
 	if (!vgic_initialized(vcpu->kvm))
 		return -EAGAIN;
 
-	irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
 	BUG_ON(!irq);
 
 	spin_lock_irqsave(&irq->irq_lock, flags);
-
-	irq->hw = false;
-	irq->hwintid = 0;
-
+	kvm_vgic_unmap_irq(irq);
 	spin_unlock_irqrestore(&irq->irq_lock, flags);
 	vgic_put_irq(vcpu->kvm, irq);
 
@@ -688,6 +718,8 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
+	WARN_ON(vgic_v4_sync_hwstate(vcpu));
+
 	/* An empty ap_list_head implies used_lrs == 0 */
 	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
 		return;
@@ -700,6 +732,8 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 /* Flush our emulation state into the GIC hardware before entering the guest. */
 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 {
+	WARN_ON(vgic_v4_flush_hwstate(vcpu));
+
 	/*
 	 * If there are no virtual interrupts active or pending for this
 	 * VCPU, then there is no work to do and we can bail out without
@@ -751,6 +785,9 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 	if (!vcpu->kvm->arch.vgic.enabled)
 		return false;
 
+	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
+		return true;
+
 	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
@@ -784,9 +821,9 @@ void vgic_kick_vcpus(struct kvm *kvm)
 	}
 }
 
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
-	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
 	bool map_is_active;
 	unsigned long flags;
 
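
The parent_data walk in kvm_vgic_map_irq() deserves a word: with hierarchical irqdomains, the Linux irq handed in may sit in a stacked domain (an MSI domain on top of the GIC, for instance), so its immediate hwirq is not necessarily a GIC INTID. Walking to the root of the hierarchy yields the hwirq the GIC itself sees, which is what gets stored in hwintid.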
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 4f8aecb07ae6..efbcf8f96f9c 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -237,4 +237,14 @@ static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
 	}
 }
 
+int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
+			 u32 devid, u32 eventid, struct vgic_irq **irq);
+struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
+
+bool vgic_supports_direct_msis(struct kvm *kvm);
+int vgic_v4_init(struct kvm *kvm);
+void vgic_v4_teardown(struct kvm *kvm);
+int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu);
+int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu);
+
 #endif
240#endif 250#endif