commit bf0fb67cf957fc8ecfaaa2819b7d6a0f795e2ef2
tree   22697f7deae781dbbacd2e19a5030df2e8551e6a
parent 8999602d08a804ae9cb271fdd5378f910058112d
parent d44758c0dfc5993a4b9952935a7eae4c91ebb6b4
author    Paolo Bonzini <pbonzini@redhat.com>  2015-04-07 12:09:20 -0400
committer Paolo Bonzini <pbonzini@redhat.com>  2015-04-07 12:09:20 -0400

Merge tag 'kvm-arm-for-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into 'kvm-next'

KVM/ARM changes for v4.1:

- fixes for live migration
- irqfd support
- kvm-io-bus & vgic rework to enable ioeventfd
- page ageing for stage-2 translation
- various cleanups
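The features above are advertised to userspace through KVM_CHECK_EXTENSION (see the check_extension() change in arch/arm/kvm/arm.c below). As a rough illustration — a hedged sketch, not part of this merge — a userspace probe for the newly advertised ARM capabilities could look like this, assuming a standard /dev/kvm node:

/* Hedged sketch: probe the capabilities this merge starts advertising
 * on ARM (KVM_CAP_IRQFD, KVM_CAP_IOEVENTFD, KVM_CAP_MP_STATE). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);

        if (kvm < 0) {
                perror("open /dev/kvm");
                return 1;
        }
        /* KVM_CHECK_EXTENSION returns a positive value when supported. */
        printf("irqfd:     %d\n", ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_IRQFD));
        printf("ioeventfd: %d\n", ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_IOEVENTFD));
        printf("mp_state:  %d\n", ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE));
        return 0;
}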
 Documentation/virtual/kvm/api.txt                   |  22
 arch/arm/include/asm/kvm_arm.h                      |   1
 arch/arm/include/asm/kvm_host.h                     |  15
 arch/arm/include/asm/kvm_mmio.h                     |  22
 arch/arm/include/uapi/asm/kvm.h                     |   3
 arch/arm/kernel/asm-offsets.c                       |   4
 arch/arm/kvm/Kconfig                                |  30
 arch/arm/kvm/Makefile                               |  12
 arch/arm/kvm/arm.c                                  |  45
 arch/arm/kvm/guest.c                                |  18
 arch/arm/kvm/interrupts_head.S                      |   8
 arch/arm/kvm/mmio.c                                 |  64
 arch/arm/kvm/mmu.c                                  | 134
 arch/arm/kvm/trace.h                                |  48
 arch/arm64/include/asm/esr.h                        |   1
 arch/arm64/include/asm/kvm_arm.h                    |   1
 arch/arm64/include/asm/kvm_host.h                   |  15
 arch/arm64/include/asm/kvm_mmio.h                   |  22
 arch/arm64/include/uapi/asm/kvm.h                   |   3
 arch/arm64/kvm/Kconfig                              |  18
 arch/arm64/kvm/Makefile                             |  20
 arch/powerpc/kvm/mpic.c                             |  12
 arch/powerpc/kvm/powerpc.c                          |   4
 arch/s390/kvm/diag.c                                |   2
 arch/x86/kvm/Makefile                               |   2
 arch/x86/kvm/i8254.c                                |  14
 arch/x86/kvm/i8254.h                                |   2
 arch/x86/kvm/i8259.c                                |  12
 arch/x86/kvm/ioapic.c                               |   8
 arch/x86/kvm/ioapic.h                               |   2
 arch/x86/kvm/irq.h                                  |   2
 arch/x86/kvm/lapic.c                                |   4
 arch/x86/kvm/lapic.h                                |   2
 arch/x86/kvm/vmx.c                                  |   2
 arch/x86/kvm/x86.c                                  |  13
 include/kvm/arm_arch_timer.h                        |  31
 include/kvm/arm_vgic.h                              | 117
 include/kvm/iodev.h (renamed from virt/kvm/iodev.h) |  28
 include/linux/kvm_host.h                            |  24
 virt/kvm/arm/arch_timer.c                           |  45
 virt/kvm/arm/vgic-v2-emul.c                         |  71
 virt/kvm/arm/vgic-v3-emul.c                         | 246
 virt/kvm/arm/vgic.c                                 | 479
 virt/kvm/arm/vgic.h                                 |  37
 virt/kvm/coalesced_mmio.c                           |   7
 virt/kvm/eventfd.c                                  |   9
 virt/kvm/kvm_main.c                                 |  34
 47 files changed, 1012 insertions(+), 703 deletions(-)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 1490eb0ef798..57d25fdd3d7e 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -997,7 +997,7 @@ for vm-wide capabilities.
 4.38 KVM_GET_MP_STATE
 
 Capability: KVM_CAP_MP_STATE
-Architectures: x86, s390
+Architectures: x86, s390, arm, arm64
 Type: vcpu ioctl
 Parameters: struct kvm_mp_state (out)
 Returns: 0 on success; -1 on error
@@ -1011,7 +1011,7 @@ uniprocessor guests).
 
 Possible values are:
 
- - KVM_MP_STATE_RUNNABLE:       the vcpu is currently running [x86]
+ - KVM_MP_STATE_RUNNABLE:       the vcpu is currently running [x86,arm/arm64]
 - KVM_MP_STATE_UNINITIALIZED:  the vcpu is an application processor (AP)
                                 which has not yet received an INIT signal [x86]
 - KVM_MP_STATE_INIT_RECEIVED:  the vcpu has received an INIT signal, and is
@@ -1020,7 +1020,7 @@ Possible values are:
                                 is waiting for an interrupt [x86]
 - KVM_MP_STATE_SIPI_RECEIVED:  the vcpu has just received a SIPI (vector
                                 accessible via KVM_GET_VCPU_EVENTS) [x86]
- - KVM_MP_STATE_STOPPED:        the vcpu is stopped [s390]
+ - KVM_MP_STATE_STOPPED:        the vcpu is stopped [s390,arm/arm64]
 - KVM_MP_STATE_CHECK_STOP:     the vcpu is in a special error state [s390]
 - KVM_MP_STATE_OPERATING:      the vcpu is operating (running or halted)
                                 [s390]
@@ -1031,11 +1031,15 @@ On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
 in-kernel irqchip, the multiprocessing state must be maintained by userspace on
 these architectures.
 
+For arm/arm64:
+
+The only states that are valid are KVM_MP_STATE_STOPPED and
+KVM_MP_STATE_RUNNABLE which reflect if the vcpu is paused or not.
 
 4.39 KVM_SET_MP_STATE
 
 Capability: KVM_CAP_MP_STATE
-Architectures: x86, s390
+Architectures: x86, s390, arm, arm64
 Type: vcpu ioctl
 Parameters: struct kvm_mp_state (in)
 Returns: 0 on success; -1 on error
@@ -1047,6 +1051,10 @@ On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
 in-kernel irqchip, the multiprocessing state must be maintained by userspace on
 these architectures.
 
+For arm/arm64:
+
+The only states that are valid are KVM_MP_STATE_STOPPED and
+KVM_MP_STATE_RUNNABLE which reflect if the vcpu should be paused or not.
 
 4.40 KVM_SET_IDENTITY_MAP_ADDR
 
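For a sense of how the documented arm/arm64 semantics are meant to be used, here is a minimal, hypothetical userspace sketch of pausing and resuming a vcpu (vcpu_fd is assumed to come from KVM_CREATE_VCPU; this is not part of the patch):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Pause: the vcpu stops entering the guest until made runnable again. */
static int vcpu_pause(int vcpu_fd)
{
        struct kvm_mp_state st = { .mp_state = KVM_MP_STATE_STOPPED };

        return ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
}

/* Resume: clear the paused state set above. */
static int vcpu_resume(int vcpu_fd)
{
        struct kvm_mp_state st = { .mp_state = KVM_MP_STATE_RUNNABLE };

        return ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
}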
@@ -2263,7 +2271,7 @@ into the hash PTE second double word).
 4.75 KVM_IRQFD
 
 Capability: KVM_CAP_IRQFD
-Architectures: x86 s390
+Architectures: x86 s390 arm arm64
 Type: vm ioctl
 Parameters: struct kvm_irqfd (in)
 Returns: 0 on success, -1 on error
@@ -2289,6 +2297,10 @@ Note that closing the resamplefd is not sufficient to disable the
 irqfd. The KVM_IRQFD_FLAG_RESAMPLE is only necessary on assignment
 and need not be specified with KVM_IRQFD_FLAG_DEASSIGN.
 
+On ARM/ARM64, the gsi field in the kvm_irqfd struct specifies the Shared
+Peripheral Interrupt (SPI) index, such that the GIC interrupt ID is
+given by gsi + 32.
+
 4.76 KVM_PPC_ALLOCATE_HTAB
 
 Capability: KVM_CAP_PPC_ALLOC_HTAB
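The gsi-to-SPI mapping documented above is easy to get wrong in both directions. A hedged sketch of wiring an eventfd to a given GIC SPI on a VM file descriptor (vm_fd and the chosen interrupt ID are assumptions, not part of this patch):

#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Inject GIC interrupt ID 'intid' (an SPI, so intid >= 32) whenever the
 * eventfd is signalled. Per the text above, gsi = intid - 32. */
static int wire_spi_irqfd(int vm_fd, unsigned int intid)
{
        int efd = eventfd(0, EFD_CLOEXEC);
        struct kvm_irqfd irqfd = {
                .fd  = efd,
                .gsi = intid - 32,
        };

        if (efd < 0)
                return -1;
        return ioctl(vm_fd, KVM_IRQFD, &irqfd);
}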
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 816db0bf2dd8..d995821f1698 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -185,6 +185,7 @@
 #define HSR_COND (0xfU << HSR_COND_SHIFT)
 
 #define FSC_FAULT (0x04)
+#define FSC_ACCESS (0x08)
 #define FSC_PERM (0x0c)
 
 /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 41008cd7c53f..d71607c16601 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -27,6 +27,8 @@
 #include <asm/fpstate.h>
 #include <kvm/arm_arch_timer.h>
 
+#define __KVM_HAVE_ARCH_INTC_INITIALIZED
+
 #if defined(CONFIG_KVM_ARM_MAX_VCPUS)
 #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
 #else
@@ -165,19 +167,10 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 
 /* We do not have shadow page tables, hence the empty hooks */
-static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
-                              unsigned long end)
-{
-        return 0;
-}
-
-static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
-        return 0;
-}
-
 static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
                                                          unsigned long address)
 {
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
index 3f83db2f6cf0..d8e90c8cb5fa 100644
--- a/arch/arm/include/asm/kvm_mmio.h
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -28,28 +28,6 @@ struct kvm_decode {
         bool sign_extend;
 };
 
-/*
- * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
- * which is an anonymous type. Use our own type instead.
- */
-struct kvm_exit_mmio {
-        phys_addr_t phys_addr;
-        u8 data[8];
-        u32 len;
-        bool is_write;
-        void *private;
-};
-
-static inline void kvm_prepare_mmio(struct kvm_run *run,
-                                    struct kvm_exit_mmio *mmio)
-{
-        run->mmio.phys_addr = mmio->phys_addr;
-        run->mmio.len = mmio->len;
-        run->mmio.is_write = mmio->is_write;
-        memcpy(run->mmio.data, mmio->data, mmio->len);
-        run->exit_reason = KVM_EXIT_MMIO;
-}
-
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                  phys_addr_t fault_ipa);
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 0db25bc32864..2499867dd0d8 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -198,6 +198,9 @@ struct kvm_arch_memory_slot {
 /* Highest supported SPI, from VGIC_NR_IRQS */
 #define KVM_ARM_IRQ_GIC_MAX 127
 
+/* One single KVM irqchip, ie. the VGIC */
+#define KVM_NR_IRQCHIPS 1
+
 /* PSCI interface */
 #define KVM_PSCI_FN_BASE 0x95c1ba5e
 #define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 2d2d6087b9b1..488eaac56028 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -190,7 +190,6 @@ int main(void)
   DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar));
   DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar));
   DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
-#ifdef CONFIG_KVM_ARM_VGIC
   DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
   DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
   DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
@@ -200,14 +199,11 @@ int main(void)
   DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
   DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
   DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
-#ifdef CONFIG_KVM_ARM_TIMER
   DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
   DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
   DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
   DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
-#endif
   DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
-#endif
   DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
 #endif
   return 0;
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 338ace78ed18..f1f79d104309 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -18,6 +18,7 @@ if VIRTUALIZATION
 
 config KVM
         bool "Kernel-based Virtual Machine (KVM) support"
+        depends on MMU && OF
         select PREEMPT_NOTIFIERS
         select ANON_INODES
         select HAVE_KVM_CPU_RELAX_INTERCEPT
@@ -26,10 +27,12 @@ config KVM
         select KVM_ARM_HOST
         select KVM_GENERIC_DIRTYLOG_READ_PROTECT
         select SRCU
-        depends on ARM_VIRT_EXT && ARM_LPAE
+        select MMU_NOTIFIER
+        select HAVE_KVM_EVENTFD
+        select HAVE_KVM_IRQFD
+        depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
         ---help---
-          Support hosting virtualized guest machines. You will also
-          need to select one or more of the processor modules below.
+          Support hosting virtualized guest machines.
 
           This module provides access to the hardware capabilities through
           a character device node named /dev/kvm.
@@ -37,10 +40,7 @@ config KVM
           If unsure, say N.
 
 config KVM_ARM_HOST
-        bool "KVM host support for ARM cpus."
-        depends on KVM
-        depends on MMU
-        select MMU_NOTIFIER
+        bool
         ---help---
           Provides host support for ARM processors.
 
@@ -55,20 +55,4 @@ config KVM_ARM_MAX_VCPUS
           large, so only choose a reasonable number that you expect to
           actually use.
 
-config KVM_ARM_VGIC
-        bool "KVM support for Virtual GIC"
-        depends on KVM_ARM_HOST && OF
-        select HAVE_KVM_IRQCHIP
-        default y
-        ---help---
-          Adds support for a hardware assisted, in-kernel GIC emulation.
-
-config KVM_ARM_TIMER
-        bool "KVM support for Architected Timers"
-        depends on KVM_ARM_VGIC && ARM_ARCH_TIMER
-        select HAVE_KVM_IRQCHIP
-        default y
-        ---help---
-          Adds support for the Architected Timers in virtual machines
-
 endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 443b8bea43e9..139e46c08b6e 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -7,7 +7,7 @@ ifeq ($(plus_virt),+virt)
 plus_virt_def := -DREQUIRES_VIRT=1
 endif
 
-ccflags-y += -Ivirt/kvm -Iarch/arm/kvm
+ccflags-y += -Iarch/arm/kvm
 CFLAGS_arm.o := -I.
 CFLAGS_mmu.o := -I.
 
@@ -15,12 +15,12 @@ AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
 AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
 
 KVM := ../../../virt/kvm
-kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
+kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o
 
 obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
-obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
-obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
-obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
-obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
+obj-y += $(KVM)/arm/vgic.o
+obj-y += $(KVM)/arm/vgic-v2.o
+obj-y += $(KVM)/arm/vgic-v2-emul.o
+obj-y += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 5560f74f9eee..6f536451ab78 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -61,8 +61,6 @@ static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u8 kvm_next_vmid;
 static DEFINE_SPINLOCK(kvm_vmid_lock);
 
-static bool vgic_present;
-
 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
 {
         BUG_ON(preemptible());
@@ -173,8 +171,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
         int r;
         switch (ext) {
         case KVM_CAP_IRQCHIP:
-                r = vgic_present;
-                break;
+        case KVM_CAP_IRQFD:
+        case KVM_CAP_IOEVENTFD:
         case KVM_CAP_DEVICE_CTRL:
         case KVM_CAP_USER_MEMORY:
         case KVM_CAP_SYNC_MMU:
@@ -183,6 +181,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
         case KVM_CAP_ARM_PSCI:
         case KVM_CAP_ARM_PSCI_0_2:
         case KVM_CAP_READONLY_MEM:
+        case KVM_CAP_MP_STATE:
                 r = 1;
                 break;
         case KVM_CAP_COALESCED_MMIO:
@@ -268,7 +267,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-        return 0;
+        return kvm_timer_should_fire(vcpu);
 }
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -313,13 +312,29 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                     struct kvm_mp_state *mp_state)
 {
-        return -EINVAL;
+        if (vcpu->arch.pause)
+                mp_state->mp_state = KVM_MP_STATE_STOPPED;
+        else
+                mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
+
+        return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                     struct kvm_mp_state *mp_state)
 {
-        return -EINVAL;
+        switch (mp_state->mp_state) {
+        case KVM_MP_STATE_RUNNABLE:
+                vcpu->arch.pause = false;
+                break;
+        case KVM_MP_STATE_STOPPED:
+                vcpu->arch.pause = true;
+                break;
+        default:
+                return -EINVAL;
+        }
+
+        return 0;
 }
 
 /**
@@ -452,6 +467,11 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
         return 0;
 }
 
+bool kvm_arch_intc_initialized(struct kvm *kvm)
+{
+        return vgic_initialized(kvm);
+}
+
 static void vcpu_pause(struct kvm_vcpu *vcpu)
 {
         wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
@@ -831,8 +851,6 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
 
         switch (dev_id) {
         case KVM_ARM_DEVICE_VGIC_V2:
-                if (!vgic_present)
-                        return -ENXIO;
                 return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
         default:
                 return -ENODEV;
@@ -847,10 +865,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 
         switch (ioctl) {
         case KVM_CREATE_IRQCHIP: {
-                if (vgic_present)
-                        return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
-                else
-                        return -ENXIO;
+                return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
         }
         case KVM_ARM_SET_DEVICE_ADDR: {
                 struct kvm_arm_device_addr dev_addr;
@@ -1035,10 +1050,6 @@ static int init_hyp_mode(void)
         if (err)
                 goto out_free_context;
 
-#ifdef CONFIG_KVM_ARM_VGIC
-        vgic_present = true;
-#endif
-
         /*
          * Init HYP architected timer support
          */
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 384bab67c462..d503fbb787d3 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -109,22 +109,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         return -EINVAL;
 }
 
-#ifndef CONFIG_KVM_ARM_TIMER
-
-#define NUM_TIMER_REGS 0
-
-static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
-{
-        return 0;
-}
-
-static bool is_timer_reg(u64 index)
-{
-        return false;
-}
-
-#else
-
 #define NUM_TIMER_REGS 3
 
 static bool is_timer_reg(u64 index)
@@ -152,8 +136,6 @@ static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
         return 0;
 }
 
-#endif
-
 static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
         void __user *uaddr = (void __user *)(long)reg->addr;
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 14d488388480..35e4a3a0c476 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -402,7 +402,6 @@ vcpu .req r0 @ vcpu pointer always in r0
 * Assumes vcpu pointer in vcpu reg
 */
.macro save_vgic_state
-#ifdef CONFIG_KVM_ARM_VGIC
        /* Get VGIC VCTRL base into r2 */
        ldr     r2, [vcpu, #VCPU_KVM]
        ldr     r2, [r2, #KVM_VGIC_VCTRL]
@@ -460,7 +459,6 @@ ARM_BE8(rev r6, r6 )
        subs    r4, r4, #1
        bne     1b
2:
-#endif
.endm
 
/*
@@ -469,7 +467,6 @@
 * Assumes vcpu pointer in vcpu reg
 */
.macro restore_vgic_state
-#ifdef CONFIG_KVM_ARM_VGIC
        /* Get VGIC VCTRL base into r2 */
        ldr     r2, [vcpu, #VCPU_KVM]
        ldr     r2, [r2, #KVM_VGIC_VCTRL]
@@ -501,7 +498,6 @@ ARM_BE8(rev r6, r6 )
        subs    r4, r4, #1
        bne     1b
2:
-#endif
.endm
 
#define CNTHCTL_PL1PCTEN (1 << 0)
@@ -515,7 +511,6 @@ ARM_BE8(rev r6, r6 )
 * Clobbers r2-r5
 */
.macro save_timer_state
-#ifdef CONFIG_KVM_ARM_TIMER
        ldr     r4, [vcpu, #VCPU_KVM]
        ldr     r2, [r4, #KVM_TIMER_ENABLED]
        cmp     r2, #0
@@ -537,7 +532,6 @@ ARM_BE8(rev r6, r6 )
        mcrr    p15, 4, r2, r2, c14     @ CNTVOFF
 
1:
-#endif
        @ Allow physical timer/counter access for the host
        mrc     p15, 4, r2, c14, c1, 0  @ CNTHCTL
        orr     r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
@@ -559,7 +553,6 @@ ARM_BE8(rev r6, r6 )
        bic     r2, r2, #CNTHCTL_PL1PCEN
        mcr     p15, 4, r2, c14, c1, 0  @ CNTHCTL
 
-#ifdef CONFIG_KVM_ARM_TIMER
        ldr     r4, [vcpu, #VCPU_KVM]
        ldr     r2, [r4, #KVM_TIMER_ENABLED]
        cmp     r2, #0
@@ -579,7 +572,6 @@ ARM_BE8(rev r6, r6 )
        and     r2, r2, #3
        mcr     p15, 0, r2, c14, c3, 1  @ CNTV_CTL
1:
-#endif
.endm
 
.equ vmentry, 0
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 5d3bfc0eb3f0..974b1c606d04 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -121,12 +121,11 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
         return 0;
 }
 
-static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
-                      struct kvm_exit_mmio *mmio)
+static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
 {
         unsigned long rt;
-        int len;
-        bool is_write, sign_extend;
+        int access_size;
+        bool sign_extend;
 
         if (kvm_vcpu_dabt_isextabt(vcpu)) {
                 /* cache operation on I/O addr, tell guest unsupported */
@@ -140,17 +139,15 @@
                 return 1;
         }
 
-        len = kvm_vcpu_dabt_get_as(vcpu);
-        if (unlikely(len < 0))
-                return len;
+        access_size = kvm_vcpu_dabt_get_as(vcpu);
+        if (unlikely(access_size < 0))
+                return access_size;
 
-        is_write = kvm_vcpu_dabt_iswrite(vcpu);
+        *is_write = kvm_vcpu_dabt_iswrite(vcpu);
         sign_extend = kvm_vcpu_dabt_issext(vcpu);
         rt = kvm_vcpu_dabt_get_rd(vcpu);
 
-        mmio->is_write = is_write;
-        mmio->phys_addr = fault_ipa;
-        mmio->len = len;
+        *len = access_size;
         vcpu->arch.mmio_decode.sign_extend = sign_extend;
         vcpu->arch.mmio_decode.rt = rt;
 
@@ -165,20 +162,20 @@
 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                  phys_addr_t fault_ipa)
 {
-        struct kvm_exit_mmio mmio;
         unsigned long data;
         unsigned long rt;
         int ret;
+        bool is_write;
+        int len;
+        u8 data_buf[8];
 
         /*
-         * Prepare MMIO operation. First stash it in a private
-         * structure that we can use for in-kernel emulation. If the
-         * kernel can't handle it, copy it into run->mmio and let user
-         * space do its magic.
+         * Prepare MMIO operation. First decode the syndrome data we get
+         * from the CPU. Then try if some in-kernel emulation feels
+         * responsible, otherwise let user space do its magic.
         */
-
         if (kvm_vcpu_dabt_isvalid(vcpu)) {
-                ret = decode_hsr(vcpu, fault_ipa, &mmio);
+                ret = decode_hsr(vcpu, &is_write, &len);
                 if (ret)
                         return ret;
         } else {
@@ -188,21 +185,34 @@
 
         rt = vcpu->arch.mmio_decode.rt;
 
-        if (mmio.is_write) {
-                data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt),
-                                               mmio.len);
+        if (is_write) {
+                data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
+
+                trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
+                mmio_write_buf(data_buf, len, data);
 
-                trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, mmio.len,
-                               fault_ipa, data);
-                mmio_write_buf(mmio.data, mmio.len, data);
+                ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
+                                       data_buf);
         } else {
-                trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, mmio.len,
+                trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
                                fault_ipa, 0);
+
+                ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
+                                      data_buf);
         }
 
-        if (vgic_handle_mmio(vcpu, run, &mmio))
+        /* Now prepare kvm_run for the potential return to userland. */
+        run->mmio.is_write = is_write;
+        run->mmio.phys_addr = fault_ipa;
+        run->mmio.len = len;
+        memcpy(run->mmio.data, data_buf, len);
+
+        if (!ret) {
+                /* We handled the access successfully in the kernel. */
+                kvm_handle_mmio_return(vcpu, run);
                 return 1;
+        }
 
-        kvm_prepare_mmio(run, &mmio);
+        run->exit_reason = KVM_EXIT_MMIO;
         return 0;
 }
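Routing MMIO aborts through kvm_io_bus_write()/kvm_io_bus_read() is what lets ioeventfd devices registered on KVM_MMIO_BUS claim a guest access before userspace ever sees an exit. A hedged userspace sketch of registering such a doorbell (vm_fd and the guest-physical address are assumptions, not part of this patch):

#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Have 4-byte guest writes to 'gpa' signal the eventfd instead of
 * bouncing out to userspace as KVM_EXIT_MMIO. */
static int register_doorbell(int vm_fd, __u64 gpa)
{
        int efd = eventfd(0, EFD_CLOEXEC);
        struct kvm_ioeventfd ioev = {
                .addr = gpa,
                .len  = 4,
                .fd   = efd,
        };

        if (efd < 0)
                return -1;
        return ioctl(vm_fd, KVM_IOEVENTFD, &ioev);
}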
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 5656d79c5a44..15b050d46fc9 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1330,10 +1330,51 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
 out_unlock:
         spin_unlock(&kvm->mmu_lock);
+        kvm_set_pfn_accessed(pfn);
         kvm_release_pfn_clean(pfn);
         return ret;
 }
 
+/*
+ * Resolve the access fault by making the page young again.
+ * Note that because the faulting entry is guaranteed not to be
+ * cached in the TLB, we don't need to invalidate anything.
+ */
+static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
+{
+        pmd_t *pmd;
+        pte_t *pte;
+        pfn_t pfn;
+        bool pfn_valid = false;
+
+        trace_kvm_access_fault(fault_ipa);
+
+        spin_lock(&vcpu->kvm->mmu_lock);
+
+        pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
+        if (!pmd || pmd_none(*pmd))     /* Nothing there */
+                goto out;
+
+        if (kvm_pmd_huge(*pmd)) {       /* THP, HugeTLB */
+                *pmd = pmd_mkyoung(*pmd);
+                pfn = pmd_pfn(*pmd);
+                pfn_valid = true;
+                goto out;
+        }
+
+        pte = pte_offset_kernel(pmd, fault_ipa);
+        if (pte_none(*pte))             /* Nothing there either */
+                goto out;
+
+        *pte = pte_mkyoung(*pte);       /* Just a page... */
+        pfn = pte_pfn(*pte);
+        pfn_valid = true;
+out:
+        spin_unlock(&vcpu->kvm->mmu_lock);
+        if (pfn_valid)
+                kvm_set_pfn_accessed(pfn);
+}
+
 /**
  * kvm_handle_guest_abort - handles all 2nd stage aborts
  * @vcpu: the VCPU pointer
@@ -1364,7 +1405,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
         /* Check the stage-2 fault is trans. fault or write fault */
         fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
-        if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
+        if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
+            fault_status != FSC_ACCESS) {
                 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
                         kvm_vcpu_trap_get_class(vcpu),
                         (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
@@ -1400,6 +1442,12 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
         /* Userspace should not be able to register out-of-bounds IPAs */
         VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
 
+        if (fault_status == FSC_ACCESS) {
+                handle_access_fault(vcpu, fault_ipa);
+                ret = 1;
+                goto out_unlock;
+        }
+
         ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
         if (ret == 0)
                 ret = 1;
@@ -1408,15 +1456,16 @@ out_unlock:
         return ret;
 }
 
-static void handle_hva_to_gpa(struct kvm *kvm,
-                              unsigned long start,
-                              unsigned long end,
-                              void (*handler)(struct kvm *kvm,
-                                              gpa_t gpa, void *data),
-                              void *data)
+static int handle_hva_to_gpa(struct kvm *kvm,
+                             unsigned long start,
+                             unsigned long end,
+                             int (*handler)(struct kvm *kvm,
+                                            gpa_t gpa, void *data),
+                             void *data)
 {
         struct kvm_memslots *slots;
         struct kvm_memory_slot *memslot;
+        int ret = 0;
 
         slots = kvm_memslots(kvm);
 
@@ -1440,14 +1489,17 @@
 
                 for (; gfn < gfn_end; ++gfn) {
                         gpa_t gpa = gfn << PAGE_SHIFT;
-                        handler(kvm, gpa, data);
+                        ret |= handler(kvm, gpa, data);
                 }
         }
+
+        return ret;
 }
 
-static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
         unmap_stage2_range(kvm, gpa, PAGE_SIZE);
+        return 0;
 }
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
@@ -1473,7 +1525,7 @@ int kvm_unmap_hva_range(struct kvm *kvm,
         return 0;
 }
 
-static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
         pte_t *pte = (pte_t *)data;
 
@@ -1485,6 +1537,7 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
          * through this calling path.
          */
         stage2_set_pte(kvm, NULL, gpa, pte, 0);
+        return 0;
 }
 
 
@@ -1501,6 +1554,67 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
         handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
 }
 
+static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+        pmd_t *pmd;
+        pte_t *pte;
+
+        pmd = stage2_get_pmd(kvm, NULL, gpa);
+        if (!pmd || pmd_none(*pmd))     /* Nothing there */
+                return 0;
+
+        if (kvm_pmd_huge(*pmd)) {       /* THP, HugeTLB */
+                if (pmd_young(*pmd)) {
+                        *pmd = pmd_mkold(*pmd);
+                        return 1;
+                }
+
+                return 0;
+        }
+
+        pte = pte_offset_kernel(pmd, gpa);
+        if (pte_none(*pte))
+                return 0;
+
+        if (pte_young(*pte)) {
+                *pte = pte_mkold(*pte); /* Just a page... */
+                return 1;
+        }
+
+        return 0;
+}
+
+static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+        pmd_t *pmd;
+        pte_t *pte;
+
+        pmd = stage2_get_pmd(kvm, NULL, gpa);
+        if (!pmd || pmd_none(*pmd))     /* Nothing there */
+                return 0;
+
+        if (kvm_pmd_huge(*pmd))         /* THP, HugeTLB */
+                return pmd_young(*pmd);
+
+        pte = pte_offset_kernel(pmd, gpa);
+        if (!pte_none(*pte))            /* Just a page... */
+                return pte_young(*pte);
+
+        return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+        trace_kvm_age_hva(start, end);
+        return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+        trace_kvm_test_age_hva(hva);
+        return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
+}
+
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
         mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
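These handlers give the stage-2 tables real accessed-bit bookkeeping, so host reclaim decisions now see guest activity. Roughly — simplified from the generic MMU-notifier glue in virt/kvm/kvm_main.c of this era; treat it as a sketch, not the literal code — the call path that reaches kvm_age_hva() looks like:

/* Simplified sketch of the generic notifier path that lands in the
 * arch kvm_age_hva()/kvm_test_age_hva() hooks added above. */
static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
                                              struct mm_struct *mm,
                                              unsigned long start,
                                              unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int young, idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);

        young = kvm_age_hva(kvm, start, end);   /* arch hook */
        if (young)
                kvm_flush_remote_tlbs(kvm);

        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);

        return young;
}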
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index 6817664b46b8..0ec35392d208 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -68,6 +68,21 @@ TRACE_EVENT(kvm_guest_fault,
                   __entry->hxfar, __entry->vcpu_pc)
 );
 
+TRACE_EVENT(kvm_access_fault,
+        TP_PROTO(unsigned long ipa),
+        TP_ARGS(ipa),
+
+        TP_STRUCT__entry(
+                __field( unsigned long, ipa )
+        ),
+
+        TP_fast_assign(
+                __entry->ipa = ipa;
+        ),
+
+        TP_printk("IPA: %lx", __entry->ipa)
+);
+
 TRACE_EVENT(kvm_irq_line,
         TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
         TP_ARGS(type, vcpu_idx, irq_num, level),
@@ -210,6 +225,39 @@ TRACE_EVENT(kvm_set_spte_hva,
         TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
 );
 
+TRACE_EVENT(kvm_age_hva,
+        TP_PROTO(unsigned long start, unsigned long end),
+        TP_ARGS(start, end),
+
+        TP_STRUCT__entry(
+                __field( unsigned long, start )
+                __field( unsigned long, end )
+        ),
+
+        TP_fast_assign(
+                __entry->start = start;
+                __entry->end = end;
+        ),
+
+        TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
+                  __entry->start, __entry->end)
+);
+
+TRACE_EVENT(kvm_test_age_hva,
+        TP_PROTO(unsigned long hva),
+        TP_ARGS(hva),
+
+        TP_STRUCT__entry(
+                __field( unsigned long, hva )
+        ),
+
+        TP_fast_assign(
+                __entry->hva = hva;
+        ),
+
+        TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
+);
+
 TRACE_EVENT(kvm_hvc,
         TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
         TP_ARGS(vcpu_pc, r0, imm),
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 92bbae381598..70522450ca23 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -90,6 +90,7 @@
 #define ESR_ELx_FSC (0x3F)
 #define ESR_ELx_FSC_TYPE (0x3C)
 #define ESR_ELx_FSC_EXTABT (0x10)
+#define ESR_ELx_FSC_ACCESS (0x08)
 #define ESR_ELx_FSC_FAULT (0x04)
 #define ESR_ELx_FSC_PERM (0x0C)
 #define ESR_ELx_CV (UL(1) << 24)
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 54bb4ba97441..ac6fafb95fe7 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -188,6 +188,7 @@
 
 /* For compatibility with fault code shared with 32-bit */
 #define FSC_FAULT ESR_ELx_FSC_FAULT
+#define FSC_ACCESS ESR_ELx_FSC_ACCESS
 #define FSC_PERM ESR_ELx_FSC_PERM
 
 /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8ac3c70fe3c6..f0f58c9beec0 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -28,6 +28,8 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
 
+#define __KVM_HAVE_ARCH_INTC_INITIALIZED
+
 #if defined(CONFIG_KVM_ARM_MAX_VCPUS)
 #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
 #else
@@ -177,19 +179,10 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
                         unsigned long start, unsigned long end);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 
 /* We do not have shadow page tables, hence the empty hooks */
-static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
-                              unsigned long end)
-{
-        return 0;
-}
-
-static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
-        return 0;
-}
-
 static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
                                                          unsigned long address)
 {
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
index 9f52beb7cb13..889c908ee631 100644
--- a/arch/arm64/include/asm/kvm_mmio.h
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -31,28 +31,6 @@ struct kvm_decode {
         bool sign_extend;
 };
 
-/*
- * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
- * which is an anonymous type. Use our own type instead.
- */
-struct kvm_exit_mmio {
-        phys_addr_t phys_addr;
-        u8 data[8];
-        u32 len;
-        bool is_write;
-        void *private;
-};
-
-static inline void kvm_prepare_mmio(struct kvm_run *run,
-                                    struct kvm_exit_mmio *mmio)
-{
-        run->mmio.phys_addr = mmio->phys_addr;
-        run->mmio.len = mmio->len;
-        run->mmio.is_write = mmio->is_write;
-        memcpy(run->mmio.data, mmio->data, mmio->len);
-        run->exit_reason = KVM_EXIT_MMIO;
-}
-
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                  phys_addr_t fault_ipa);
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 3ef77a466018..c154c0b7eb60 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -191,6 +191,9 @@ struct kvm_arch_memory_slot {
 /* Highest supported SPI, from VGIC_NR_IRQS */
 #define KVM_ARM_IRQ_GIC_MAX 127
 
+/* One single KVM irqchip, ie. the VGIC */
+#define KVM_NR_IRQCHIPS 1
+
 /* PSCI interface */
 #define KVM_PSCI_FN_BASE 0x95c1ba5e
 #define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index f5590c81d95f..5105e297ed5f 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -18,6 +18,7 @@ if VIRTUALIZATION
 
 config KVM
         bool "Kernel-based Virtual Machine (KVM) support"
+        depends on OF
         select MMU_NOTIFIER
         select PREEMPT_NOTIFIERS
         select ANON_INODES
@@ -25,10 +26,10 @@ config KVM
         select HAVE_KVM_ARCH_TLB_FLUSH_ALL
         select KVM_MMIO
         select KVM_ARM_HOST
-        select KVM_ARM_VGIC
-        select KVM_ARM_TIMER
         select KVM_GENERIC_DIRTYLOG_READ_PROTECT
         select SRCU
+        select HAVE_KVM_EVENTFD
+        select HAVE_KVM_IRQFD
         ---help---
           Support hosting virtualized guest machines.
 
@@ -50,17 +51,4 @@ config KVM_ARM_MAX_VCPUS
           large, so only choose a reasonable number that you expect to
           actually use.
 
-config KVM_ARM_VGIC
-        bool
-        depends on KVM_ARM_HOST && OF
-        select HAVE_KVM_IRQCHIP
-        ---help---
-          Adds support for a hardware assisted, in-kernel GIC emulation.
-
-config KVM_ARM_TIMER
-        bool
-        depends on KVM_ARM_VGIC
-        ---help---
-          Adds support for the Architected Timers in virtual machines.
-
 endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 4e6e09ee4033..d5904f876cdb 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -2,7 +2,7 @@
 # Makefile for Kernel-based Virtual Machine module
 #
 
-ccflags-y += -Ivirt/kvm -Iarch/arm64/kvm
+ccflags-y += -Iarch/arm64/kvm
 CFLAGS_arm.o := -I.
 CFLAGS_mmu.o := -I.
 
@@ -11,7 +11,7 @@ ARM=../../../arch/arm/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
 
-kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
 
@@ -19,11 +19,11 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
 kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
 kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
 
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3-emul.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
-kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
+kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v2-switch.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
+kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index 39b3a8f816f2..4703fadd2737 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -34,7 +34,7 @@
 #include <asm/kvm_para.h>
 #include <asm/kvm_host.h>
 #include <asm/kvm_ppc.h>
-#include "iodev.h"
+#include <kvm/iodev.h>
 
 #define MAX_CPU 32
 #define MAX_SRC 256
@@ -1374,8 +1374,9 @@ static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val)
         return -ENXIO;
 }
 
-static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
-                         int len, void *ptr)
+static int kvm_mpic_read(struct kvm_vcpu *vcpu,
+                         struct kvm_io_device *this,
+                         gpa_t addr, int len, void *ptr)
 {
         struct openpic *opp = container_of(this, struct openpic, mmio);
         int ret;
@@ -1415,8 +1416,9 @@ static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
         return ret;
 }
 
-static int kvm_mpic_write(struct kvm_io_device *this, gpa_t addr,
-                          int len, const void *ptr)
+static int kvm_mpic_write(struct kvm_vcpu *vcpu,
+                          struct kvm_io_device *this,
+                          gpa_t addr, int len, const void *ptr)
 {
         struct openpic *opp = container_of(this, struct openpic, mmio);
         int ret;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 27c0face86f4..24bfe401373e 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -807,7 +807,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
         idx = srcu_read_lock(&vcpu->kvm->srcu);
 
-        ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
+        ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
                               bytes, &run->mmio.data);
 
         srcu_read_unlock(&vcpu->kvm->srcu, idx);
@@ -880,7 +880,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
         idx = srcu_read_lock(&vcpu->kvm->srcu);
 
-        ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
+        ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
                                bytes, &run->mmio.data);
 
         srcu_read_unlock(&vcpu->kvm->srcu, idx);
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 89140ddb998c..fc7ec95848c3 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -213,7 +213,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
          * - gpr 3 contains the virtqueue index (passed as datamatch)
          * - gpr 4 contains the index on the bus (optionally)
          */
-        ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
+        ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
                                       vcpu->run->s.regs.gprs[2] & 0xffffffff,
                                       8, &vcpu->run->s.regs.gprs[3],
                                       vcpu->run->s.regs.gprs[4]);
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 08f790dfadc9..16e8f962eaad 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,5 +1,5 @@
 
-ccflags-y += -Ivirt/kvm -Iarch/x86/kvm
+ccflags-y += -Iarch/x86/kvm
 
 CFLAGS_x86.o := -I.
 CFLAGS_svm.o := -I.
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 298781d4cfb4..4dce6f8b6129 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -443,7 +443,8 @@ static inline int pit_in_range(gpa_t addr)
                 (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
 }
 
-static int pit_ioport_write(struct kvm_io_device *this,
+static int pit_ioport_write(struct kvm_vcpu *vcpu,
+                            struct kvm_io_device *this,
                             gpa_t addr, int len, const void *data)
 {
         struct kvm_pit *pit = dev_to_pit(this);
@@ -519,7 +520,8 @@ static int pit_ioport_write(struct kvm_io_device *this,
         return 0;
 }
 
-static int pit_ioport_read(struct kvm_io_device *this,
+static int pit_ioport_read(struct kvm_vcpu *vcpu,
+                           struct kvm_io_device *this,
                            gpa_t addr, int len, void *data)
 {
         struct kvm_pit *pit = dev_to_pit(this);
@@ -589,7 +591,8 @@ static int pit_ioport_read(struct kvm_io_device *this,
         return 0;
 }
 
-static int speaker_ioport_write(struct kvm_io_device *this,
+static int speaker_ioport_write(struct kvm_vcpu *vcpu,
+                                struct kvm_io_device *this,
                                 gpa_t addr, int len, const void *data)
 {
         struct kvm_pit *pit = speaker_to_pit(this);
@@ -606,8 +609,9 @@ static int speaker_ioport_write(struct kvm_io_device *this,
         return 0;
 }
 
-static int speaker_ioport_read(struct kvm_io_device *this,
-                               gpa_t addr, int len, void *data)
+static int speaker_ioport_read(struct kvm_vcpu *vcpu,
+                               struct kvm_io_device *this,
+                               gpa_t addr, int len, void *data)
 {
         struct kvm_pit *pit = speaker_to_pit(this);
         struct kvm_kpit_state *pit_state = &pit->pit_state;
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index dd1b16b611b0..c84990b42b5b 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -3,7 +3,7 @@
 
 #include <linux/kthread.h>
 
-#include "iodev.h"
+#include <kvm/iodev.h>
 
 struct kvm_kpit_channel_state {
         u32 count; /* can be 65536 */
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 9541ba34126b..fef922ff2635 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -529,42 +529,42 @@ static int picdev_read(struct kvm_pic *s,
529 return 0; 529 return 0;
530} 530}
531 531
532static int picdev_master_write(struct kvm_io_device *dev, 532static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
533 gpa_t addr, int len, const void *val) 533 gpa_t addr, int len, const void *val)
534{ 534{
535 return picdev_write(container_of(dev, struct kvm_pic, dev_master), 535 return picdev_write(container_of(dev, struct kvm_pic, dev_master),
536 addr, len, val); 536 addr, len, val);
537} 537}
538 538
539static int picdev_master_read(struct kvm_io_device *dev, 539static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
540 gpa_t addr, int len, void *val) 540 gpa_t addr, int len, void *val)
541{ 541{
542 return picdev_read(container_of(dev, struct kvm_pic, dev_master), 542 return picdev_read(container_of(dev, struct kvm_pic, dev_master),
543 addr, len, val); 543 addr, len, val);
544} 544}
545 545
546static int picdev_slave_write(struct kvm_io_device *dev, 546static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
547 gpa_t addr, int len, const void *val) 547 gpa_t addr, int len, const void *val)
548{ 548{
549 return picdev_write(container_of(dev, struct kvm_pic, dev_slave), 549 return picdev_write(container_of(dev, struct kvm_pic, dev_slave),
550 addr, len, val); 550 addr, len, val);
551} 551}
552 552
553static int picdev_slave_read(struct kvm_io_device *dev, 553static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
554 gpa_t addr, int len, void *val) 554 gpa_t addr, int len, void *val)
555{ 555{
556 return picdev_read(container_of(dev, struct kvm_pic, dev_slave), 556 return picdev_read(container_of(dev, struct kvm_pic, dev_slave),
557 addr, len, val); 557 addr, len, val);
558} 558}
559 559
560static int picdev_eclr_write(struct kvm_io_device *dev, 560static int picdev_eclr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
561 gpa_t addr, int len, const void *val) 561 gpa_t addr, int len, const void *val)
562{ 562{
563 return picdev_write(container_of(dev, struct kvm_pic, dev_eclr), 563 return picdev_write(container_of(dev, struct kvm_pic, dev_eclr),
564 addr, len, val); 564 addr, len, val);
565} 565}
566 566
567static int picdev_eclr_read(struct kvm_io_device *dev, 567static int picdev_eclr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
568 gpa_t addr, int len, void *val) 568 gpa_t addr, int len, void *val)
569{ 569{
570 return picdev_read(container_of(dev, struct kvm_pic, dev_eclr), 570 return picdev_read(container_of(dev, struct kvm_pic, dev_eclr),
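[Editor's note] The i8259 wrappers above show the other half of the idiom: one struct kvm_pic embeds several kvm_io_device instances (dev_master, dev_slave, dev_eclr), and each thin wrapper picks out the parent with container_of() on its own member before funnelling into the shared picdev_read()/picdev_write(). Generic form, with hypothetical names:

	struct pic_like {
		struct kvm_io_device dev_master;
		struct kvm_io_device dev_slave;
		u8 latch;
	};

	static int common_read(struct pic_like *s, gpa_t addr, int len, void *val)
	{
		if (len != 1)
			return -EOPNOTSUPP;
		*(u8 *)val = s->latch;		/* placeholder payload */
		return 0;
	}

	static int master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, void *val)
	{
		return common_read(container_of(dev, struct pic_like, dev_master),
				   addr, len, val);
	}

	static int slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
	{
		return common_read(container_of(dev, struct pic_like, dev_slave),
				   addr, len, val);
	}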
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 24f0f17639d6..51889ec847b0 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -493,8 +493,8 @@ static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
493 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH))); 493 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
494} 494}
495 495
496static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len, 496static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
497 void *val) 497 gpa_t addr, int len, void *val)
498{ 498{
499 struct kvm_ioapic *ioapic = to_ioapic(this); 499 struct kvm_ioapic *ioapic = to_ioapic(this);
500 u32 result; 500 u32 result;
@@ -536,8 +536,8 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
536 return 0; 536 return 0;
537} 537}
538 538
539static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len, 539static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
540 const void *val) 540 gpa_t addr, int len, const void *val)
541{ 541{
542 struct kvm_ioapic *ioapic = to_ioapic(this); 542 struct kvm_ioapic *ioapic = to_ioapic(this);
543 u32 data; 543 u32 data;
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index 6e265cfcd86a..ca0b0b4e6256 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -3,7 +3,7 @@
3 3
4#include <linux/kvm_host.h> 4#include <linux/kvm_host.h>
5 5
6#include "iodev.h" 6#include <kvm/iodev.h>
7 7
8struct kvm; 8struct kvm;
9struct kvm_vcpu; 9struct kvm_vcpu;
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 2d03568e9498..ad68c73008c5 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -27,7 +27,7 @@
27#include <linux/kvm_host.h> 27#include <linux/kvm_host.h>
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29 29
30#include "iodev.h" 30#include <kvm/iodev.h>
31#include "ioapic.h" 31#include "ioapic.h"
32#include "lapic.h" 32#include "lapic.h"
33 33
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index bd4e34de24c7..44f7b9afbedb 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1038,7 +1038,7 @@ static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
1038 addr < apic->base_address + LAPIC_MMIO_LENGTH; 1038 addr < apic->base_address + LAPIC_MMIO_LENGTH;
1039} 1039}
1040 1040
1041static int apic_mmio_read(struct kvm_io_device *this, 1041static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1042 gpa_t address, int len, void *data) 1042 gpa_t address, int len, void *data)
1043{ 1043{
1044 struct kvm_lapic *apic = to_lapic(this); 1044 struct kvm_lapic *apic = to_lapic(this);
@@ -1358,7 +1358,7 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
1358 return ret; 1358 return ret;
1359} 1359}
1360 1360
1361static int apic_mmio_write(struct kvm_io_device *this, 1361static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1362 gpa_t address, int len, const void *data) 1362 gpa_t address, int len, const void *data)
1363{ 1363{
1364 struct kvm_lapic *apic = to_lapic(this); 1364 struct kvm_lapic *apic = to_lapic(this);
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 0bc6c656625b..e284c2880c56 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -1,7 +1,7 @@
1#ifndef __KVM_X86_LAPIC_H 1#ifndef __KVM_X86_LAPIC_H
2#define __KVM_X86_LAPIC_H 2#define __KVM_X86_LAPIC_H
3 3
4#include "iodev.h" 4#include <kvm/iodev.h>
5 5
6#include <linux/kvm_host.h> 6#include <linux/kvm_host.h>
7 7
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0caaf56eb459..b5a6425d8d97 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5824,7 +5824,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
5824 gpa_t gpa; 5824 gpa_t gpa;
5825 5825
5826 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 5826 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5827 if (!kvm_io_bus_write(vcpu->kvm, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { 5827 if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
5828 skip_emulated_instruction(vcpu); 5828 skip_emulated_instruction(vcpu);
5829 return 1; 5829 return 1;
5830 } 5830 }
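[Editor's note] With the reworked API the caller passes the vcpu and the bus code derives the VM itself from vcpu->kvm. As the vmx.c hunk shows, a zero-length write is used to probe KVM_FAST_MMIO_BUS for a registered ioeventfd; a hedged helper restating that usage (the wrapper name is made up):

	static bool fast_mmio_hit(struct kvm_vcpu *vcpu, gpa_t gpa)
	{
		/* len == 0, val == NULL: probe only; 0 means an
		 * ioeventfd claimed the address */
		return kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL) == 0;
	}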
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cc2c759f69a3..a284c927551e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4115,8 +4115,8 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
4115 do { 4115 do {
4116 n = min(len, 8); 4116 n = min(len, 8);
4117 if (!(vcpu->arch.apic && 4117 if (!(vcpu->arch.apic &&
4118 !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v)) 4118 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
4119 && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) 4119 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
4120 break; 4120 break;
4121 handled += n; 4121 handled += n;
4122 addr += n; 4122 addr += n;
@@ -4135,8 +4135,9 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
4135 do { 4135 do {
4136 n = min(len, 8); 4136 n = min(len, 8);
4137 if (!(vcpu->arch.apic && 4137 if (!(vcpu->arch.apic &&
4138 !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v)) 4138 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
4139 && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) 4139 addr, n, v))
4140 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
4140 break; 4141 break;
4141 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v); 4142 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
4142 handled += n; 4143 handled += n;
@@ -4631,10 +4632,10 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
4631 int r; 4632 int r;
4632 4633
4633 if (vcpu->arch.pio.in) 4634 if (vcpu->arch.pio.in)
4634 r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port, 4635 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
4635 vcpu->arch.pio.size, pd); 4636 vcpu->arch.pio.size, pd);
4636 else 4637 else
4637 r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS, 4638 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
4638 vcpu->arch.pio.port, vcpu->arch.pio.size, 4639 vcpu->arch.pio.port, vcpu->arch.pio.size,
4639 pd); 4640 pd);
4640 return r; 4641 return r;
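[Editor's note] The x86.c hunks keep the pre-existing chunking scheme: MMIO wider than 8 bytes is split into at-most-8-byte pieces, stopping at the first piece no device claims. A condensed sketch of that loop with the new vcpu-first API (function name is illustrative; void-pointer arithmetic is the usual kernel/GCC extension):

	static int mmio_write_chunked(struct kvm_vcpu *vcpu, gpa_t addr,
				      int len, const void *v)
	{
		int handled = 0;

		while (len) {
			int n = min(len, 8);

			if (kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
				break;	/* unhandled: fall back to userspace exit */
			handled += n;
			addr += n;
			v += n;
			len -= n;
		}
		return handled;	/* bytes completed in kernel */
	}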
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index b3f45a578344..e5966758c093 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -24,17 +24,14 @@
24#include <linux/workqueue.h> 24#include <linux/workqueue.h>
25 25
26struct arch_timer_kvm { 26struct arch_timer_kvm {
27#ifdef CONFIG_KVM_ARM_TIMER
28 /* Is the timer enabled */ 27 /* Is the timer enabled */
29 bool enabled; 28 bool enabled;
30 29
31 /* Virtual offset */ 30 /* Virtual offset */
32 cycle_t cntvoff; 31 cycle_t cntvoff;
33#endif
34}; 32};
35 33
36struct arch_timer_cpu { 34struct arch_timer_cpu {
37#ifdef CONFIG_KVM_ARM_TIMER
38 /* Registers: control register, timer value */ 35 /* Registers: control register, timer value */
39 u32 cntv_ctl; /* Saved/restored */ 36 u32 cntv_ctl; /* Saved/restored */
40 cycle_t cntv_cval; /* Saved/restored */ 37 cycle_t cntv_cval; /* Saved/restored */
@@ -55,10 +52,8 @@ struct arch_timer_cpu {
55 52
56 /* Timer IRQ */ 53 /* Timer IRQ */
57 const struct kvm_irq_level *irq; 54 const struct kvm_irq_level *irq;
58#endif
59}; 55};
60 56
61#ifdef CONFIG_KVM_ARM_TIMER
62int kvm_timer_hyp_init(void); 57int kvm_timer_hyp_init(void);
63void kvm_timer_enable(struct kvm *kvm); 58void kvm_timer_enable(struct kvm *kvm);
64void kvm_timer_init(struct kvm *kvm); 59void kvm_timer_init(struct kvm *kvm);
@@ -72,30 +67,6 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
72u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid); 67u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
73int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value); 68int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
74 69
75#else 70bool kvm_timer_should_fire(struct kvm_vcpu *vcpu);
76static inline int kvm_timer_hyp_init(void)
77{
78 return 0;
79};
80
81static inline void kvm_timer_enable(struct kvm *kvm) {}
82static inline void kvm_timer_init(struct kvm *kvm) {}
83static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
84 const struct kvm_irq_level *irq) {}
85static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
86static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {}
87static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {}
88static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {}
89
90static inline int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
91{
92 return 0;
93}
94
95static inline u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
96{
97 return 0;
98}
99#endif
100 71
101#endif 72#endif
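[Editor's note] The deletions above remove an instance of the standard optional-subsystem pattern: real prototypes under the Kconfig symbol, no-op static inline stubs otherwise, so callers need no ifdefs. Generic form of what is being dropped (CONFIG_FOO/foo_init are placeholders):

	#ifdef CONFIG_FOO
	int foo_init(void);
	#else
	static inline int foo_init(void) { return 0; }
	#endif

The stubs can go because, after this series, the architected timer is always built into KVM/ARM, so callers link against the real implementations unconditionally; kvm_timer_should_fire() is added as new API alongside.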
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 66203b268984..133ea00aa83b 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -24,6 +24,7 @@
24#include <linux/irqreturn.h> 24#include <linux/irqreturn.h>
25#include <linux/spinlock.h> 25#include <linux/spinlock.h>
26#include <linux/types.h> 26#include <linux/types.h>
27#include <kvm/iodev.h>
27 28
28#define VGIC_NR_IRQS_LEGACY 256 29#define VGIC_NR_IRQS_LEGACY 256
29#define VGIC_NR_SGIS 16 30#define VGIC_NR_SGIS 16
@@ -140,16 +141,21 @@ struct vgic_params {
140}; 141};
141 142
142struct vgic_vm_ops { 143struct vgic_vm_ops {
143 bool (*handle_mmio)(struct kvm_vcpu *, struct kvm_run *,
144 struct kvm_exit_mmio *);
145 bool (*queue_sgi)(struct kvm_vcpu *, int irq); 144 bool (*queue_sgi)(struct kvm_vcpu *, int irq);
146 void (*add_sgi_source)(struct kvm_vcpu *, int irq, int source); 145 void (*add_sgi_source)(struct kvm_vcpu *, int irq, int source);
147 int (*init_model)(struct kvm *); 146 int (*init_model)(struct kvm *);
148 int (*map_resources)(struct kvm *, const struct vgic_params *); 147 int (*map_resources)(struct kvm *, const struct vgic_params *);
149}; 148};
150 149
150struct vgic_io_device {
151 gpa_t addr;
152 int len;
153 const struct vgic_io_range *reg_ranges;
154 struct kvm_vcpu *redist_vcpu;
155 struct kvm_io_device dev;
156};
157
151struct vgic_dist { 158struct vgic_dist {
152#ifdef CONFIG_KVM_ARM_VGIC
153 spinlock_t lock; 159 spinlock_t lock;
154 bool in_kernel; 160 bool in_kernel;
155 bool ready; 161 bool ready;
@@ -197,6 +203,9 @@ struct vgic_dist {
197 /* Level-triggered interrupt queued on VCPU interface */ 203 /* Level-triggered interrupt queued on VCPU interface */
198 struct vgic_bitmap irq_queued; 204 struct vgic_bitmap irq_queued;
199 205
 206 /* Interrupt was active when unqueued from the VCPU interface */
207 struct vgic_bitmap irq_active;
208
200 /* Interrupt priority. Not used yet. */ 209 /* Interrupt priority. Not used yet. */
201 struct vgic_bytemap irq_priority; 210 struct vgic_bytemap irq_priority;
202 211
@@ -237,8 +246,12 @@ struct vgic_dist {
237 /* Bitmap indicating which CPU has something pending */ 246 /* Bitmap indicating which CPU has something pending */
238 unsigned long *irq_pending_on_cpu; 247 unsigned long *irq_pending_on_cpu;
239 248
249 /* Bitmap indicating which CPU has active IRQs */
250 unsigned long *irq_active_on_cpu;
251
240 struct vgic_vm_ops vm_ops; 252 struct vgic_vm_ops vm_ops;
241#endif 253 struct vgic_io_device dist_iodev;
254 struct vgic_io_device *redist_iodevs;
242}; 255};
243 256
244struct vgic_v2_cpu_if { 257struct vgic_v2_cpu_if {
@@ -266,13 +279,18 @@ struct vgic_v3_cpu_if {
266}; 279};
267 280
268struct vgic_cpu { 281struct vgic_cpu {
269#ifdef CONFIG_KVM_ARM_VGIC
270 /* per IRQ to LR mapping */ 282 /* per IRQ to LR mapping */
271 u8 *vgic_irq_lr_map; 283 u8 *vgic_irq_lr_map;
272 284
273 /* Pending interrupts on this VCPU */ 285 /* Pending/active/both interrupts on this VCPU */
274 DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS); 286 DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS);
287 DECLARE_BITMAP( active_percpu, VGIC_NR_PRIVATE_IRQS);
288 DECLARE_BITMAP( pend_act_percpu, VGIC_NR_PRIVATE_IRQS);
289
290 /* Pending/active/both shared interrupts, dynamically sized */
275 unsigned long *pending_shared; 291 unsigned long *pending_shared;
292 unsigned long *active_shared;
293 unsigned long *pend_act_shared;
276 294
277 /* Bitmap of used/free list registers */ 295 /* Bitmap of used/free list registers */
278 DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS); 296 DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS);
@@ -285,7 +303,6 @@ struct vgic_cpu {
285 struct vgic_v2_cpu_if vgic_v2; 303 struct vgic_v2_cpu_if vgic_v2;
286 struct vgic_v3_cpu_if vgic_v3; 304 struct vgic_v3_cpu_if vgic_v3;
287 }; 305 };
288#endif
289}; 306};
290 307
291#define LR_EMPTY 0xff 308#define LR_EMPTY 0xff
@@ -295,10 +312,7 @@ struct vgic_cpu {
295 312
296struct kvm; 313struct kvm;
297struct kvm_vcpu; 314struct kvm_vcpu;
298struct kvm_run;
299struct kvm_exit_mmio;
300 315
301#ifdef CONFIG_KVM_ARM_VGIC
302int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write); 316int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
303int kvm_vgic_hyp_init(void); 317int kvm_vgic_hyp_init(void);
304int kvm_vgic_map_resources(struct kvm *kvm); 318int kvm_vgic_map_resources(struct kvm *kvm);
@@ -312,8 +326,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
312 bool level); 326 bool level);
313void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); 327void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
314int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); 328int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
315bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, 329int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
316 struct kvm_exit_mmio *mmio);
317 330
318#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) 331#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
319#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus)) 332#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
@@ -335,84 +348,4 @@ static inline int vgic_v3_probe(struct device_node *vgic_node,
335} 348}
336#endif 349#endif
337 350
338#else
339static inline int kvm_vgic_hyp_init(void)
340{
341 return 0;
342}
343
344static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
345{
346 return 0;
347}
348
349static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
350{
351 return -ENXIO;
352}
353
354static inline int kvm_vgic_map_resources(struct kvm *kvm)
355{
356 return 0;
357}
358
359static inline int kvm_vgic_create(struct kvm *kvm, u32 type)
360{
361 return 0;
362}
363
364static inline void kvm_vgic_destroy(struct kvm *kvm)
365{
366}
367
368static inline void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
369{
370}
371
372static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
373{
374 return 0;
375}
376
377static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {}
378static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {}
379
380static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid,
381 unsigned int irq_num, bool level)
382{
383 return 0;
384}
385
386static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
387{
388 return 0;
389}
390
391static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
392 struct kvm_exit_mmio *mmio)
393{
394 return false;
395}
396
397static inline int irqchip_in_kernel(struct kvm *kvm)
398{
399 return 0;
400}
401
402static inline bool vgic_initialized(struct kvm *kvm)
403{
404 return true;
405}
406
407static inline bool vgic_ready(struct kvm *kvm)
408{
409 return true;
410}
411
412static inline int kvm_vgic_get_max_vcpus(void)
413{
414 return KVM_MAX_VCPUS;
415}
416#endif
417
418#endif 351#endif
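[Editor's note] Two things to note in the arm_vgic.h hunks: per-vcpu active bitmaps now mirror the pending ones (so active state can live in the distributor model, not only in LRs), and the new struct vgic_io_device wraps a kvm_io_device together with the base address, register table and optional redistributor vcpu. The bus callback recovers the wrapper with the same container_of() idiom vgic.c uses further down; a minimal sketch:

	static struct vgic_io_device *to_vgic_iodev(struct kvm_io_device *dev)
	{
		return container_of(dev, struct vgic_io_device, dev);
	}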
diff --git a/virt/kvm/iodev.h b/include/kvm/iodev.h
index 12fd3caffd2b..a6d208b916f5 100644
--- a/virt/kvm/iodev.h
+++ b/include/kvm/iodev.h
@@ -9,17 +9,17 @@
9 * GNU General Public License for more details. 9 * GNU General Public License for more details.
10 * 10 *
11 * You should have received a copy of the GNU General Public License 11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software 12 * along with this program. If not, see <http://www.gnu.org/licenses/>.
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 */ 13 */
15 14
16#ifndef __KVM_IODEV_H__ 15#ifndef __KVM_IODEV_H__
17#define __KVM_IODEV_H__ 16#define __KVM_IODEV_H__
18 17
19#include <linux/kvm_types.h> 18#include <linux/kvm_types.h>
20#include <asm/errno.h> 19#include <linux/errno.h>
21 20
22struct kvm_io_device; 21struct kvm_io_device;
22struct kvm_vcpu;
23 23
24/** 24/**
25 * kvm_io_device_ops are called under kvm slots_lock. 25 * kvm_io_device_ops are called under kvm slots_lock.
@@ -27,11 +27,13 @@ struct kvm_io_device;
27 * or non-zero to have it passed to the next device. 27 * or non-zero to have it passed to the next device.
28 **/ 28 **/
29struct kvm_io_device_ops { 29struct kvm_io_device_ops {
30 int (*read)(struct kvm_io_device *this, 30 int (*read)(struct kvm_vcpu *vcpu,
31 struct kvm_io_device *this,
31 gpa_t addr, 32 gpa_t addr,
32 int len, 33 int len,
33 void *val); 34 void *val);
34 int (*write)(struct kvm_io_device *this, 35 int (*write)(struct kvm_vcpu *vcpu,
36 struct kvm_io_device *this,
35 gpa_t addr, 37 gpa_t addr,
36 int len, 38 int len,
37 const void *val); 39 const void *val);
@@ -49,16 +51,20 @@ static inline void kvm_iodevice_init(struct kvm_io_device *dev,
49 dev->ops = ops; 51 dev->ops = ops;
50} 52}
51 53
52static inline int kvm_iodevice_read(struct kvm_io_device *dev, 54static inline int kvm_iodevice_read(struct kvm_vcpu *vcpu,
53 gpa_t addr, int l, void *v) 55 struct kvm_io_device *dev, gpa_t addr,
56 int l, void *v)
54{ 57{
55 return dev->ops->read ? dev->ops->read(dev, addr, l, v) : -EOPNOTSUPP; 58 return dev->ops->read ? dev->ops->read(vcpu, dev, addr, l, v)
59 : -EOPNOTSUPP;
56} 60}
57 61
58static inline int kvm_iodevice_write(struct kvm_io_device *dev, 62static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu,
59 gpa_t addr, int l, const void *v) 63 struct kvm_io_device *dev, gpa_t addr,
64 int l, const void *v)
60{ 65{
61 return dev->ops->write ? dev->ops->write(dev, addr, l, v) : -EOPNOTSUPP; 66 return dev->ops->write ? dev->ops->write(vcpu, dev, addr, l, v)
67 : -EOPNOTSUPP;
62} 68}
63 69
64static inline void kvm_iodevice_destructor(struct kvm_io_device *dev) 70static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
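[Editor's note] With iodev.h now public under include/kvm/, any subsystem can define an ops table and hang a device on a bus. A sketch of the wiring, reusing the hypothetical my_dev_read from the earlier i8254 note; the registration call matches the kvm_io_bus_register_dev() prototype in the kvm_host.h hunk below, and registration is conventionally done with kvm->slots_lock held:

	#include <kvm/iodev.h>

	static const struct kvm_io_device_ops my_dev_ops = {
		.read  = my_dev_read,
		.write = NULL,		/* writes fall through with -EOPNOTSUPP */
	};

	static int my_dev_register(struct kvm *kvm, struct kvm_io_device *dev,
				   gpa_t base)
	{
		kvm_iodevice_init(dev, &my_dev_ops);
		return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, 4, dev);
	}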
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0f574ebc82f4..27bd53b69080 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -165,12 +165,12 @@ enum kvm_bus {
165 KVM_NR_BUSES 165 KVM_NR_BUSES
166}; 166};
167 167
168int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 168int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
169 int len, const void *val); 169 int len, const void *val);
170int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 170int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
171 int len, const void *val, long cookie); 171 gpa_t addr, int len, const void *val, long cookie);
172int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, 172int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
173 void *val); 173 int len, void *val);
174int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 174int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
175 int len, struct kvm_io_device *dev); 175 int len, struct kvm_io_device *dev);
176int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 176int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
@@ -699,6 +699,20 @@ static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
699#endif 699#endif
700} 700}
701 701
702#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
703/*
704 * returns true if the virtual interrupt controller is initialized and
 705 * ready to accept virtual IRQs. On some architectures the virtual interrupt
706 * controller is dynamically instantiated and this is not always true.
707 */
708bool kvm_arch_intc_initialized(struct kvm *kvm);
709#else
710static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
711{
712 return true;
713}
714#endif
715
702int kvm_arch_init_vm(struct kvm *kvm, unsigned long type); 716int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
703void kvm_arch_destroy_vm(struct kvm *kvm); 717void kvm_arch_destroy_vm(struct kvm *kvm);
704void kvm_arch_sync_events(struct kvm *kvm); 718void kvm_arch_sync_events(struct kvm *kvm);
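[Editor's note] kvm_arch_intc_initialized() follows the usual opt-in hook pattern: an architecture defines the guard macro in its asm/kvm_host.h and supplies the function; everyone else gets the inline "always ready" default above. The arm side of this merge (not shown in this excerpt, so treat the exact body as assumed) plausibly reduces to:

	/* arch/arm/include/asm/kvm_host.h */
	#define __KVM_HAVE_ARCH_INTC_INITIALIZED

	/* arch/arm/kvm/arm.c */
	bool kvm_arch_intc_initialized(struct kvm *kvm)
	{
		return vgic_initialized(kvm);
	}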
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 6e54f3542126..98c95f2fcba4 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -85,13 +85,22 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
85 return IRQ_HANDLED; 85 return IRQ_HANDLED;
86} 86}
87 87
88/*
89 * Work function for handling the backup timer that we schedule when a vcpu is
90 * no longer running, but had a timer programmed to fire in the future.
91 */
88static void kvm_timer_inject_irq_work(struct work_struct *work) 92static void kvm_timer_inject_irq_work(struct work_struct *work)
89{ 93{
90 struct kvm_vcpu *vcpu; 94 struct kvm_vcpu *vcpu;
91 95
92 vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired); 96 vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
93 vcpu->arch.timer_cpu.armed = false; 97 vcpu->arch.timer_cpu.armed = false;
94 kvm_timer_inject_irq(vcpu); 98
99 /*
100 * If the vcpu is blocked we want to wake it up so that it will see
101 * the timer has expired when entering the guest.
102 */
103 kvm_vcpu_kick(vcpu);
95} 104}
96 105
97static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt) 106static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
@@ -102,6 +111,21 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
102 return HRTIMER_NORESTART; 111 return HRTIMER_NORESTART;
103} 112}
104 113
114bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
115{
116 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
117 cycle_t cval, now;
118
119 if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
120 !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
121 return false;
122
123 cval = timer->cntv_cval;
124 now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
125
126 return cval <= now;
127}
128
105/** 129/**
106 * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu 130 * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
107 * @vcpu: The vcpu pointer 131 * @vcpu: The vcpu pointer
@@ -119,6 +143,13 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
119 * populate the CPU timer again. 143 * populate the CPU timer again.
120 */ 144 */
121 timer_disarm(timer); 145 timer_disarm(timer);
146
147 /*
148 * If the timer expired while we were not scheduled, now is the time
149 * to inject it.
150 */
151 if (kvm_timer_should_fire(vcpu))
152 kvm_timer_inject_irq(vcpu);
122} 153}
123 154
124/** 155/**
@@ -134,16 +165,9 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
134 cycle_t cval, now; 165 cycle_t cval, now;
135 u64 ns; 166 u64 ns;
136 167
137 if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
138 !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
139 return;
140
141 cval = timer->cntv_cval;
142 now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
143
144 BUG_ON(timer_is_armed(timer)); 168 BUG_ON(timer_is_armed(timer));
145 169
146 if (cval <= now) { 170 if (kvm_timer_should_fire(vcpu)) {
147 /* 171 /*
148 * Timer has already expired while we were not 172 * Timer has already expired while we were not
149 * looking. Inject the interrupt and carry on. 173 * looking. Inject the interrupt and carry on.
@@ -152,6 +176,9 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
152 return; 176 return;
153 } 177 }
154 178
179 cval = timer->cntv_cval;
180 now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
181
155 ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask, 182 ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask,
156 &timecounter->frac); 183 &timecounter->frac);
157 timer_arm(timer, ns); 184 timer_arm(timer, ns);
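[Editor's note] The arch_timer.c hunks factor the expiry test into kvm_timer_should_fire() so both flush (vcpu entry) and sync (vcpu exit) paths share it, and the backup-timer work function now only kicks the vcpu; injection happens on entry. A condensed form of the predicate, with ctl/cval standing in for the saved CNTV_CTL/CNTV_CVAL fields; the guest's virtual count is the physical count minus the per-VM CNTVOFF:

	static bool timer_should_fire(u32 ctl, u64 cval, u64 phys_count, u64 cntvoff)
	{
		if ((ctl & ARCH_TIMER_CTRL_IT_MASK) ||	/* output masked */
		    !(ctl & ARCH_TIMER_CTRL_ENABLE))	/* timer disabled */
			return false;

		return cval <= phys_count - cntvoff;	/* already expired? */
	}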
diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c
index 19c6210f02cf..13907970d11c 100644
--- a/virt/kvm/arm/vgic-v2-emul.c
+++ b/virt/kvm/arm/vgic-v2-emul.c
@@ -107,6 +107,22 @@ static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
107 vcpu->vcpu_id); 107 vcpu->vcpu_id);
108} 108}
109 109
110static bool handle_mmio_set_active_reg(struct kvm_vcpu *vcpu,
111 struct kvm_exit_mmio *mmio,
112 phys_addr_t offset)
113{
114 return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
115 vcpu->vcpu_id);
116}
117
118static bool handle_mmio_clear_active_reg(struct kvm_vcpu *vcpu,
119 struct kvm_exit_mmio *mmio,
120 phys_addr_t offset)
121{
122 return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
123 vcpu->vcpu_id);
124}
125
110static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu, 126static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
111 struct kvm_exit_mmio *mmio, 127 struct kvm_exit_mmio *mmio,
112 phys_addr_t offset) 128 phys_addr_t offset)
@@ -303,7 +319,7 @@ static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
303 return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false); 319 return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
304} 320}
305 321
306static const struct kvm_mmio_range vgic_dist_ranges[] = { 322static const struct vgic_io_range vgic_dist_ranges[] = {
307 { 323 {
308 .base = GIC_DIST_CTRL, 324 .base = GIC_DIST_CTRL,
309 .len = 12, 325 .len = 12,
@@ -344,13 +360,13 @@ static const struct kvm_mmio_range vgic_dist_ranges[] = {
344 .base = GIC_DIST_ACTIVE_SET, 360 .base = GIC_DIST_ACTIVE_SET,
345 .len = VGIC_MAX_IRQS / 8, 361 .len = VGIC_MAX_IRQS / 8,
346 .bits_per_irq = 1, 362 .bits_per_irq = 1,
347 .handle_mmio = handle_mmio_raz_wi, 363 .handle_mmio = handle_mmio_set_active_reg,
348 }, 364 },
349 { 365 {
350 .base = GIC_DIST_ACTIVE_CLEAR, 366 .base = GIC_DIST_ACTIVE_CLEAR,
351 .len = VGIC_MAX_IRQS / 8, 367 .len = VGIC_MAX_IRQS / 8,
352 .bits_per_irq = 1, 368 .bits_per_irq = 1,
353 .handle_mmio = handle_mmio_raz_wi, 369 .handle_mmio = handle_mmio_clear_active_reg,
354 }, 370 },
355 { 371 {
356 .base = GIC_DIST_PRI, 372 .base = GIC_DIST_PRI,
@@ -388,24 +404,6 @@ static const struct kvm_mmio_range vgic_dist_ranges[] = {
388 {} 404 {}
389}; 405};
390 406
391static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
392 struct kvm_exit_mmio *mmio)
393{
394 unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;
395
396 if (!is_in_range(mmio->phys_addr, mmio->len, base,
397 KVM_VGIC_V2_DIST_SIZE))
398 return false;
399
400 /* GICv2 does not support accesses wider than 32 bits */
401 if (mmio->len > 4) {
402 kvm_inject_dabt(vcpu, mmio->phys_addr);
403 return true;
404 }
405
406 return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
407}
408
409static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) 407static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
410{ 408{
411 struct kvm *kvm = vcpu->kvm; 409 struct kvm *kvm = vcpu->kvm;
@@ -490,6 +488,7 @@ static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
490static int vgic_v2_map_resources(struct kvm *kvm, 488static int vgic_v2_map_resources(struct kvm *kvm,
491 const struct vgic_params *params) 489 const struct vgic_params *params)
492{ 490{
491 struct vgic_dist *dist = &kvm->arch.vgic;
493 int ret = 0; 492 int ret = 0;
494 493
495 if (!irqchip_in_kernel(kvm)) 494 if (!irqchip_in_kernel(kvm))
@@ -500,13 +499,17 @@ static int vgic_v2_map_resources(struct kvm *kvm,
500 if (vgic_ready(kvm)) 499 if (vgic_ready(kvm))
501 goto out; 500 goto out;
502 501
503 if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) || 502 if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
504 IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) { 503 IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
505 kvm_err("Need to set vgic cpu and dist addresses first\n"); 504 kvm_err("Need to set vgic cpu and dist addresses first\n");
506 ret = -ENXIO; 505 ret = -ENXIO;
507 goto out; 506 goto out;
508 } 507 }
509 508
509 vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
510 KVM_VGIC_V2_DIST_SIZE,
511 vgic_dist_ranges, -1, &dist->dist_iodev);
512
510 /* 513 /*
511 * Initialize the vgic if this hasn't already been done on demand by 514 * Initialize the vgic if this hasn't already been done on demand by
512 * accessing the vgic state from userspace. 515 * accessing the vgic state from userspace.
@@ -514,18 +517,23 @@ static int vgic_v2_map_resources(struct kvm *kvm,
514 ret = vgic_init(kvm); 517 ret = vgic_init(kvm);
515 if (ret) { 518 if (ret) {
516 kvm_err("Unable to allocate maps\n"); 519 kvm_err("Unable to allocate maps\n");
517 goto out; 520 goto out_unregister;
518 } 521 }
519 522
520 ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base, 523 ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
521 params->vcpu_base, KVM_VGIC_V2_CPU_SIZE, 524 params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
522 true); 525 true);
523 if (ret) { 526 if (ret) {
524 kvm_err("Unable to remap VGIC CPU to VCPU\n"); 527 kvm_err("Unable to remap VGIC CPU to VCPU\n");
525 goto out; 528 goto out_unregister;
526 } 529 }
527 530
528 kvm->arch.vgic.ready = true; 531 dist->ready = true;
532 goto out;
533
534out_unregister:
535 kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);
536
529out: 537out:
530 if (ret) 538 if (ret)
531 kvm_vgic_destroy(kvm); 539 kvm_vgic_destroy(kvm);
@@ -554,7 +562,6 @@ void vgic_v2_init_emulation(struct kvm *kvm)
554{ 562{
555 struct vgic_dist *dist = &kvm->arch.vgic; 563 struct vgic_dist *dist = &kvm->arch.vgic;
556 564
557 dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
558 dist->vm_ops.queue_sgi = vgic_v2_queue_sgi; 565 dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
559 dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source; 566 dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
560 dist->vm_ops.init_model = vgic_v2_init_model; 567 dist->vm_ops.init_model = vgic_v2_init_model;
@@ -631,7 +638,7 @@ static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
631 * CPU Interface Register accesses - these are not accessed by the VM, but by 638 * CPU Interface Register accesses - these are not accessed by the VM, but by
632 * user space for saving and restoring VGIC state. 639 * user space for saving and restoring VGIC state.
633 */ 640 */
634static const struct kvm_mmio_range vgic_cpu_ranges[] = { 641static const struct vgic_io_range vgic_cpu_ranges[] = {
635 { 642 {
636 .base = GIC_CPU_CTRL, 643 .base = GIC_CPU_CTRL,
637 .len = 12, 644 .len = 12,
@@ -658,12 +665,13 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
658 struct kvm_device_attr *attr, 665 struct kvm_device_attr *attr,
659 u32 *reg, bool is_write) 666 u32 *reg, bool is_write)
660{ 667{
661 const struct kvm_mmio_range *r = NULL, *ranges; 668 const struct vgic_io_range *r = NULL, *ranges;
662 phys_addr_t offset; 669 phys_addr_t offset;
663 int ret, cpuid, c; 670 int ret, cpuid, c;
664 struct kvm_vcpu *vcpu, *tmp_vcpu; 671 struct kvm_vcpu *vcpu, *tmp_vcpu;
665 struct vgic_dist *vgic; 672 struct vgic_dist *vgic;
666 struct kvm_exit_mmio mmio; 673 struct kvm_exit_mmio mmio;
674 u32 data;
667 675
668 offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; 676 offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
669 cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >> 677 cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
@@ -685,6 +693,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
685 693
686 mmio.len = 4; 694 mmio.len = 4;
687 mmio.is_write = is_write; 695 mmio.is_write = is_write;
696 mmio.data = &data;
688 if (is_write) 697 if (is_write)
689 mmio_data_write(&mmio, ~0, *reg); 698 mmio_data_write(&mmio, ~0, *reg);
690 switch (attr->group) { 699 switch (attr->group) {
@@ -699,7 +708,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
699 default: 708 default:
700 BUG(); 709 BUG();
701 } 710 }
702 r = vgic_find_range(ranges, &mmio, offset); 711 r = vgic_find_range(ranges, 4, offset);
703 712
704 if (unlikely(!r || !r->handle_mmio)) { 713 if (unlikely(!r || !r->handle_mmio)) {
705 ret = -ENXIO; 714 ret = -ENXIO;
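[Editor's note] Besides wiring GIC_DIST_ACTIVE_SET/CLEAR to real handlers, the v2 hunks register the distributor frame on the KVM MMIO bus before vgic_init(), which forces the unwind label seen above: any later failure must take the registration back before the common cleanup runs. A condensed sketch of that flow (it drops the CPU-interface ioremap and the kvm_vgic_destroy() tail, and checks the registration return, which the patch itself leaves unchecked):

	static int v2_map_resources_sketch(struct kvm *kvm)
	{
		struct vgic_dist *dist = &kvm->arch.vgic;
		int ret;

		ret = vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
					       KVM_VGIC_V2_DIST_SIZE,
					       vgic_dist_ranges, -1,
					       &dist->dist_iodev);
		if (ret)
			return ret;

		ret = vgic_init(kvm);
		if (ret)
			goto out_unregister;

		dist->ready = true;
		return 0;

	out_unregister:
		/* undo the bus registration before common cleanup */
		kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);
		return ret;
	}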
diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c
index b3f154631515..e9c3a7a83833 100644
--- a/virt/kvm/arm/vgic-v3-emul.c
+++ b/virt/kvm/arm/vgic-v3-emul.c
@@ -340,7 +340,7 @@ static bool handle_mmio_idregs(struct kvm_vcpu *vcpu,
340 return false; 340 return false;
341} 341}
342 342
343static const struct kvm_mmio_range vgic_v3_dist_ranges[] = { 343static const struct vgic_io_range vgic_v3_dist_ranges[] = {
344 { 344 {
345 .base = GICD_CTLR, 345 .base = GICD_CTLR,
346 .len = 0x04, 346 .len = 0x04,
@@ -502,6 +502,43 @@ static const struct kvm_mmio_range vgic_v3_dist_ranges[] = {
502 {}, 502 {},
503}; 503};
504 504
505static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
506 struct kvm_exit_mmio *mmio,
507 phys_addr_t offset)
508{
509 /* since we don't support LPIs, this register is zero for now */
510 vgic_reg_access(mmio, NULL, offset,
511 ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
512 return false;
513}
514
515static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
516 struct kvm_exit_mmio *mmio,
517 phys_addr_t offset)
518{
519 u32 reg;
520 u64 mpidr;
521 struct kvm_vcpu *redist_vcpu = mmio->private;
522 int target_vcpu_id = redist_vcpu->vcpu_id;
523
524 /* the upper 32 bits contain the affinity value */
525 if ((offset & ~3) == 4) {
526 mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
527 reg = compress_mpidr(mpidr);
528
529 vgic_reg_access(mmio, &reg, offset,
530 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
531 return false;
532 }
533
534 reg = redist_vcpu->vcpu_id << 8;
535 if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
536 reg |= GICR_TYPER_LAST;
537 vgic_reg_access(mmio, &reg, offset,
538 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
539 return false;
540}
541
505static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu, 542static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
506 struct kvm_exit_mmio *mmio, 543 struct kvm_exit_mmio *mmio,
507 phys_addr_t offset) 544 phys_addr_t offset)
@@ -570,186 +607,107 @@ static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
570 return vgic_handle_cfg_reg(reg, mmio, offset); 607 return vgic_handle_cfg_reg(reg, mmio, offset);
571} 608}
572 609
573static const struct kvm_mmio_range vgic_redist_sgi_ranges[] = { 610#define SGI_base(x) ((x) + SZ_64K)
611
612static const struct vgic_io_range vgic_redist_ranges[] = {
613 {
614 .base = GICR_CTLR,
615 .len = 0x04,
616 .bits_per_irq = 0,
617 .handle_mmio = handle_mmio_ctlr_redist,
618 },
619 {
620 .base = GICR_TYPER,
621 .len = 0x08,
622 .bits_per_irq = 0,
623 .handle_mmio = handle_mmio_typer_redist,
624 },
625 {
626 .base = GICR_IIDR,
627 .len = 0x04,
628 .bits_per_irq = 0,
629 .handle_mmio = handle_mmio_iidr,
630 },
631 {
632 .base = GICR_WAKER,
633 .len = 0x04,
634 .bits_per_irq = 0,
635 .handle_mmio = handle_mmio_raz_wi,
636 },
574 { 637 {
575 .base = GICR_IGROUPR0, 638 .base = GICR_IDREGS,
639 .len = 0x30,
640 .bits_per_irq = 0,
641 .handle_mmio = handle_mmio_idregs,
642 },
643 {
644 .base = SGI_base(GICR_IGROUPR0),
576 .len = 0x04, 645 .len = 0x04,
577 .bits_per_irq = 1, 646 .bits_per_irq = 1,
578 .handle_mmio = handle_mmio_rao_wi, 647 .handle_mmio = handle_mmio_rao_wi,
579 }, 648 },
580 { 649 {
581 .base = GICR_ISENABLER0, 650 .base = SGI_base(GICR_ISENABLER0),
582 .len = 0x04, 651 .len = 0x04,
583 .bits_per_irq = 1, 652 .bits_per_irq = 1,
584 .handle_mmio = handle_mmio_set_enable_reg_redist, 653 .handle_mmio = handle_mmio_set_enable_reg_redist,
585 }, 654 },
586 { 655 {
587 .base = GICR_ICENABLER0, 656 .base = SGI_base(GICR_ICENABLER0),
588 .len = 0x04, 657 .len = 0x04,
589 .bits_per_irq = 1, 658 .bits_per_irq = 1,
590 .handle_mmio = handle_mmio_clear_enable_reg_redist, 659 .handle_mmio = handle_mmio_clear_enable_reg_redist,
591 }, 660 },
592 { 661 {
593 .base = GICR_ISPENDR0, 662 .base = SGI_base(GICR_ISPENDR0),
594 .len = 0x04, 663 .len = 0x04,
595 .bits_per_irq = 1, 664 .bits_per_irq = 1,
596 .handle_mmio = handle_mmio_set_pending_reg_redist, 665 .handle_mmio = handle_mmio_set_pending_reg_redist,
597 }, 666 },
598 { 667 {
599 .base = GICR_ICPENDR0, 668 .base = SGI_base(GICR_ICPENDR0),
600 .len = 0x04, 669 .len = 0x04,
601 .bits_per_irq = 1, 670 .bits_per_irq = 1,
602 .handle_mmio = handle_mmio_clear_pending_reg_redist, 671 .handle_mmio = handle_mmio_clear_pending_reg_redist,
603 }, 672 },
604 { 673 {
605 .base = GICR_ISACTIVER0, 674 .base = SGI_base(GICR_ISACTIVER0),
606 .len = 0x04, 675 .len = 0x04,
607 .bits_per_irq = 1, 676 .bits_per_irq = 1,
608 .handle_mmio = handle_mmio_raz_wi, 677 .handle_mmio = handle_mmio_raz_wi,
609 }, 678 },
610 { 679 {
611 .base = GICR_ICACTIVER0, 680 .base = SGI_base(GICR_ICACTIVER0),
612 .len = 0x04, 681 .len = 0x04,
613 .bits_per_irq = 1, 682 .bits_per_irq = 1,
614 .handle_mmio = handle_mmio_raz_wi, 683 .handle_mmio = handle_mmio_raz_wi,
615 }, 684 },
616 { 685 {
617 .base = GICR_IPRIORITYR0, 686 .base = SGI_base(GICR_IPRIORITYR0),
618 .len = 0x20, 687 .len = 0x20,
619 .bits_per_irq = 8, 688 .bits_per_irq = 8,
620 .handle_mmio = handle_mmio_priority_reg_redist, 689 .handle_mmio = handle_mmio_priority_reg_redist,
621 }, 690 },
622 { 691 {
623 .base = GICR_ICFGR0, 692 .base = SGI_base(GICR_ICFGR0),
624 .len = 0x08, 693 .len = 0x08,
625 .bits_per_irq = 2, 694 .bits_per_irq = 2,
626 .handle_mmio = handle_mmio_cfg_reg_redist, 695 .handle_mmio = handle_mmio_cfg_reg_redist,
627 }, 696 },
628 { 697 {
629 .base = GICR_IGRPMODR0, 698 .base = SGI_base(GICR_IGRPMODR0),
630 .len = 0x04, 699 .len = 0x04,
631 .bits_per_irq = 1, 700 .bits_per_irq = 1,
632 .handle_mmio = handle_mmio_raz_wi, 701 .handle_mmio = handle_mmio_raz_wi,
633 }, 702 },
634 { 703 {
635 .base = GICR_NSACR, 704 .base = SGI_base(GICR_NSACR),
636 .len = 0x04, 705 .len = 0x04,
637 .handle_mmio = handle_mmio_raz_wi, 706 .handle_mmio = handle_mmio_raz_wi,
638 }, 707 },
639 {}, 708 {},
640}; 709};
641 710
642static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
643 struct kvm_exit_mmio *mmio,
644 phys_addr_t offset)
645{
646 /* since we don't support LPIs, this register is zero for now */
647 vgic_reg_access(mmio, NULL, offset,
648 ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
649 return false;
650}
651
652static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
653 struct kvm_exit_mmio *mmio,
654 phys_addr_t offset)
655{
656 u32 reg;
657 u64 mpidr;
658 struct kvm_vcpu *redist_vcpu = mmio->private;
659 int target_vcpu_id = redist_vcpu->vcpu_id;
660
661 /* the upper 32 bits contain the affinity value */
662 if ((offset & ~3) == 4) {
663 mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
664 reg = compress_mpidr(mpidr);
665
666 vgic_reg_access(mmio, &reg, offset,
667 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
668 return false;
669 }
670
671 reg = redist_vcpu->vcpu_id << 8;
672 if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
673 reg |= GICR_TYPER_LAST;
674 vgic_reg_access(mmio, &reg, offset,
675 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
676 return false;
677}
678
679static const struct kvm_mmio_range vgic_redist_ranges[] = {
680 {
681 .base = GICR_CTLR,
682 .len = 0x04,
683 .bits_per_irq = 0,
684 .handle_mmio = handle_mmio_ctlr_redist,
685 },
686 {
687 .base = GICR_TYPER,
688 .len = 0x08,
689 .bits_per_irq = 0,
690 .handle_mmio = handle_mmio_typer_redist,
691 },
692 {
693 .base = GICR_IIDR,
694 .len = 0x04,
695 .bits_per_irq = 0,
696 .handle_mmio = handle_mmio_iidr,
697 },
698 {
699 .base = GICR_WAKER,
700 .len = 0x04,
701 .bits_per_irq = 0,
702 .handle_mmio = handle_mmio_raz_wi,
703 },
704 {
705 .base = GICR_IDREGS,
706 .len = 0x30,
707 .bits_per_irq = 0,
708 .handle_mmio = handle_mmio_idregs,
709 },
710 {},
711};
712
713/*
714 * This function splits accesses between the distributor and the two
715 * redistributor parts (private/SPI). As each redistributor is accessible
716 * from any CPU, we have to determine the affected VCPU by taking the faulting
717 * address into account. We then pass this VCPU to the handler function via
718 * the private parameter.
719 */
720#define SGI_BASE_OFFSET SZ_64K
721static bool vgic_v3_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
722 struct kvm_exit_mmio *mmio)
723{
724 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
725 unsigned long dbase = dist->vgic_dist_base;
726 unsigned long rdbase = dist->vgic_redist_base;
727 int nrcpus = atomic_read(&vcpu->kvm->online_vcpus);
728 int vcpu_id;
729 const struct kvm_mmio_range *mmio_range;
730
731 if (is_in_range(mmio->phys_addr, mmio->len, dbase, GIC_V3_DIST_SIZE)) {
732 return vgic_handle_mmio_range(vcpu, run, mmio,
733 vgic_v3_dist_ranges, dbase);
734 }
735
736 if (!is_in_range(mmio->phys_addr, mmio->len, rdbase,
737 GIC_V3_REDIST_SIZE * nrcpus))
738 return false;
739
740 vcpu_id = (mmio->phys_addr - rdbase) / GIC_V3_REDIST_SIZE;
741 rdbase += (vcpu_id * GIC_V3_REDIST_SIZE);
742 mmio->private = kvm_get_vcpu(vcpu->kvm, vcpu_id);
743
744 if (mmio->phys_addr >= rdbase + SGI_BASE_OFFSET) {
745 rdbase += SGI_BASE_OFFSET;
746 mmio_range = vgic_redist_sgi_ranges;
747 } else {
748 mmio_range = vgic_redist_ranges;
749 }
750 return vgic_handle_mmio_range(vcpu, run, mmio, mmio_range, rdbase);
751}
752
753static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq) 711static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
754{ 712{
755 if (vgic_queue_irq(vcpu, 0, irq)) { 713 if (vgic_queue_irq(vcpu, 0, irq)) {
@@ -766,6 +724,9 @@ static int vgic_v3_map_resources(struct kvm *kvm,
766{ 724{
767 int ret = 0; 725 int ret = 0;
768 struct vgic_dist *dist = &kvm->arch.vgic; 726 struct vgic_dist *dist = &kvm->arch.vgic;
727 gpa_t rdbase = dist->vgic_redist_base;
728 struct vgic_io_device *iodevs = NULL;
729 int i;
769 730
770 if (!irqchip_in_kernel(kvm)) 731 if (!irqchip_in_kernel(kvm))
771 return 0; 732 return 0;
@@ -791,7 +752,41 @@ static int vgic_v3_map_resources(struct kvm *kvm,
791 goto out; 752 goto out;
792 } 753 }
793 754
794 kvm->arch.vgic.ready = true; 755 ret = vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
756 GIC_V3_DIST_SIZE, vgic_v3_dist_ranges,
757 -1, &dist->dist_iodev);
758 if (ret)
759 goto out;
760
761 iodevs = kcalloc(dist->nr_cpus, sizeof(iodevs[0]), GFP_KERNEL);
762 if (!iodevs) {
763 ret = -ENOMEM;
764 goto out_unregister;
765 }
766
767 for (i = 0; i < dist->nr_cpus; i++) {
768 ret = vgic_register_kvm_io_dev(kvm, rdbase,
769 SZ_128K, vgic_redist_ranges,
770 i, &iodevs[i]);
771 if (ret)
772 goto out_unregister;
773 rdbase += GIC_V3_REDIST_SIZE;
774 }
775
776 dist->redist_iodevs = iodevs;
777 dist->ready = true;
778 goto out;
779
780out_unregister:
781 kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);
782 if (iodevs) {
783 for (i = 0; i < dist->nr_cpus; i++) {
784 if (iodevs[i].dev.ops)
785 kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
786 &iodevs[i].dev);
787 }
788 }
789
795out: 790out:
796 if (ret) 791 if (ret)
797 kvm_vgic_destroy(kvm); 792 kvm_vgic_destroy(kvm);
@@ -832,7 +827,6 @@ void vgic_v3_init_emulation(struct kvm *kvm)
832{ 827{
833 struct vgic_dist *dist = &kvm->arch.vgic; 828 struct vgic_dist *dist = &kvm->arch.vgic;
834 829
835 dist->vm_ops.handle_mmio = vgic_v3_handle_mmio;
836 dist->vm_ops.queue_sgi = vgic_v3_queue_sgi; 830 dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
837 dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source; 831 dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source;
838 dist->vm_ops.init_model = vgic_v3_init_model; 832 dist->vm_ops.init_model = vgic_v3_init_model;
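[Editor's note] The v3 rework folds the old split redistributor tables into one vgic_redist_ranges[] by offsetting the per-IRQ registers with SGI_base(), then registers one 128K bus device per vcpu. A worked offset example, assuming the usual GICv3 layout where GICR_ISENABLER0 sits at 0x0100 within the SGI frame:

	/*
	 *   SGI_base(GICR_ISENABLER0) == 0x0100 + SZ_64K == 0x10100
	 *
	 * so inside the 128K (GIC_V3_REDIST_SIZE) window of vcpu N the
	 * register sits at
	 *
	 *   rdbase + N * SZ_128K + 0x10100
	 *
	 * which is why one range table plus one registered
	 * kvm_io_device per vcpu now covers both 64K frames, replacing
	 * the deleted vgic_v3_handle_mmio() dispatcher.
	 */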
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index c9f60f524588..8d550ff14700 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -31,6 +31,9 @@
31#include <asm/kvm_emulate.h> 31#include <asm/kvm_emulate.h>
32#include <asm/kvm_arm.h> 32#include <asm/kvm_arm.h>
33#include <asm/kvm_mmu.h> 33#include <asm/kvm_mmu.h>
34#include <trace/events/kvm.h>
35#include <asm/kvm.h>
36#include <kvm/iodev.h>
34 37
35/* 38/*
36 * How the whole thing works (courtesy of Christoffer Dall): 39 * How the whole thing works (courtesy of Christoffer Dall):
@@ -263,6 +266,13 @@ static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
263 return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq); 266 return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
264} 267}
265 268
269static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
270{
271 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
272
273 return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
274}
275
266static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq) 276static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
267{ 277{
268 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 278 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -277,6 +287,20 @@ static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
277 vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0); 287 vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
278} 288}
279 289
290static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
291{
292 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
293
294 vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
295}
296
297static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
298{
299 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
300
301 vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
302}
303
280static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq) 304static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
281{ 305{
282 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 306 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -520,6 +544,44 @@ bool vgic_handle_clear_pending_reg(struct kvm *kvm,
520 return false; 544 return false;
521} 545}
522 546
547bool vgic_handle_set_active_reg(struct kvm *kvm,
548 struct kvm_exit_mmio *mmio,
549 phys_addr_t offset, int vcpu_id)
550{
551 u32 *reg;
552 struct vgic_dist *dist = &kvm->arch.vgic;
553
554 reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
555 vgic_reg_access(mmio, reg, offset,
556 ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
557
558 if (mmio->is_write) {
559 vgic_update_state(kvm);
560 return true;
561 }
562
563 return false;
564}
565
566bool vgic_handle_clear_active_reg(struct kvm *kvm,
567 struct kvm_exit_mmio *mmio,
568 phys_addr_t offset, int vcpu_id)
569{
570 u32 *reg;
571 struct vgic_dist *dist = &kvm->arch.vgic;
572
573 reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
574 vgic_reg_access(mmio, reg, offset,
575 ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
576
577 if (mmio->is_write) {
578 vgic_update_state(kvm);
579 return true;
580 }
581
582 return false;
583}
584
523static u32 vgic_cfg_expand(u16 val) 585static u32 vgic_cfg_expand(u16 val)
524{ 586{
525 u32 res = 0; 587 u32 res = 0;
@@ -588,16 +650,12 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
588} 650}
589 651
590/** 652/**
591 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor 653 * vgic_unqueue_irqs - move pending/active IRQs from LRs to the distributor
592 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs 654 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
593 * 655 *
594 * Move any pending IRQs that have already been assigned to LRs back to the 656 * Move any IRQs that have already been assigned to LRs back to the
595 * emulated distributor state so that the complete emulated state can be read 657 * emulated distributor state so that the complete emulated state can be read
596 * from the main emulation structures without investigating the LRs. 658 * from the main emulation structures without investigating the LRs.
597 *
598 * Note that IRQs in the active state in the LRs get their pending state moved
599 * to the distributor but the active state stays in the LRs, because we don't
600 * track the active state on the distributor side.
601 */ 659 */
602void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) 660void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
603{ 661{
@@ -613,12 +671,22 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
613 * 01: pending 671 * 01: pending
614 * 10: active 672 * 10: active
615 * 11: pending and active 673 * 11: pending and active
616 *
617 * If the LR holds only an active interrupt (not pending) then
618 * just leave it alone.
619 */ 674 */
620 if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE) 675 BUG_ON(!(lr.state & LR_STATE_MASK));
621 continue; 676
677 /* Reestablish SGI source for pending and active IRQs */
678 if (lr.irq < VGIC_NR_SGIS)
679 add_sgi_source(vcpu, lr.irq, lr.source);
680
681 /*
682 * If the LR holds an active (10) or a pending and active (11)
683 * interrupt then move the active state to the
684 * distributor tracking bit.
685 */
686 if (lr.state & LR_STATE_ACTIVE) {
687 vgic_irq_set_active(vcpu, lr.irq);
688 lr.state &= ~LR_STATE_ACTIVE;
689 }
622 690
623 /* 691 /*
624 * Reestablish the pending state on the distributor and the 692 * Reestablish the pending state on the distributor and the
@@ -626,21 +694,19 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
626 * is fine, then we are only setting a few bits that were 694 * is fine, then we are only setting a few bits that were
627 * already set. 695 * already set.
628 */ 696 */
629 vgic_dist_irq_set_pending(vcpu, lr.irq); 697 if (lr.state & LR_STATE_PENDING) {
630 if (lr.irq < VGIC_NR_SGIS) 698 vgic_dist_irq_set_pending(vcpu, lr.irq);
631 add_sgi_source(vcpu, lr.irq, lr.source); 699 lr.state &= ~LR_STATE_PENDING;
632 lr.state &= ~LR_STATE_PENDING; 700 }
701
633 vgic_set_lr(vcpu, i, lr); 702 vgic_set_lr(vcpu, i, lr);
634 703
635 /* 704 /*
636 * If there's no state left on the LR (it could still be 705 * Mark the LR as free for other use.
637 * active), then the LR does not hold any useful info and can
638 * be marked as free for other use.
639 */ 706 */
640 if (!(lr.state & LR_STATE_MASK)) { 707 BUG_ON(lr.state & LR_STATE_MASK);
641 vgic_retire_lr(i, lr.irq, vcpu); 708 vgic_retire_lr(i, lr.irq, vcpu);
642 vgic_irq_clear_queued(vcpu, lr.irq); 709 vgic_irq_clear_queued(vcpu, lr.irq);
643 }
644 710
645 /* Finally update the VGIC state. */ 711 /* Finally update the VGIC state. */
646 vgic_update_state(vcpu->kvm); 712 vgic_update_state(vcpu->kvm);
@@ -648,24 +714,21 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
648} 714}
649 715
650const 716const
651struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges, 717struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
652 struct kvm_exit_mmio *mmio, 718 int len, gpa_t offset)
653 phys_addr_t offset) 719{
654{ 720 while (ranges->len) {
655 const struct kvm_mmio_range *r = ranges; 721 if (offset >= ranges->base &&
656 722 (offset + len) <= (ranges->base + ranges->len))
657 while (r->len) { 723 return ranges;
658 if (offset >= r->base && 724 ranges++;
659 (offset + mmio->len) <= (r->base + r->len))
660 return r;
661 r++;
662 } 725 }
663 726
664 return NULL; 727 return NULL;
665} 728}
666 729
667static bool vgic_validate_access(const struct vgic_dist *dist, 730static bool vgic_validate_access(const struct vgic_dist *dist,
668 const struct kvm_mmio_range *range, 731 const struct vgic_io_range *range,
669 unsigned long offset) 732 unsigned long offset)
670{ 733{
671 int irq; 734 int irq;
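[Editor's note] vgic_find_range() now keys on (len, offset) instead of a kvm_exit_mmio, but the match rule is unchanged: an access hits a window only if it fits entirely inside it. A standalone restatement with a worked case:

	static bool range_covers(gpa_t base, int range_len, gpa_t offset, int len)
	{
		return offset >= base && offset + len <= base + range_len;
	}

	/* e.g. for { .base = 0x100, .len = 0x20 }: an 8-byte access at
	 * offset 0x118 matches (0x120 <= 0x120); one at 0x11c does not
	 * (0x124 > 0x120). */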
@@ -693,9 +756,8 @@ static bool vgic_validate_access(const struct vgic_dist *dist,
693static bool call_range_handler(struct kvm_vcpu *vcpu, 756static bool call_range_handler(struct kvm_vcpu *vcpu,
694 struct kvm_exit_mmio *mmio, 757 struct kvm_exit_mmio *mmio,
695 unsigned long offset, 758 unsigned long offset,
696 const struct kvm_mmio_range *range) 759 const struct vgic_io_range *range)
697{ 760{
698 u32 *data32 = (void *)mmio->data;
699 struct kvm_exit_mmio mmio32; 761 struct kvm_exit_mmio mmio32;
700 bool ret; 762 bool ret;
701 763
@@ -712,91 +774,142 @@ static bool call_range_handler(struct kvm_vcpu *vcpu,
712 mmio32.private = mmio->private; 774 mmio32.private = mmio->private;
713 775
714 mmio32.phys_addr = mmio->phys_addr + 4; 776 mmio32.phys_addr = mmio->phys_addr + 4;
715 if (mmio->is_write) 777 mmio32.data = &((u32 *)mmio->data)[1];
716 *(u32 *)mmio32.data = data32[1];
717 ret = range->handle_mmio(vcpu, &mmio32, offset + 4); 778 ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
718 if (!mmio->is_write)
719 data32[1] = *(u32 *)mmio32.data;
720 779
721 mmio32.phys_addr = mmio->phys_addr; 780 mmio32.phys_addr = mmio->phys_addr;
722 if (mmio->is_write) 781 mmio32.data = &((u32 *)mmio->data)[0];
723 *(u32 *)mmio32.data = data32[0];
724 ret |= range->handle_mmio(vcpu, &mmio32, offset); 782 ret |= range->handle_mmio(vcpu, &mmio32, offset);
725 if (!mmio->is_write)
726 data32[0] = *(u32 *)mmio32.data;
727 783
728 return ret; 784 return ret;
729} 785}
730 786
731/** 787/**
732 * vgic_handle_mmio_range - handle an in-kernel MMIO access 788 * vgic_handle_mmio_access - handle an in-kernel MMIO access
789 * This is called by the read/write KVM IO device wrappers below.
733 * @vcpu: pointer to the vcpu performing the access 790 * @vcpu: pointer to the vcpu performing the access
734 * @run: pointer to the kvm_run structure 791 * @this: pointer to the KVM IO device in charge
735 * @mmio: pointer to the data describing the access 792 * @addr: guest physical address of the access
736 * @ranges: array of MMIO ranges in a given region 793 * @len: size of the access
737 * @mmio_base: base address of that region 794 * @val: pointer to the data region
795 * @is_write: read or write access
738 * 796 *
 739 * returns true if the MMIO access could be performed 797 * returns 0 if the MMIO access could be performed
740 */ 798 */
741bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run, 799static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
742 struct kvm_exit_mmio *mmio, 800 struct kvm_io_device *this, gpa_t addr,
743 const struct kvm_mmio_range *ranges, 801 int len, void *val, bool is_write)
744 unsigned long mmio_base)
745{ 802{
746 const struct kvm_mmio_range *range;
747 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 803 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
804 struct vgic_io_device *iodev = container_of(this,
805 struct vgic_io_device, dev);
806 struct kvm_run *run = vcpu->run;
807 const struct vgic_io_range *range;
808 struct kvm_exit_mmio mmio;
748 bool updated_state; 809 bool updated_state;
749 unsigned long offset; 810 gpa_t offset;
750 811
751 offset = mmio->phys_addr - mmio_base; 812 offset = addr - iodev->addr;
752 range = vgic_find_range(ranges, mmio, offset); 813 range = vgic_find_range(iodev->reg_ranges, len, offset);
753 if (unlikely(!range || !range->handle_mmio)) { 814 if (unlikely(!range || !range->handle_mmio)) {
754 pr_warn("Unhandled access %d %08llx %d\n", 815 pr_warn("Unhandled access %d %08llx %d\n", is_write, addr, len);
755 mmio->is_write, mmio->phys_addr, mmio->len); 816 return -ENXIO;
756 return false;
757 } 817 }
758 818
759 spin_lock(&vcpu->kvm->arch.vgic.lock); 819 mmio.phys_addr = addr;
820 mmio.len = len;
821 mmio.is_write = is_write;
822 mmio.data = val;
823 mmio.private = iodev->redist_vcpu;
824
825 spin_lock(&dist->lock);
760 offset -= range->base; 826 offset -= range->base;
761 if (vgic_validate_access(dist, range, offset)) { 827 if (vgic_validate_access(dist, range, offset)) {
762 updated_state = call_range_handler(vcpu, mmio, offset, range); 828 updated_state = call_range_handler(vcpu, &mmio, offset, range);
763 } else { 829 } else {
764 if (!mmio->is_write) 830 if (!is_write)
765 memset(mmio->data, 0, mmio->len); 831 memset(val, 0, len);
766 updated_state = false; 832 updated_state = false;
767 } 833 }
768 spin_unlock(&vcpu->kvm->arch.vgic.lock); 834 spin_unlock(&dist->lock);
769 kvm_prepare_mmio(run, mmio); 835 run->mmio.is_write = is_write;
836 run->mmio.len = len;
837 run->mmio.phys_addr = addr;
838 memcpy(run->mmio.data, val, len);
839
770 kvm_handle_mmio_return(vcpu, run); 840 kvm_handle_mmio_return(vcpu, run);
771 841
772 if (updated_state) 842 if (updated_state)
773 vgic_kick_vcpus(vcpu->kvm); 843 vgic_kick_vcpus(vcpu->kvm);
774 844
775 return true; 845 return 0;
846}
847
848static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu,
849 struct kvm_io_device *this,
850 gpa_t addr, int len, void *val)
851{
852 return vgic_handle_mmio_access(vcpu, this, addr, len, val, false);
776} 853}
777 854
855static int vgic_handle_mmio_write(struct kvm_vcpu *vcpu,
856 struct kvm_io_device *this,
857 gpa_t addr, int len, const void *val)
858{
859 return vgic_handle_mmio_access(vcpu, this, addr, len, (void *)val,
860 true);
861}
862
863struct kvm_io_device_ops vgic_io_ops = {
864 .read = vgic_handle_mmio_read,
865 .write = vgic_handle_mmio_write,
866};
867
778/** 868/**
779 * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation 869 * vgic_register_kvm_io_dev - register VGIC register frame on the KVM I/O bus
780 * @vcpu: pointer to the vcpu performing the access 870 * @kvm: The VM structure pointer
781 * @run: pointer to the kvm_run structure 871 * @base: The (guest) base address for the register frame
782 * @mmio: pointer to the data describing the access 872 * @len: Length of the register frame window
873 * @ranges: Describing the handler functions for each register
874 * @redist_vcpu_id: The VCPU ID to pass on to the handlers on call
875 * @iodev: Points to memory to be passed on to the handler
783 * 876 *
784 * returns true if the MMIO access has been performed in kernel space, 877 * @iodev stores the parameters of this function to be usable by the handler
785 * and false if it needs to be emulated in user space. 878 * respectively the dispatcher function (since the KVM I/O bus framework lacks
786 * Calls the actual handling routine for the selected VGIC model. 879 * an opaque parameter). Initialization is done in this function, but the
880 * reference should be valid and unique for the whole VGIC lifetime.
881 * If the register frame is not mapped for a specific VCPU, pass -1 to
882 * @redist_vcpu_id.
787 */ 883 */
788bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, 884int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
789 struct kvm_exit_mmio *mmio) 885 const struct vgic_io_range *ranges,
886 int redist_vcpu_id,
887 struct vgic_io_device *iodev)
790{ 888{
791 if (!irqchip_in_kernel(vcpu->kvm)) 889 struct kvm_vcpu *vcpu = NULL;
792 return false; 890 int ret;
793 891
794 /* 892 if (redist_vcpu_id >= 0)
795 * This will currently call either vgic_v2_handle_mmio() or 893 vcpu = kvm_get_vcpu(kvm, redist_vcpu_id);
796 * vgic_v3_handle_mmio(), which in turn will call 894
797 * vgic_handle_mmio_range() defined above. 895 iodev->addr = base;
798 */ 896 iodev->len = len;
799 return vcpu->kvm->arch.vgic.vm_ops.handle_mmio(vcpu, run, mmio); 897 iodev->reg_ranges = ranges;
898 iodev->redist_vcpu = vcpu;
899
900 kvm_iodevice_init(&iodev->dev, &vgic_io_ops);
901
902 mutex_lock(&kvm->slots_lock);
903
904 ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len,
905 &iodev->dev);
906 mutex_unlock(&kvm->slots_lock);
907
908 /* Mark the iodev as invalid if registration fails. */
909 if (ret)
910 iodev->dev.ops = NULL;
911
912 return ret;
800} 913}
801 914
802static int vgic_nr_shared_irqs(struct vgic_dist *dist) 915static int vgic_nr_shared_irqs(struct vgic_dist *dist)
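The read/write wrappers above follow the kvm_io_bus callback convention: the bus hands the handler only the embedded struct kvm_io_device, and per-frame state is recovered with container_of(). A minimal standalone sketch of that pattern (hypothetical my_device/my_read names; not part of this patch):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct io_device {			/* stands in for struct kvm_io_device */
	int (*read)(struct io_device *this, int len);
};

struct my_device {			/* stands in for struct vgic_io_device */
	struct io_device dev;		/* embedded, so container_of() works */
	unsigned int base;		/* per-device state the bus cannot pass */
};

static int my_read(struct io_device *this, int len)
{
	/* Recover the wrapper exactly as vgic_handle_mmio_access() does. */
	struct my_device *mydev = container_of(this, struct my_device, dev);

	printf("read of %d bytes at device with base %#x\n", len, mydev->base);
	return 0;
}

int main(void)
{
	struct my_device d = { .dev = { .read = my_read }, .base = 0x8000 };

	/* The bus only ever sees &d.dev, yet the handler finds d again. */
	return d.dev.read(&d.dev, 4);
}
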
@@ -804,6 +917,36 @@ static int vgic_nr_shared_irqs(struct vgic_dist *dist)
 	return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
 }
 
+static int compute_active_for_cpu(struct kvm_vcpu *vcpu)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	unsigned long *active, *enabled, *act_percpu, *act_shared;
+	unsigned long active_private, active_shared;
+	int nr_shared = vgic_nr_shared_irqs(dist);
+	int vcpu_id;
+
+	vcpu_id = vcpu->vcpu_id;
+	act_percpu = vcpu->arch.vgic_cpu.active_percpu;
+	act_shared = vcpu->arch.vgic_cpu.active_shared;
+
+	active = vgic_bitmap_get_cpu_map(&dist->irq_active, vcpu_id);
+	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
+	bitmap_and(act_percpu, active, enabled, VGIC_NR_PRIVATE_IRQS);
+
+	active = vgic_bitmap_get_shared_map(&dist->irq_active);
+	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
+	bitmap_and(act_shared, active, enabled, nr_shared);
+	bitmap_and(act_shared, act_shared,
+		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
+		   nr_shared);
+
+	active_private = find_first_bit(act_percpu, VGIC_NR_PRIVATE_IRQS);
+	active_shared = find_first_bit(act_shared, nr_shared);
+
+	return (active_private < VGIC_NR_PRIVATE_IRQS ||
+		active_shared < nr_shared);
+}
+
 static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -835,7 +978,7 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
 
 /*
  * Update the interrupt state and determine which CPUs have pending
- * interrupts. Must be called with distributor lock held.
+ * or active interrupts. Must be called with distributor lock held.
  */
 void vgic_update_state(struct kvm *kvm)
 {
@@ -849,10 +992,13 @@ void vgic_update_state(struct kvm *kvm)
 	}
 
 	kvm_for_each_vcpu(c, vcpu, kvm) {
-		if (compute_pending_for_cpu(vcpu)) {
-			pr_debug("CPU%d has pending interrupts\n", c);
+		if (compute_pending_for_cpu(vcpu))
 			set_bit(c, dist->irq_pending_on_cpu);
-		}
+
+		if (compute_active_for_cpu(vcpu))
+			set_bit(c, dist->irq_active_on_cpu);
+		else
+			clear_bit(c, dist->irq_active_on_cpu);
 	}
 }
 
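compute_active_for_cpu() above is pure bitmap arithmetic: active AND enabled, further masked by the per-CPU SPI target map for shared interrupts, then a find-first-bit to test for any survivor. A standalone sketch of the idiom with single-word bitmaps (illustrative names only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* One machine word stands in for the kernel's multi-word bitmaps. */
static unsigned long bitmap_and_word(unsigned long a, unsigned long b)
{
	return a & b;	/* bitmap_and() does this word by word */
}

/*
 * Mirrors the shared-IRQ case: an IRQ counts as active for this CPU
 * only if it is active AND enabled AND targeted at this CPU.
 */
static bool any_active(unsigned long active, unsigned long enabled,
		       unsigned long targeted)
{
	unsigned long act = bitmap_and_word(bitmap_and_word(active, enabled),
					    targeted);

	/* The kernel uses find_first_bit(); a zero test is enough here. */
	return act != 0;
}

int main(void)
{
	/* IRQ 3 active, enabled and routed to this CPU: reported. */
	printf("%d\n", any_active(1UL << 3, 1UL << 3, ~0UL));	/* 1 */
	/* IRQ 5 active but disabled: suppressed. */
	printf("%d\n", any_active(1UL << 5, 0UL, ~0UL));	/* 0 */
	return 0;
}
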
@@ -955,6 +1101,26 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
 	}
 }
 
+static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
+				 int lr_nr, struct vgic_lr vlr)
+{
+	if (vgic_irq_is_active(vcpu, irq)) {
+		vlr.state |= LR_STATE_ACTIVE;
+		kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
+		vgic_irq_clear_active(vcpu, irq);
+		vgic_update_state(vcpu->kvm);
+	} else if (vgic_dist_irq_is_pending(vcpu, irq)) {
+		vlr.state |= LR_STATE_PENDING;
+		kvm_debug("Set pending: 0x%x\n", vlr.state);
+	}
+
+	if (!vgic_irq_is_edge(vcpu, irq))
+		vlr.state |= LR_EOI_INT;
+
+	vgic_set_lr(vcpu, lr_nr, vlr);
+	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
+}
+
 /*
  * Queue an interrupt to a CPU virtual interface. Return true on success,
  * or false if it wasn't possible to queue it.
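vgic_queue_irq_to_lr() folds distributor state into the list-register state word: the active and pending bits are chosen with an if/else-if, and level-triggered interrupts additionally request an EOI maintenance interrupt. A compact sketch of the flag logic (flag values are illustrative, not the hardware LR encoding):

#include <stdbool.h>
#include <stdio.h>

#define LR_STATE_PENDING (1U << 0)	/* illustrative bit positions */
#define LR_STATE_ACTIVE  (1U << 1)
#define LR_EOI_INT       (1U << 2)

static unsigned int lr_state(bool active, bool pending, bool level_triggered)
{
	unsigned int state = 0;

	if (active)		/* mirrors the vgic_irq_is_active() branch */
		state |= LR_STATE_ACTIVE;
	else if (pending)	/* mirrors vgic_dist_irq_is_pending() */
		state |= LR_STATE_PENDING;

	if (level_triggered)	/* !vgic_irq_is_edge() */
		state |= LR_EOI_INT;

	return state;
}

int main(void)
{
	printf("level pending IRQ -> LR state %#x\n", lr_state(false, true, true));
	printf("edge active IRQ   -> LR state %#x\n", lr_state(true, false, false));
	return 0;
}
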
@@ -982,9 +1148,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 		if (vlr.source == sgi_source_id) {
 			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
 			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
-			vlr.state |= LR_STATE_PENDING;
-			vgic_set_lr(vcpu, lr, vlr);
-			vgic_sync_lr_elrsr(vcpu, lr, vlr);
+			vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
 			return true;
 		}
 	}
@@ -1001,12 +1165,8 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 
 	vlr.irq = irq;
 	vlr.source = sgi_source_id;
-	vlr.state = LR_STATE_PENDING;
-	if (!vgic_irq_is_edge(vcpu, irq))
-		vlr.state |= LR_EOI_INT;
-
-	vgic_set_lr(vcpu, lr, vlr);
-	vgic_sync_lr_elrsr(vcpu, lr, vlr);
+	vlr.state = 0;
+	vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
 
 	return true;
 }
@@ -1038,39 +1198,49 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	unsigned long *pa_percpu, *pa_shared;
 	int i, vcpu_id;
 	int overflow = 0;
+	int nr_shared = vgic_nr_shared_irqs(dist);
 
 	vcpu_id = vcpu->vcpu_id;
 
+	pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu;
+	pa_shared = vcpu->arch.vgic_cpu.pend_act_shared;
+
+	bitmap_or(pa_percpu, vgic_cpu->pending_percpu, vgic_cpu->active_percpu,
+		  VGIC_NR_PRIVATE_IRQS);
+	bitmap_or(pa_shared, vgic_cpu->pending_shared, vgic_cpu->active_shared,
+		  nr_shared);
 	/*
 	 * We may not have any pending interrupt, or the interrupts
 	 * may have been serviced from another vcpu. In all cases,
 	 * move along.
 	 */
-	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
-		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
+	if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu))
 		goto epilog;
-	}
 
 	/* SGIs */
-	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
+	for_each_set_bit(i, pa_percpu, VGIC_NR_SGIS) {
 		if (!queue_sgi(vcpu, i))
 			overflow = 1;
 	}
 
 	/* PPIs */
-	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
+	for_each_set_bit_from(i, pa_percpu, VGIC_NR_PRIVATE_IRQS) {
 		if (!vgic_queue_hwirq(vcpu, i))
 			overflow = 1;
 	}
 
 	/* SPIs */
-	for_each_set_bit(i, vgic_cpu->pending_shared, vgic_nr_shared_irqs(dist)) {
+	for_each_set_bit(i, pa_shared, nr_shared) {
 		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
 			overflow = 1;
 	}
 
+
+
+
 epilog:
 	if (overflow) {
 		vgic_enable_underflow(vcpu);
@@ -1089,7 +1259,9 @@ epilog:
 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 {
 	u32 status = vgic_get_interrupt_status(vcpu);
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	bool level_pending = false;
+	struct kvm *kvm = vcpu->kvm;
 
 	kvm_debug("STATUS = %08x\n", status);
 
@@ -1106,6 +1278,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
 			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
 
+			spin_lock(&dist->lock);
 			vgic_irq_clear_queued(vcpu, vlr.irq);
 			WARN_ON(vlr.state & LR_STATE_MASK);
 			vlr.state = 0;
@@ -1124,6 +1297,17 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 			 */
 			vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
 
+			/*
+			 * kvm_notify_acked_irq calls kvm_set_irq()
+			 * to reset the IRQ level. Need to release the
+			 * lock for kvm_set_irq to grab it.
+			 */
+			spin_unlock(&dist->lock);
+
+			kvm_notify_acked_irq(kvm, 0,
+					     vlr.irq - VGIC_NR_PRIVATE_IRQS);
+			spin_lock(&dist->lock);
+
 			/* Any additional pending interrupt? */
 			if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
 				vgic_cpu_irq_set(vcpu, vlr.irq);
@@ -1133,6 +1317,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 				vgic_cpu_irq_clear(vcpu, vlr.irq);
 			}
 
+			spin_unlock(&dist->lock);
+
 			/*
 			 * Despite being EOIed, the LR may not have
 			 * been marked as empty.
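The unlock/call/relock dance above exists because kvm_notify_acked_irq() ends up in kvm_set_irq(), which takes the distributor lock itself. A minimal userspace sketch of the same pattern with pthreads (illustrative stand-ins, not the kernel locking API; build with cc file.c -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dist_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for kvm_set_irq(): it needs dist_lock itself. */
static void set_irq_level(int irq, int level)
{
	pthread_mutex_lock(&dist_lock);
	printf("irq %d level %d\n", irq, level);
	pthread_mutex_unlock(&dist_lock);
}

/* Stands in for kvm_notify_acked_irq() reaching back into set_irq_level(). */
static void notify_acked(int irq)
{
	set_irq_level(irq, 0);
}

static void process_eoi(int irq)
{
	pthread_mutex_lock(&dist_lock);
	/* ... clear queued state under the lock ... */

	pthread_mutex_unlock(&dist_lock);	/* would deadlock if held */
	notify_acked(irq);
	pthread_mutex_lock(&dist_lock);

	/* ... re-evaluate pending state under the lock ... */
	pthread_mutex_unlock(&dist_lock);
}

int main(void)
{
	process_eoi(42);
	return 0;
}
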
@@ -1155,10 +1341,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 	return level_pending;
 }
 
-/*
- * Sync back the VGIC state after a guest run. The distributor lock is
- * needed so we don't get preempted in the middle of the state processing.
- */
+/* Sync back the VGIC state after a guest run */
 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1205,14 +1388,10 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
 	if (!irqchip_in_kernel(vcpu->kvm))
 		return;
 
-	spin_lock(&dist->lock);
 	__kvm_vgic_sync_hwstate(vcpu);
-	spin_unlock(&dist->lock);
 }
 
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
@@ -1225,6 +1404,17 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
 }
 
+int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	if (!irqchip_in_kernel(vcpu->kvm))
+		return 0;
+
+	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
+}
+
+
 void vgic_kick_vcpus(struct kvm *kvm)
 {
 	struct kvm_vcpu *vcpu;
@@ -1397,8 +1587,12 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
 	kfree(vgic_cpu->pending_shared);
+	kfree(vgic_cpu->active_shared);
+	kfree(vgic_cpu->pend_act_shared);
 	kfree(vgic_cpu->vgic_irq_lr_map);
 	vgic_cpu->pending_shared = NULL;
+	vgic_cpu->active_shared = NULL;
+	vgic_cpu->pend_act_shared = NULL;
 	vgic_cpu->vgic_irq_lr_map = NULL;
 }
 
@@ -1408,9 +1602,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
 
 	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
 	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
+	vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
+	vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
 	vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
 
-	if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
+	if (!vgic_cpu->pending_shared
+		|| !vgic_cpu->active_shared
+		|| !vgic_cpu->pend_act_shared
+		|| !vgic_cpu->vgic_irq_lr_map) {
 		kvm_vgic_vcpu_destroy(vcpu);
 		return -ENOMEM;
 	}
@@ -1463,10 +1662,12 @@ void kvm_vgic_destroy(struct kvm *kvm)
 	kfree(dist->irq_spi_mpidr);
 	kfree(dist->irq_spi_target);
 	kfree(dist->irq_pending_on_cpu);
+	kfree(dist->irq_active_on_cpu);
 	dist->irq_sgi_sources = NULL;
 	dist->irq_spi_cpu = NULL;
 	dist->irq_spi_target = NULL;
 	dist->irq_pending_on_cpu = NULL;
+	dist->irq_active_on_cpu = NULL;
 	dist->nr_cpus = 0;
 }
 
@@ -1502,6 +1703,7 @@ int vgic_init(struct kvm *kvm)
 	ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
 	ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
 	ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
+	ret |= vgic_init_bitmap(&dist->irq_active, nr_cpus, nr_irqs);
 	ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
 	ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);
 
@@ -1514,10 +1716,13 @@ int vgic_init(struct kvm *kvm)
 					   GFP_KERNEL);
 	dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
 					   GFP_KERNEL);
+	dist->irq_active_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
+					   GFP_KERNEL);
 	if (!dist->irq_sgi_sources ||
 	    !dist->irq_spi_cpu ||
 	    !dist->irq_spi_target ||
-	    !dist->irq_pending_on_cpu) {
+	    !dist->irq_pending_on_cpu ||
+	    !dist->irq_active_on_cpu) {
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -1845,12 +2050,9 @@ int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 	return r;
 }
 
-int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset)
+int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset)
 {
-	struct kvm_exit_mmio dev_attr_mmio;
-
-	dev_attr_mmio.len = 4;
-	if (vgic_find_range(ranges, &dev_attr_mmio, offset))
+	if (vgic_find_range(ranges, 4, offset))
 		return 0;
 	else
 		return -ENXIO;
@@ -1883,8 +2085,10 @@ static struct notifier_block vgic_cpu_nb = {
 };
 
 static const struct of_device_id vgic_ids[] = {
 	{ .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
-	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
+	{ .compatible = "arm,cortex-a7-gic", .data = vgic_v2_probe, },
+	{ .compatible = "arm,gic-400", .data = vgic_v2_probe, },
+	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
 	{},
 };
 
@@ -1932,3 +2136,38 @@ out_free_irq:
 	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
 	return ret;
 }
+
+int kvm_irq_map_gsi(struct kvm *kvm,
+		    struct kvm_kernel_irq_routing_entry *entries,
+		    int gsi)
+{
+	return gsi;
+}
+
+int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
+{
+	return pin;
+}
+
+int kvm_set_irq(struct kvm *kvm, int irq_source_id,
+		u32 irq, int level, bool line_status)
+{
+	unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;
+
+	trace_kvm_set_irq(irq, level, irq_source_id);
+
+	BUG_ON(!vgic_initialized(kvm));
+
+	if (spi > kvm->arch.vgic.nr_irqs)
+		return -EINVAL;
+	return kvm_vgic_inject_irq(kvm, 0, spi, level);
+
+}
+
+/* MSI not implemented yet */
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+		struct kvm *kvm, int irq_source_id,
+		int level, bool line_status)
+{
+	return 0;
+}
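The irqfd glue at the end of vgic.c maps a flat GSI number onto an SPI by adding VGIC_NR_PRIVATE_IRQS (32: 16 SGIs plus 16 PPIs), then bounds-checks it against the VM's nr_irqs. A standalone sketch of that translation (constant as in the patch; helper name hypothetical):

#include <stdio.h>

#define VGIC_NR_PRIVATE_IRQS 32	/* 16 SGIs + 16 PPIs per the GIC architecture */

/* Mirrors the kvm_set_irq() translation above; nr_irqs is per-VM state. */
static int gsi_to_spi(unsigned int gsi, unsigned int nr_irqs)
{
	unsigned int spi = gsi + VGIC_NR_PRIVATE_IRQS;

	if (spi > nr_irqs)	/* same bound the patch checks before injecting */
		return -1;
	return (int)spi;
}

int main(void)
{
	printf("GSI 0  -> INTID %d\n", gsi_to_spi(0, 256));	/* 32 */
	printf("GSI 10 -> INTID %d\n", gsi_to_spi(10, 256));	/* 42 */
	printf("GSI out of range -> %d\n", gsi_to_spi(300, 256));
	return 0;
}
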
diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h
index 1e83bdf5f499..0df74cbb6200 100644
--- a/virt/kvm/arm/vgic.h
+++ b/virt/kvm/arm/vgic.h
@@ -20,6 +20,8 @@
 #ifndef __KVM_VGIC_H__
 #define __KVM_VGIC_H__
 
+#include <kvm/iodev.h>
+
 #define VGIC_ADDR_UNDEF (-1)
 #define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
 
@@ -57,6 +59,14 @@ void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq);
 void vgic_unqueue_irqs(struct kvm_vcpu *vcpu);
 
+struct kvm_exit_mmio {
+	phys_addr_t phys_addr;
+	void *data;
+	u32 len;
+	bool is_write;
+	void *private;
+};
+
 void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
 		     phys_addr_t offset, int mode);
 bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
@@ -74,7 +84,7 @@ void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
 	*((u32 *)mmio->data) = cpu_to_le32(value) & mask;
 }
 
-struct kvm_mmio_range {
+struct vgic_io_range {
 	phys_addr_t base;
 	unsigned long len;
 	int bits_per_irq;
@@ -82,6 +92,11 @@ struct kvm_mmio_range {
 			    phys_addr_t offset);
 };
 
+int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
+			     const struct vgic_io_range *ranges,
+			     int redist_id,
+			     struct vgic_io_device *iodev);
+
 static inline bool is_in_range(phys_addr_t addr, unsigned long len,
 			       phys_addr_t baseaddr, unsigned long size)
 {
@@ -89,14 +104,8 @@ static inline bool is_in_range(phys_addr_t addr, unsigned long len,
 }
 
 const
-struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges,
-				       struct kvm_exit_mmio *mmio,
-				       phys_addr_t offset);
-
-bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
-			    struct kvm_exit_mmio *mmio,
-			    const struct kvm_mmio_range *ranges,
-			    unsigned long mmio_base);
+struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
+				      int len, gpa_t offset);
 
 bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
 			    phys_addr_t offset, int vcpu_id, int access);
@@ -107,12 +116,20 @@ bool vgic_handle_set_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
 bool vgic_handle_clear_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
 				   phys_addr_t offset, int vcpu_id);
 
+bool vgic_handle_set_active_reg(struct kvm *kvm,
+				struct kvm_exit_mmio *mmio,
+				phys_addr_t offset, int vcpu_id);
+
+bool vgic_handle_clear_active_reg(struct kvm *kvm,
+				  struct kvm_exit_mmio *mmio,
+				  phys_addr_t offset, int vcpu_id);
+
 bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
 			 phys_addr_t offset);
 
 void vgic_kick_vcpus(struct kvm *kvm);
 
-int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset);
+int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset);
 int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
 int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
 
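struct vgic_io_range describes one register window; the emulation files build arrays of them and vgic_find_range() returns the entry covering a given offset. A standalone sketch of such a table lookup (simplified fields, hypothetical handler):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct io_range {			/* simplified struct vgic_io_range */
	unsigned long base;
	unsigned long len;
	bool (*handle)(unsigned long offset);
};

static bool handle_enable(unsigned long offset)
{
	printf("enable register, offset %#lx\n", offset);
	return true;
}

static const struct io_range ranges[] = {
	{ .base = 0x000, .len = 0x100, .handle = handle_enable },
	{ .base = 0x100, .len = 0x100, .handle = NULL },	/* RAZ/WI */
	{ 0 },
};

/* Mirrors vgic_find_range(): first entry whose window covers the offset. */
static const struct io_range *find_range(unsigned long offset)
{
	const struct io_range *r;

	for (r = ranges; r->len; r++)
		if (offset >= r->base && offset < r->base + r->len)
			return r;
	return NULL;
}

int main(void)
{
	const struct io_range *r = find_range(0x24);

	if (r && r->handle)
		r->handle(0x24 - r->base);	/* handler gets a range-relative offset */
	return 0;
}
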
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 00d86427af0f..571c1ce37d15 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -8,7 +8,7 @@
  *
  */
 
-#include "iodev.h"
+#include <kvm/iodev.h>
 
 #include <linux/kvm_host.h>
 #include <linux/slab.h>
@@ -60,8 +60,9 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
 	return 1;
 }
 
-static int coalesced_mmio_write(struct kvm_io_device *this,
-				gpa_t addr, int len, const void *val)
+static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
+				struct kvm_io_device *this, gpa_t addr,
+				int len, const void *val)
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 148b2392c762..9ff4193dfa49 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -36,7 +36,7 @@
 #include <linux/seqlock.h>
 #include <trace/events/kvm.h>
 
-#include "iodev.h"
+#include <kvm/iodev.h>
 
 #ifdef CONFIG_HAVE_KVM_IRQFD
 /*
@@ -311,6 +311,9 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 	unsigned int events;
 	int idx;
 
+	if (!kvm_arch_intc_initialized(kvm))
+		return -EAGAIN;
+
 	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
 	if (!irqfd)
 		return -ENOMEM;
@@ -712,8 +715,8 @@ ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
 
 /* MMIO/PIO writes trigger an event if the addr/val match */
 static int
-ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len,
-		const void *val)
+ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
+		int len, const void *val)
 {
 	struct _ioeventfd *p = to_ioeventfd(this);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ce7888a15128..efe59ae64dc3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -16,7 +16,7 @@
  *
  */
 
-#include "iodev.h"
+#include <kvm/iodev.h>
 
 #include <linux/kvm_host.h>
 #include <linux/kvm.h>
@@ -2994,7 +2994,7 @@ static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
 	return off;
 }
 
-static int __kvm_io_bus_write(struct kvm_io_bus *bus,
+static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
 			      struct kvm_io_range *range, const void *val)
 {
 	int idx;
@@ -3005,7 +3005,7 @@ static int __kvm_io_bus_write(struct kvm_io_bus *bus,
 
 	while (idx < bus->dev_count &&
 		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
-		if (!kvm_iodevice_write(bus->range[idx].dev, range->addr,
+		if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
 					range->len, val))
 			return idx;
 		idx++;
@@ -3015,7 +3015,7 @@ static int __kvm_io_bus_write(struct kvm_io_bus *bus,
 }
 
 /* kvm_io_bus_write - called under kvm->slots_lock */
-int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
 		     int len, const void *val)
 {
 	struct kvm_io_bus *bus;
@@ -3027,14 +3027,14 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 		.len = len,
 	};
 
-	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
-	r = __kvm_io_bus_write(bus, &range, val);
+	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+	r = __kvm_io_bus_write(vcpu, bus, &range, val);
 	return r < 0 ? r : 0;
 }
 
 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */
-int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
-			    int len, const void *val, long cookie)
+int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
+			    gpa_t addr, int len, const void *val, long cookie)
 {
 	struct kvm_io_bus *bus;
 	struct kvm_io_range range;
@@ -3044,12 +3044,12 @@ int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 		.len = len,
 	};
 
-	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
 
 	/* First try the device referenced by cookie. */
 	if ((cookie >= 0) && (cookie < bus->dev_count) &&
 	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
-		if (!kvm_iodevice_write(bus->range[cookie].dev, addr, len,
+		if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
 					val))
 			return cookie;
 
@@ -3057,11 +3057,11 @@ int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 	 * cookie contained garbage; fall back to search and return the
 	 * correct cookie value.
 	 */
-	return __kvm_io_bus_write(bus, &range, val);
+	return __kvm_io_bus_write(vcpu, bus, &range, val);
 }
 
-static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
-			     void *val)
+static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
+			     struct kvm_io_range *range, void *val)
 {
 	int idx;
 
@@ -3071,7 +3071,7 @@ static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
 
 	while (idx < bus->dev_count &&
 		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
-		if (!kvm_iodevice_read(bus->range[idx].dev, range->addr,
+		if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
 				       range->len, val))
 			return idx;
 		idx++;
@@ -3082,7 +3082,7 @@ static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
 EXPORT_SYMBOL_GPL(kvm_io_bus_write);
 
 /* kvm_io_bus_read - called under kvm->slots_lock */
-int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
 		    int len, void *val)
 {
 	struct kvm_io_bus *bus;
@@ -3094,8 +3094,8 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 		.len = len,
 	};
 
-	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
-	r = __kvm_io_bus_read(bus, &range, val);
+	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+	r = __kvm_io_bus_read(vcpu, bus, &range, val);
 	return r < 0 ? r : 0;
 }
 
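The kvm_main.c changes thread the vCPU through every bus accessor, so device callbacks such as vgic_handle_mmio_access() can attribute an access to the CPU that made it. A standalone sketch of the post-patch calling convention (stubbed types; only the signature shape is taken from the patch):

#include <stdio.h>

typedef unsigned long long gpa_t;
struct kvm { int id; };
struct kvm_vcpu { struct kvm *kvm; int vcpu_id; };
enum kvm_bus { KVM_MMIO_BUS };

/* Stub with the post-patch shape: the vcpu, not the kvm, comes first. */
static int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val)
{
	(void)bus_idx; (void)val;
	printf("vcpu %d (vm %d): write of %d bytes at %#llx\n",
	       vcpu->vcpu_id, vcpu->kvm->id, len, addr);
	return 0;	/* the VM is derived via vcpu->kvm internally */
}

int main(void)
{
	struct kvm vm = { .id = 1 };
	struct kvm_vcpu vcpu = { .kvm = &vm, .vcpu_id = 0 };
	unsigned int data = 0xdeadbeef;

	/* Device emulation can now attribute the access to a vCPU. */
	return kvm_io_bus_write(&vcpu, KVM_MMIO_BUS, 0x10000, 4, &data);
}
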