author    Paolo Bonzini <pbonzini@redhat.com>  2014-08-05 03:47:45 -0400
committer Paolo Bonzini <pbonzini@redhat.com>  2014-08-05 03:47:45 -0400
commit    5d5768660539b6d0da0d46113ffb0676540579a6
tree      c0cd7a918fc7371c5f5b1b9b04c6358966850277
parent    5167d09ffad5b16b574d35ce3047ed34caf1e837
parent    dedf97e8ff2c7513b1370e36b56e08b6bd0f0290
Merge tag 'kvm-arm-for-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm
KVM/ARM New features for 3.17 include:
 - Fixes and code refactoring for stage2 kvm MMU unmap_range
 - Support unmapping IPAs on deleting memslots for arm and arm64
 - Support MMIO mappings in stage2 faults
 - KVM VGIC v2 emulation on GICv3 hardware
 - Big-Endian support for arm/arm64 (guest and host)
 - Debug Architecture support for arm64 (arm32 is on Christoffer's todo list)

Conflicts:
	virt/kvm/arm/vgic.c [last minute cherry-pick from 3.17 to 3.16]
-rw-r--r--  Documentation/arm64/booting.txt                   |   8
-rw-r--r--  Documentation/devicetree/bindings/arm/gic-v3.txt  |  79
-rw-r--r--  arch/arm/include/asm/kvm_asm.h                    |  18
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h                |  22
-rw-r--r--  arch/arm/include/asm/kvm_host.h                   |   8
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h                    |  12
-rw-r--r--  arch/arm/kernel/asm-offsets.c                     |  14
-rw-r--r--  arch/arm/kernel/hyp-stub.S                        |   4
-rw-r--r--  arch/arm/kvm/Kconfig                              |   2
-rw-r--r--  arch/arm/kvm/Makefile                             |   1
-rw-r--r--  arch/arm/kvm/arm.c                                |  37
-rw-r--r--  arch/arm/kvm/coproc.c                             |  88
-rw-r--r--  arch/arm/kvm/guest.c                              |  10
-rw-r--r--  arch/arm/kvm/init.S                               |   4
-rw-r--r--  arch/arm/kvm/interrupts.S                         |   9
-rw-r--r--  arch/arm/kvm/interrupts_head.S                    |  48
-rw-r--r--  arch/arm/kvm/mmu.c                                | 214
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h           |  19
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h                  |   5
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h                  |  53
-rw-r--r--  arch/arm64/include/asm/kvm_coproc.h               |   3
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h              |  22
-rw-r--r--  arch/arm64/include/asm/kvm_host.h                 |  48
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h                  |  15
-rw-r--r--  arch/arm64/include/asm/virt.h                     |   4
-rw-r--r--  arch/arm64/kernel/asm-offsets.c                   |  26
-rw-r--r--  arch/arm64/kernel/debug-monitors.c                |   9
-rw-r--r--  arch/arm64/kvm/Makefile                           |   4
-rw-r--r--  arch/arm64/kvm/guest.c                            |  68
-rw-r--r--  arch/arm64/kvm/handle_exit.c                      |   4
-rw-r--r--  arch/arm64/kvm/hyp.S                              | 600
-rw-r--r--  arch/arm64/kvm/sys_regs.c                         | 546
-rw-r--r--  arch/arm64/kvm/vgic-v2-switch.S                   | 133
-rw-r--r--  arch/arm64/kvm/vgic-v3-switch.S                   | 267
-rw-r--r--  include/kvm/arm_arch_timer.h                      |  14
-rw-r--r--  include/kvm/arm_vgic.h                            | 115
-rw-r--r--  virt/kvm/arm/vgic-v2.c                            | 265
-rw-r--r--  virt/kvm/arm/vgic-v3.c                            | 247
-rw-r--r--  virt/kvm/arm/vgic.c                               | 389
39 files changed, 2858 insertions, 576 deletions
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 85af34d55cee..f3c05b5f9f08 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -168,6 +168,14 @@ Before jumping into the kernel, the following conditions must be met:
168 the kernel image will be entered must be initialised by software at a 168 the kernel image will be entered must be initialised by software at a
169 higher exception level to prevent execution in an UNKNOWN state. 169 higher exception level to prevent execution in an UNKNOWN state.
170 170
171 For systems with a GICv3 interrupt controller:
172 - If EL3 is present:
173 ICC_SRE_EL3.Enable (bit 3) must be initialised to 0b1.
174 ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b1.
175 - If the kernel is entered at EL1:
176 ICC_SRE_EL2.Enable (bit 3) must be initialised to 0b1.
177 ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b1.
178
171The requirements described above for CPU mode, caches, MMUs, architected 179The requirements described above for CPU mode, caches, MMUs, architected
172timers, coherency and system registers apply to all CPUs. All CPUs must 180timers, coherency and system registers apply to all CPUs. All CPUs must
173enter the kernel in the same exception level. 181enter the kernel in the same exception level.
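For reference only (not part of this commit): the ICC_SRE_* requirements above are satisfied by the boot firmware before the kernel runs. Below is a minimal C sketch of how EL3/EL2 firmware might program those bits, assuming GCC-style inline assembly; the function names are invented and the S3_6_C12_C12_5 / S3_4_C12_C9_5 encodings (ICC_SRE_EL3 / ICC_SRE_EL2) are the architectural ones, to be verified against your firmware before use.

#include <stdint.h>

/* Illustrative firmware-side sketch; bit names follow the booting.txt text above. */
#define ICC_SRE_SRE	(1u << 0)	/* bit 0: use the system-register interface */
#define ICC_SRE_ENABLE	(1u << 3)	/* bit 3: let the next lower EL set its SRE bit */

/* Runs in EL3 firmware. */
static inline void gicv3_el3_enable_sre(void)
{
	uint64_t v = ICC_SRE_SRE | ICC_SRE_ENABLE;

	asm volatile("msr S3_6_C12_C12_5, %0" : : "r" (v));	/* ICC_SRE_EL3 */
	asm volatile("isb");
}

/* Runs in EL2 before entering the kernel at EL1. */
static inline void gicv3_el2_enable_sre(void)
{
	uint64_t v = ICC_SRE_SRE | ICC_SRE_ENABLE;

	asm volatile("msr S3_4_C12_C9_5, %0" : : "r" (v));	/* ICC_SRE_EL2 */
	asm volatile("isb");
}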
diff --git a/Documentation/devicetree/bindings/arm/gic-v3.txt b/Documentation/devicetree/bindings/arm/gic-v3.txt
new file mode 100644
index 000000000000..33cd05e6c125
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/gic-v3.txt
@@ -0,0 +1,79 @@
1* ARM Generic Interrupt Controller, version 3
2
3AArch64 SMP cores are often associated with a GICv3, providing Private
4Peripheral Interrupts (PPI), Shared Peripheral Interrupts (SPI),
5Software Generated Interrupts (SGI), and Locality-specific Peripheral
6Interrupts (LPI).
7
8Main node required properties:
9
10- compatible : should at least contain "arm,gic-v3".
11- interrupt-controller : Identifies the node as an interrupt controller
12- #interrupt-cells : Specifies the number of cells needed to encode an
13 interrupt source. Must be a single cell with a value of at least 3.
14
15 The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI
16 interrupts. Other values are reserved for future use.
17
18 The 2nd cell contains the interrupt number for the interrupt type.
19 SPI interrupts are in the range [0-987]. PPI interrupts are in the
20 range [0-15].
21
22 The 3rd cell is the flags, encoded as follows:
23 bits[3:0] trigger type and level flags.
24 1 = edge triggered
25 4 = level triggered
26
27 Cells 4 and beyond are reserved for future use. When the 1st cell
28 has a value of 0 or 1, cells 4 and beyond act as padding, and may be
29 ignored. It is recommended that padding cells have a value of 0.
30
31- reg : Specifies base physical address(es) and size of the GIC
32 registers, in the following order:
33 - GIC Distributor interface (GICD)
34 - GIC Redistributors (GICR), one range per redistributor region
35 - GIC CPU interface (GICC)
36 - GIC Hypervisor interface (GICH)
37 - GIC Virtual CPU interface (GICV)
38
39 GICC, GICH and GICV are optional.
40
41- interrupts : Interrupt source of the VGIC maintenance interrupt.
42
43Optional
44
45- redistributor-stride : If using padding pages, specifies the stride
46 of consecutive redistributors. Must be a multiple of 64kB.
47
48- #redistributor-regions: The number of independent contiguous regions
49 occupied by the redistributors. Required if more than one such
50 region is present.
51
52Examples:
53
54 gic: interrupt-controller@2cf00000 {
55 compatible = "arm,gic-v3";
56 #interrupt-cells = <3>;
57 interrupt-controller;
58 reg = <0x0 0x2f000000 0 0x10000>, // GICD
59 <0x0 0x2f100000 0 0x200000>, // GICR
60 <0x0 0x2c000000 0 0x2000>, // GICC
61 <0x0 0x2c010000 0 0x2000>, // GICH
62 <0x0 0x2c020000 0 0x2000>; // GICV
63 interrupts = <1 9 4>;
64 };
65
66 gic: interrupt-controller@2c010000 {
67 compatible = "arm,gic-v3";
68 #interrupt-cells = <3>;
69 interrupt-controller;
70 redistributor-stride = <0x0 0x40000>; // 256kB stride
71 #redistributor-regions = <2>;
72 reg = <0x0 0x2c010000 0 0x10000>, // GICD
73 <0x0 0x2d000000 0 0x800000>, // GICR 1: CPUs 0-31
74 <0x0 0x2e000000 0 0x800000>, // GICR 2: CPUs 32-63
75 <0x0 0x2c040000 0 0x2000>, // GICC
76 <0x0 0x2c060000 0 0x2000>, // GICH
77 <0x0 0x2c080000 0 0x2000>; // GICV
78 interrupts = <1 9 4>;
79 };
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 53b3c4a50d5c..3a67bec72d0c 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -61,6 +61,24 @@
61#define ARM_EXCEPTION_FIQ 6 61#define ARM_EXCEPTION_FIQ 6
62#define ARM_EXCEPTION_HVC 7 62#define ARM_EXCEPTION_HVC 7
63 63
64/*
65 * The rr_lo_hi macro swaps a pair of registers depending on
66 * current endianness. It is used in conjunction with ldrd and strd
67 * instructions that load/store a 64-bit value from/to memory to/from
68 * a pair of registers which are used with the mrrc and mcrr instructions.
69 * If used with the ldrd/strd instructions, the a1 parameter is the first
70 * source/destination register and the a2 parameter is the second
71 * source/destination register. Note that the ldrd/strd instructions
72 * already swap the bytes within the words correctly according to the
73 * endianness setting, but the order of the registers needs to be effectively
74 * swapped when used with the mrrc/mcrr instructions.
75 */
76#ifdef CONFIG_CPU_ENDIAN_BE8
77#define rr_lo_hi(a1, a2) a2, a1
78#else
79#define rr_lo_hi(a1, a2) a1, a2
80#endif
81
64#ifndef __ASSEMBLY__ 82#ifndef __ASSEMBLY__
65struct kvm; 83struct kvm;
66struct kvm_vcpu; 84struct kvm_vcpu;
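Why the swap is needed can be seen in plain C (illustration only, not from the patch; all names invented): ldrd/strd move the word at the lower address into the first register of the pair, but mcrr/mrrc always expect the low half of the 64-bit value first. On a little-endian kernel the word at the lower address is the low half; on a BE8 kernel it is the high half, which is exactly what rr_lo_hi() compensates for.

#include <stdint.h>
#include <stdio.h>

/* Standalone demo: which 32-bit word of a 64-bit value sits at the lower
 * address depends on endianness. */
union split {
	uint64_t val;
	uint32_t word[2];	/* word[0] lives at the lower address */
};

int main(void)
{
	union split s = { .val = 0x1122334455667788ULL };

	/* Little-endian host: word[0]=0x55667788 (low half first)
	 * Big-endian host:    word[0]=0x11223344 (high half first) */
	printf("word[0]=%#x word[1]=%#x\n", s.word[0], s.word[1]);
	return 0;
}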
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 0fa90c962ac8..69b746955fca 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -185,9 +185,16 @@ static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
185 default: 185 default:
186 return be32_to_cpu(data); 186 return be32_to_cpu(data);
187 } 187 }
188 } else {
189 switch (len) {
190 case 1:
191 return data & 0xff;
192 case 2:
193 return le16_to_cpu(data & 0xffff);
194 default:
195 return le32_to_cpu(data);
196 }
188 } 197 }
189
190 return data; /* Leave LE untouched */
191} 198}
192 199
193static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, 200static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
@@ -203,9 +210,16 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
203 default: 210 default:
204 return cpu_to_be32(data); 211 return cpu_to_be32(data);
205 } 212 }
213 } else {
214 switch (len) {
215 case 1:
216 return data & 0xff;
217 case 2:
218 return cpu_to_le16(data & 0xffff);
219 default:
220 return cpu_to_le32(data);
221 }
206 } 222 }
207
208 return data; /* Leave LE untouched */
209} 223}
210 224
211#endif /* __ARM_KVM_EMULATE_H__ */ 225#endif /* __ARM_KVM_EMULATE_H__ */
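A standalone sketch (illustration only, not kernel code) of what the new else branch above does: an MMIO value written by a little-endian guest is converted to whatever byte order the host kernel runs in. The function name is invented; glibc's le16toh/le32toh stand in for the kernel's le16_to_cpu/le32_to_cpu, so the conversion is a no-op on a little-endian host and a byte swap on a big-endian one.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static unsigned long le_guest_to_host(unsigned long data, int len)
{
	switch (len) {
	case 1:
		return data & 0xff;		/* single byte: nothing to swap */
	case 2:
		return le16toh(data & 0xffff);
	default:
		return le32toh((uint32_t)data);
	}
}

int main(void)
{
	/* Prints 0x3412 on a little-endian host and 0x1234 on a big-endian one. */
	printf("%#lx\n", le_guest_to_host(0x3412, 2));
	return 0;
}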
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 193ceaf01bfd..6dfb404f6c46 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -225,10 +225,12 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext)
225 return 0; 225 return 0;
226} 226}
227 227
228static inline void vgic_arch_setup(const struct vgic_params *vgic)
229{
230 BUG_ON(vgic->type != VGIC_V2);
231}
232
228int kvm_perf_init(void); 233int kvm_perf_init(void);
229int kvm_perf_teardown(void); 234int kvm_perf_teardown(void);
230 235
231u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
232int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
233
234#endif /* __ARM_KVM_HOST_H__ */ 236#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 5c7aa3c1519f..5cc0b0f5f72f 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -127,6 +127,18 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
127 (__boundary - 1 < (end) - 1)? __boundary: (end); \ 127 (__boundary - 1 < (end) - 1)? __boundary: (end); \
128}) 128})
129 129
130static inline bool kvm_page_empty(void *ptr)
131{
132 struct page *ptr_page = virt_to_page(ptr);
133 return page_count(ptr_page) == 1;
134}
135
136
137#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
138#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
139#define kvm_pud_table_empty(pudp) (0)
140
141
130struct kvm; 142struct kvm;
131 143
132#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l)) 144#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
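kvm_page_empty() leans on a reference-count convention the stage-2 code already follows: a table page starts with a count of 1, every entry installed in it takes an extra reference, and every entry removed drops one, so a count of 1 again means the table holds nothing and can be freed. A toy model of that bookkeeping (illustration only, all names invented):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a stage-2 table page: one base reference plus one per entry. */
struct toy_table {
	int refcount;
};

static void toy_map_entry(struct toy_table *t)   { t->refcount++; }	/* like get_page() */
static void toy_unmap_entry(struct toy_table *t) { t->refcount--; }	/* like put_page() */

/* Mirrors kvm_page_empty(): only the base reference remains. */
static bool toy_table_empty(const struct toy_table *t)
{
	return t->refcount == 1;
}

int main(void)
{
	struct toy_table t = { .refcount = 1 };

	toy_map_entry(&t);
	toy_unmap_entry(&t);
	printf("empty: %d\n", toy_table_empty(&t));	/* prints 1 */
	return 0;
}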
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 85598b5d1efd..713e807621d2 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -182,13 +182,13 @@ int main(void)
182 DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc)); 182 DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
183#ifdef CONFIG_KVM_ARM_VGIC 183#ifdef CONFIG_KVM_ARM_VGIC
184 DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); 184 DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
185 DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); 185 DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
186 DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr)); 186 DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
187 DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr)); 187 DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
188 DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr)); 188 DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
189 DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr)); 189 DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
190 DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr)); 190 DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
191 DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr)); 191 DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
192 DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); 192 DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
193#ifdef CONFIG_KVM_ARM_TIMER 193#ifdef CONFIG_KVM_ARM_TIMER
194 DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl)); 194 DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 797b1a6a4906..7e666cfda634 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -134,9 +134,7 @@ ENTRY(__hyp_stub_install_secondary)
134 mcr p15, 4, r7, c1, c1, 3 @ HSTR 134 mcr p15, 4, r7, c1, c1, 3 @ HSTR
135 135
136THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE 136THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE
137#ifdef CONFIG_CPU_BIG_ENDIAN 137ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
138 orr r7, #(1 << 9) @ HSCTLR.EE
139#endif
140 mcr p15, 4, r7, c1, c0, 0 @ HSCTLR 138 mcr p15, 4, r7, c1, c0, 0 @ HSCTLR
141 139
142 mrc p15, 4, r7, c1, c1, 1 @ HDCR 140 mrc p15, 4, r7, c1, c1, 1 @ HDCR
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 4be5bb150bdd..466bd299b1a8 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -23,7 +23,7 @@ config KVM
23 select HAVE_KVM_CPU_RELAX_INTERCEPT 23 select HAVE_KVM_CPU_RELAX_INTERCEPT
24 select KVM_MMIO 24 select KVM_MMIO
25 select KVM_ARM_HOST 25 select KVM_ARM_HOST
26 depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN 26 depends on ARM_VIRT_EXT && ARM_LPAE
27 ---help--- 27 ---help---
28 Support hosting virtualized guest machines. You will also 28 Support hosting virtualized guest machines. You will also
29 need to select one or more of the processor modules below. 29 need to select one or more of the processor modules below.
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 789bca9e64a7..f7057ed045b6 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -21,4 +21,5 @@ obj-y += kvm-arm.o init.o interrupts.o
21obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o 21obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
22obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o 22obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
23obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o 23obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
24obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
24obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o 25obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 3c82b37c0f9e..d7424ef80354 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -155,16 +155,6 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
155 return VM_FAULT_SIGBUS; 155 return VM_FAULT_SIGBUS;
156} 156}
157 157
158void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
159 struct kvm_memory_slot *dont)
160{
161}
162
163int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
164 unsigned long npages)
165{
166 return 0;
167}
168 158
169/** 159/**
170 * kvm_arch_destroy_vm - destroy the VM data structure 160 * kvm_arch_destroy_vm - destroy the VM data structure
@@ -225,33 +215,6 @@ long kvm_arch_dev_ioctl(struct file *filp,
225 return -EINVAL; 215 return -EINVAL;
226} 216}
227 217
228void kvm_arch_memslots_updated(struct kvm *kvm)
229{
230}
231
232int kvm_arch_prepare_memory_region(struct kvm *kvm,
233 struct kvm_memory_slot *memslot,
234 struct kvm_userspace_memory_region *mem,
235 enum kvm_mr_change change)
236{
237 return 0;
238}
239
240void kvm_arch_commit_memory_region(struct kvm *kvm,
241 struct kvm_userspace_memory_region *mem,
242 const struct kvm_memory_slot *old,
243 enum kvm_mr_change change)
244{
245}
246
247void kvm_arch_flush_shadow_all(struct kvm *kvm)
248{
249}
250
251void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
252 struct kvm_memory_slot *slot)
253{
254}
255 218
256struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) 219struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
257{ 220{
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index c58a35116f63..37a0fe1bb9bb 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -44,6 +44,31 @@ static u32 cache_levels;
44/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ 44/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
45#define CSSELR_MAX 12 45#define CSSELR_MAX 12
46 46
47/*
48 * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some
49 * of cp15 registers can be viewed either as couple of two u32 registers
50 * or one u64 register. Current u64 register encoding is that least
51 * significant u32 word is followed by most significant u32 word.
52 */
53static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
54 const struct coproc_reg *r,
55 u64 val)
56{
57 vcpu->arch.cp15[r->reg] = val & 0xffffffff;
58 vcpu->arch.cp15[r->reg + 1] = val >> 32;
59}
60
61static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
62 const struct coproc_reg *r)
63{
64 u64 val;
65
66 val = vcpu->arch.cp15[r->reg + 1];
67 val = val << 32;
68 val = val | vcpu->arch.cp15[r->reg];
69 return val;
70}
71
47int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run) 72int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
48{ 73{
49 kvm_inject_undefined(vcpu); 74 kvm_inject_undefined(vcpu);
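As a worked example of the encoding described above (not part of the patch; helper names invented): writing 0x1122334455667788 through vcpu_cp15_reg64_set() leaves 0x55667788 in cp15[r->reg] and 0x11223344 in cp15[r->reg + 1], and vcpu_cp15_reg64_get() reassembles the same 64-bit value. A standalone sketch of that split:

#include <stdint.h>
#include <stdio.h>

/* Standalone model of the helpers above: least significant word stored first. */
static void reg64_set(uint32_t *cp15, int reg, uint64_t val)
{
	cp15[reg] = val & 0xffffffff;
	cp15[reg + 1] = val >> 32;
}

static uint64_t reg64_get(const uint32_t *cp15, int reg)
{
	return ((uint64_t)cp15[reg + 1] << 32) | cp15[reg];
}

int main(void)
{
	uint32_t cp15[2] = { 0, 0 };

	reg64_set(cp15, 0, 0x1122334455667788ULL);
	/* low=0x55667788 high=0x11223344 round-trip=0x1122334455667788 */
	printf("low=%#x high=%#x round-trip=%#llx\n",
	       cp15[0], cp15[1], (unsigned long long)reg64_get(cp15, 0));
	return 0;
}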
@@ -682,17 +707,23 @@ static struct coproc_reg invariant_cp15[] = {
682 { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR }, 707 { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
683}; 708};
684 709
710/*
711 * Reads a register value from a userspace address to a kernel
712 * variable. Make sure that register size matches sizeof(*__val).
713 */
685static int reg_from_user(void *val, const void __user *uaddr, u64 id) 714static int reg_from_user(void *val, const void __user *uaddr, u64 id)
686{ 715{
687 /* This Just Works because we are little endian. */
688 if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) 716 if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
689 return -EFAULT; 717 return -EFAULT;
690 return 0; 718 return 0;
691} 719}
692 720
721/*
722 * Writes a register value to a userspace address from a kernel variable.
723 * Make sure that register size matches sizeof(*__val).
724 */
693static int reg_to_user(void __user *uaddr, const void *val, u64 id) 725static int reg_to_user(void __user *uaddr, const void *val, u64 id)
694{ 726{
695 /* This Just Works because we are little endian. */
696 if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) 727 if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
697 return -EFAULT; 728 return -EFAULT;
698 return 0; 729 return 0;
@@ -702,6 +733,7 @@ static int get_invariant_cp15(u64 id, void __user *uaddr)
702{ 733{
703 struct coproc_params params; 734 struct coproc_params params;
704 const struct coproc_reg *r; 735 const struct coproc_reg *r;
736 int ret;
705 737
706 if (!index_to_params(id, &params)) 738 if (!index_to_params(id, &params))
707 return -ENOENT; 739 return -ENOENT;
@@ -710,7 +742,15 @@ static int get_invariant_cp15(u64 id, void __user *uaddr)
710 if (!r) 742 if (!r)
711 return -ENOENT; 743 return -ENOENT;
712 744
713 return reg_to_user(uaddr, &r->val, id); 745 ret = -ENOENT;
746 if (KVM_REG_SIZE(id) == 4) {
747 u32 val = r->val;
748
749 ret = reg_to_user(uaddr, &val, id);
750 } else if (KVM_REG_SIZE(id) == 8) {
751 ret = reg_to_user(uaddr, &r->val, id);
752 }
753 return ret;
714} 754}
715 755
716static int set_invariant_cp15(u64 id, void __user *uaddr) 756static int set_invariant_cp15(u64 id, void __user *uaddr)
@@ -718,7 +758,7 @@ static int set_invariant_cp15(u64 id, void __user *uaddr)
718 struct coproc_params params; 758 struct coproc_params params;
719 const struct coproc_reg *r; 759 const struct coproc_reg *r;
720 int err; 760 int err;
721 u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */ 761 u64 val;
722 762
723 if (!index_to_params(id, &params)) 763 if (!index_to_params(id, &params))
724 return -ENOENT; 764 return -ENOENT;
@@ -726,7 +766,16 @@ static int set_invariant_cp15(u64 id, void __user *uaddr)
726 if (!r) 766 if (!r)
727 return -ENOENT; 767 return -ENOENT;
728 768
729 err = reg_from_user(&val, uaddr, id); 769 err = -ENOENT;
770 if (KVM_REG_SIZE(id) == 4) {
771 u32 val32;
772
773 err = reg_from_user(&val32, uaddr, id);
774 if (!err)
775 val = val32;
776 } else if (KVM_REG_SIZE(id) == 8) {
777 err = reg_from_user(&val, uaddr, id);
778 }
730 if (err) 779 if (err)
731 return err; 780 return err;
732 781
@@ -1004,6 +1053,7 @@ int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
1004{ 1053{
1005 const struct coproc_reg *r; 1054 const struct coproc_reg *r;
1006 void __user *uaddr = (void __user *)(long)reg->addr; 1055 void __user *uaddr = (void __user *)(long)reg->addr;
1056 int ret;
1007 1057
1008 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) 1058 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
1009 return demux_c15_get(reg->id, uaddr); 1059 return demux_c15_get(reg->id, uaddr);
@@ -1015,14 +1065,24 @@ int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
1015 if (!r) 1065 if (!r)
1016 return get_invariant_cp15(reg->id, uaddr); 1066 return get_invariant_cp15(reg->id, uaddr);
1017 1067
1018 /* Note: copies two regs if size is 64 bit. */ 1068 ret = -ENOENT;
1019 return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id); 1069 if (KVM_REG_SIZE(reg->id) == 8) {
1070 u64 val;
1071
1072 val = vcpu_cp15_reg64_get(vcpu, r);
1073 ret = reg_to_user(uaddr, &val, reg->id);
1074 } else if (KVM_REG_SIZE(reg->id) == 4) {
1075 ret = reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
1076 }
1077
1078 return ret;
1020} 1079}
1021 1080
1022int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 1081int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
1023{ 1082{
1024 const struct coproc_reg *r; 1083 const struct coproc_reg *r;
1025 void __user *uaddr = (void __user *)(long)reg->addr; 1084 void __user *uaddr = (void __user *)(long)reg->addr;
1085 int ret;
1026 1086
1027 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) 1087 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
1028 return demux_c15_set(reg->id, uaddr); 1088 return demux_c15_set(reg->id, uaddr);
@@ -1034,8 +1094,18 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
1034 if (!r) 1094 if (!r)
1035 return set_invariant_cp15(reg->id, uaddr); 1095 return set_invariant_cp15(reg->id, uaddr);
1036 1096
1037 /* Note: copies two regs if size is 64 bit */ 1097 ret = -ENOENT;
1038 return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); 1098 if (KVM_REG_SIZE(reg->id) == 8) {
1099 u64 val;
1100
1101 ret = reg_from_user(&val, uaddr, reg->id);
1102 if (!ret)
1103 vcpu_cp15_reg64_set(vcpu, r, val);
1104 } else if (KVM_REG_SIZE(reg->id) == 4) {
1105 ret = reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
1106 }
1107
1108 return ret;
1039} 1109}
1040 1110
1041static unsigned int num_demux_regs(void) 1111static unsigned int num_demux_regs(void)
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index b23a59c1c522..986e625b5dbd 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -124,16 +124,6 @@ static bool is_timer_reg(u64 index)
124 return false; 124 return false;
125} 125}
126 126
127int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
128{
129 return 0;
130}
131
132u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
133{
134 return 0;
135}
136
137#else 127#else
138 128
139#define NUM_TIMER_REGS 3 129#define NUM_TIMER_REGS 3
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 1b9844d369cc..2cc14dfad049 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -71,7 +71,7 @@ __do_hyp_init:
71 bne phase2 @ Yes, second stage init 71 bne phase2 @ Yes, second stage init
72 72
73 @ Set the HTTBR to point to the hypervisor PGD pointer passed 73 @ Set the HTTBR to point to the hypervisor PGD pointer passed
74 mcrr p15, 4, r2, r3, c2 74 mcrr p15, 4, rr_lo_hi(r2, r3), c2
75 75
76 @ Set the HTCR and VTCR to the same shareability and cacheability 76 @ Set the HTCR and VTCR to the same shareability and cacheability
77 @ settings as the non-secure TTBCR and with T0SZ == 0. 77 @ settings as the non-secure TTBCR and with T0SZ == 0.
@@ -137,7 +137,7 @@ phase2:
137 mov pc, r0 137 mov pc, r0
138 138
139target: @ We're now in the trampoline code, switch page tables 139target: @ We're now in the trampoline code, switch page tables
140 mcrr p15, 4, r2, r3, c2 140 mcrr p15, 4, rr_lo_hi(r2, r3), c2
141 isb 141 isb
142 142
143 @ Invalidate the old TLBs 143 @ Invalidate the old TLBs
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 0d68d4073068..01dcb0e752d9 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -52,7 +52,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
52 dsb ishst 52 dsb ishst
53 add r0, r0, #KVM_VTTBR 53 add r0, r0, #KVM_VTTBR
54 ldrd r2, r3, [r0] 54 ldrd r2, r3, [r0]
55 mcrr p15, 6, r2, r3, c2 @ Write VTTBR 55 mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR
56 isb 56 isb
57 mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored) 57 mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored)
58 dsb ish 58 dsb ish
@@ -135,7 +135,7 @@ ENTRY(__kvm_vcpu_run)
135 ldr r1, [vcpu, #VCPU_KVM] 135 ldr r1, [vcpu, #VCPU_KVM]
136 add r1, r1, #KVM_VTTBR 136 add r1, r1, #KVM_VTTBR
137 ldrd r2, r3, [r1] 137 ldrd r2, r3, [r1]
138 mcrr p15, 6, r2, r3, c2 @ Write VTTBR 138 mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR
139 139
140 @ We're all done, just restore the GPRs and go to the guest 140 @ We're all done, just restore the GPRs and go to the guest
141 restore_guest_regs 141 restore_guest_regs
@@ -199,8 +199,13 @@ after_vfp_restore:
199 199
200 restore_host_regs 200 restore_host_regs
201 clrex @ Clear exclusive monitor 201 clrex @ Clear exclusive monitor
202#ifndef CONFIG_CPU_ENDIAN_BE8
202 mov r0, r1 @ Return the return code 203 mov r0, r1 @ Return the return code
203 mov r1, #0 @ Clear upper bits in return value 204 mov r1, #0 @ Clear upper bits in return value
205#else
206 @ r1 already has return code
207 mov r0, #0 @ Clear upper bits in return value
208#endif /* CONFIG_CPU_ENDIAN_BE8 */
204 bx lr @ return to IOCTL 209 bx lr @ return to IOCTL
205 210
206/******************************************************************** 211/********************************************************************
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 76af93025574..98c8c5b9a87f 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -1,4 +1,5 @@
1#include <linux/irqchip/arm-gic.h> 1#include <linux/irqchip/arm-gic.h>
2#include <asm/assembler.h>
2 3
3#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) 4#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4))
4#define VCPU_USR_SP (VCPU_USR_REG(13)) 5#define VCPU_USR_SP (VCPU_USR_REG(13))
@@ -420,15 +421,23 @@ vcpu .req r0 @ vcpu pointer always in r0
420 ldr r8, [r2, #GICH_ELRSR0] 421 ldr r8, [r2, #GICH_ELRSR0]
421 ldr r9, [r2, #GICH_ELRSR1] 422 ldr r9, [r2, #GICH_ELRSR1]
422 ldr r10, [r2, #GICH_APR] 423 ldr r10, [r2, #GICH_APR]
423 424ARM_BE8(rev r3, r3 )
424 str r3, [r11, #VGIC_CPU_HCR] 425ARM_BE8(rev r4, r4 )
425 str r4, [r11, #VGIC_CPU_VMCR] 426ARM_BE8(rev r5, r5 )
426 str r5, [r11, #VGIC_CPU_MISR] 427ARM_BE8(rev r6, r6 )
427 str r6, [r11, #VGIC_CPU_EISR] 428ARM_BE8(rev r7, r7 )
428 str r7, [r11, #(VGIC_CPU_EISR + 4)] 429ARM_BE8(rev r8, r8 )
429 str r8, [r11, #VGIC_CPU_ELRSR] 430ARM_BE8(rev r9, r9 )
430 str r9, [r11, #(VGIC_CPU_ELRSR + 4)] 431ARM_BE8(rev r10, r10 )
431 str r10, [r11, #VGIC_CPU_APR] 432
433 str r3, [r11, #VGIC_V2_CPU_HCR]
434 str r4, [r11, #VGIC_V2_CPU_VMCR]
435 str r5, [r11, #VGIC_V2_CPU_MISR]
436 str r6, [r11, #VGIC_V2_CPU_EISR]
437 str r7, [r11, #(VGIC_V2_CPU_EISR + 4)]
438 str r8, [r11, #VGIC_V2_CPU_ELRSR]
439 str r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
440 str r10, [r11, #VGIC_V2_CPU_APR]
432 441
433 /* Clear GICH_HCR */ 442 /* Clear GICH_HCR */
434 mov r5, #0 443 mov r5, #0
@@ -436,9 +445,10 @@ vcpu .req r0 @ vcpu pointer always in r0
436 445
437 /* Save list registers */ 446 /* Save list registers */
438 add r2, r2, #GICH_LR0 447 add r2, r2, #GICH_LR0
439 add r3, r11, #VGIC_CPU_LR 448 add r3, r11, #VGIC_V2_CPU_LR
440 ldr r4, [r11, #VGIC_CPU_NR_LR] 449 ldr r4, [r11, #VGIC_CPU_NR_LR]
4411: ldr r6, [r2], #4 4501: ldr r6, [r2], #4
451ARM_BE8(rev r6, r6 )
442 str r6, [r3], #4 452 str r6, [r3], #4
443 subs r4, r4, #1 453 subs r4, r4, #1
444 bne 1b 454 bne 1b
@@ -463,9 +473,12 @@ vcpu .req r0 @ vcpu pointer always in r0
463 add r11, vcpu, #VCPU_VGIC_CPU 473 add r11, vcpu, #VCPU_VGIC_CPU
464 474
465 /* We only restore a minimal set of registers */ 475 /* We only restore a minimal set of registers */
466 ldr r3, [r11, #VGIC_CPU_HCR] 476 ldr r3, [r11, #VGIC_V2_CPU_HCR]
467 ldr r4, [r11, #VGIC_CPU_VMCR] 477 ldr r4, [r11, #VGIC_V2_CPU_VMCR]
468 ldr r8, [r11, #VGIC_CPU_APR] 478 ldr r8, [r11, #VGIC_V2_CPU_APR]
479ARM_BE8(rev r3, r3 )
480ARM_BE8(rev r4, r4 )
481ARM_BE8(rev r8, r8 )
469 482
470 str r3, [r2, #GICH_HCR] 483 str r3, [r2, #GICH_HCR]
471 str r4, [r2, #GICH_VMCR] 484 str r4, [r2, #GICH_VMCR]
@@ -473,9 +486,10 @@ vcpu .req r0 @ vcpu pointer always in r0
473 486
474 /* Restore list registers */ 487 /* Restore list registers */
475 add r2, r2, #GICH_LR0 488 add r2, r2, #GICH_LR0
476 add r3, r11, #VGIC_CPU_LR 489 add r3, r11, #VGIC_V2_CPU_LR
477 ldr r4, [r11, #VGIC_CPU_NR_LR] 490 ldr r4, [r11, #VGIC_CPU_NR_LR]
4781: ldr r6, [r3], #4 4911: ldr r6, [r3], #4
492ARM_BE8(rev r6, r6 )
479 str r6, [r2], #4 493 str r6, [r2], #4
480 subs r4, r4, #1 494 subs r4, r4, #1
481 bne 1b 495 bne 1b
@@ -506,7 +520,7 @@ vcpu .req r0 @ vcpu pointer always in r0
506 mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL 520 mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
507 isb 521 isb
508 522
509 mrrc p15, 3, r2, r3, c14 @ CNTV_CVAL 523 mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
510 ldr r4, =VCPU_TIMER_CNTV_CVAL 524 ldr r4, =VCPU_TIMER_CNTV_CVAL
511 add r5, vcpu, r4 525 add r5, vcpu, r4
512 strd r2, r3, [r5] 526 strd r2, r3, [r5]
@@ -546,12 +560,12 @@ vcpu .req r0 @ vcpu pointer always in r0
546 560
547 ldr r2, [r4, #KVM_TIMER_CNTVOFF] 561 ldr r2, [r4, #KVM_TIMER_CNTVOFF]
548 ldr r3, [r4, #(KVM_TIMER_CNTVOFF + 4)] 562 ldr r3, [r4, #(KVM_TIMER_CNTVOFF + 4)]
549 mcrr p15, 4, r2, r3, c14 @ CNTVOFF 563 mcrr p15, 4, rr_lo_hi(r2, r3), c14 @ CNTVOFF
550 564
551 ldr r4, =VCPU_TIMER_CNTV_CVAL 565 ldr r4, =VCPU_TIMER_CNTV_CVAL
552 add r5, vcpu, r4 566 add r5, vcpu, r4
553 ldrd r2, r3, [r5] 567 ldrd r2, r3, [r5]
554 mcrr p15, 3, r2, r3, c14 @ CNTV_CVAL 568 mcrr p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
555 isb 569 isb
556 570
557 ldr r2, [vcpu, #VCPU_TIMER_CNTV_CTL] 571 ldr r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 16f804938b8f..16e7994bf347 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -90,104 +90,115 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
90 return p; 90 return p;
91} 91}
92 92
93static bool page_empty(void *ptr) 93static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
94{ 94{
95 struct page *ptr_page = virt_to_page(ptr); 95 pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
96 return page_count(ptr_page) == 1; 96 pgd_clear(pgd);
97 kvm_tlb_flush_vmid_ipa(kvm, addr);
98 pud_free(NULL, pud_table);
99 put_page(virt_to_page(pgd));
97} 100}
98 101
99static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) 102static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
100{ 103{
101 if (pud_huge(*pud)) { 104 pmd_t *pmd_table = pmd_offset(pud, 0);
102 pud_clear(pud); 105 VM_BUG_ON(pud_huge(*pud));
103 kvm_tlb_flush_vmid_ipa(kvm, addr); 106 pud_clear(pud);
104 } else { 107 kvm_tlb_flush_vmid_ipa(kvm, addr);
105 pmd_t *pmd_table = pmd_offset(pud, 0); 108 pmd_free(NULL, pmd_table);
106 pud_clear(pud);
107 kvm_tlb_flush_vmid_ipa(kvm, addr);
108 pmd_free(NULL, pmd_table);
109 }
110 put_page(virt_to_page(pud)); 109 put_page(virt_to_page(pud));
111} 110}
112 111
113static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) 112static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
114{ 113{
115 if (kvm_pmd_huge(*pmd)) { 114 pte_t *pte_table = pte_offset_kernel(pmd, 0);
116 pmd_clear(pmd); 115 VM_BUG_ON(kvm_pmd_huge(*pmd));
117 kvm_tlb_flush_vmid_ipa(kvm, addr); 116 pmd_clear(pmd);
118 } else { 117 kvm_tlb_flush_vmid_ipa(kvm, addr);
119 pte_t *pte_table = pte_offset_kernel(pmd, 0); 118 pte_free_kernel(NULL, pte_table);
120 pmd_clear(pmd);
121 kvm_tlb_flush_vmid_ipa(kvm, addr);
122 pte_free_kernel(NULL, pte_table);
123 }
124 put_page(virt_to_page(pmd)); 119 put_page(virt_to_page(pmd));
125} 120}
126 121
127static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) 122static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
123 phys_addr_t addr, phys_addr_t end)
128{ 124{
129 if (pte_present(*pte)) { 125 phys_addr_t start_addr = addr;
130 kvm_set_pte(pte, __pte(0)); 126 pte_t *pte, *start_pte;
131 put_page(virt_to_page(pte)); 127
132 kvm_tlb_flush_vmid_ipa(kvm, addr); 128 start_pte = pte = pte_offset_kernel(pmd, addr);
133 } 129 do {
130 if (!pte_none(*pte)) {
131 kvm_set_pte(pte, __pte(0));
132 put_page(virt_to_page(pte));
133 kvm_tlb_flush_vmid_ipa(kvm, addr);
134 }
135 } while (pte++, addr += PAGE_SIZE, addr != end);
136
137 if (kvm_pte_table_empty(start_pte))
138 clear_pmd_entry(kvm, pmd, start_addr);
134} 139}
135 140
136static void unmap_range(struct kvm *kvm, pgd_t *pgdp, 141static void unmap_pmds(struct kvm *kvm, pud_t *pud,
137 unsigned long long start, u64 size) 142 phys_addr_t addr, phys_addr_t end)
138{ 143{
139 pgd_t *pgd; 144 phys_addr_t next, start_addr = addr;
140 pud_t *pud; 145 pmd_t *pmd, *start_pmd;
141 pmd_t *pmd;
142 pte_t *pte;
143 unsigned long long addr = start, end = start + size;
144 u64 next;
145
146 while (addr < end) {
147 pgd = pgdp + pgd_index(addr);
148 pud = pud_offset(pgd, addr);
149 pte = NULL;
150 if (pud_none(*pud)) {
151 addr = kvm_pud_addr_end(addr, end);
152 continue;
153 }
154 146
155 if (pud_huge(*pud)) { 147 start_pmd = pmd = pmd_offset(pud, addr);
156 /* 148 do {
157 * If we are dealing with a huge pud, just clear it and 149 next = kvm_pmd_addr_end(addr, end);
158 * move on. 150 if (!pmd_none(*pmd)) {
159 */ 151 if (kvm_pmd_huge(*pmd)) {
160 clear_pud_entry(kvm, pud, addr); 152 pmd_clear(pmd);
161 addr = kvm_pud_addr_end(addr, end); 153 kvm_tlb_flush_vmid_ipa(kvm, addr);
162 continue; 154 put_page(virt_to_page(pmd));
155 } else {
156 unmap_ptes(kvm, pmd, addr, next);
157 }
163 } 158 }
159 } while (pmd++, addr = next, addr != end);
164 160
165 pmd = pmd_offset(pud, addr); 161 if (kvm_pmd_table_empty(start_pmd))
166 if (pmd_none(*pmd)) { 162 clear_pud_entry(kvm, pud, start_addr);
167 addr = kvm_pmd_addr_end(addr, end); 163}
168 continue;
169 }
170 164
171 if (!kvm_pmd_huge(*pmd)) { 165static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
172 pte = pte_offset_kernel(pmd, addr); 166 phys_addr_t addr, phys_addr_t end)
173 clear_pte_entry(kvm, pte, addr); 167{
174 next = addr + PAGE_SIZE; 168 phys_addr_t next, start_addr = addr;
175 } 169 pud_t *pud, *start_pud;
176 170
177 /* 171 start_pud = pud = pud_offset(pgd, addr);
178 * If the pmd entry is to be cleared, walk back up the ladder 172 do {
179 */ 173 next = kvm_pud_addr_end(addr, end);
180 if (kvm_pmd_huge(*pmd) || (pte && page_empty(pte))) { 174 if (!pud_none(*pud)) {
181 clear_pmd_entry(kvm, pmd, addr); 175 if (pud_huge(*pud)) {
182 next = kvm_pmd_addr_end(addr, end); 176 pud_clear(pud);
183 if (page_empty(pmd) && !page_empty(pud)) { 177 kvm_tlb_flush_vmid_ipa(kvm, addr);
184 clear_pud_entry(kvm, pud, addr); 178 put_page(virt_to_page(pud));
185 next = kvm_pud_addr_end(addr, end); 179 } else {
180 unmap_pmds(kvm, pud, addr, next);
186 } 181 }
187 } 182 }
183 } while (pud++, addr = next, addr != end);
188 184
189 addr = next; 185 if (kvm_pud_table_empty(start_pud))
190 } 186 clear_pgd_entry(kvm, pgd, start_addr);
187}
188
189
190static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
191 phys_addr_t start, u64 size)
192{
193 pgd_t *pgd;
194 phys_addr_t addr = start, end = start + size;
195 phys_addr_t next;
196
197 pgd = pgdp + pgd_index(addr);
198 do {
199 next = kvm_pgd_addr_end(addr, end);
200 unmap_puds(kvm, pgd, addr, next);
201 } while (pgd++, addr = next, addr != end);
191} 202}
192 203
193static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, 204static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
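The rewritten unmap path above walks every level with the same do/while shape: compute how far the current entry reaches with the kvm_*_addr_end() helpers, handle (or recurse into) that sub-range, and free the table page afterwards if it came out empty. A self-contained sketch of just the range-splitting step (illustration only; the function name is invented and the 2 MB block size is an arbitrary stand-in for the real per-level sizes):

#include <stdio.h>

typedef unsigned long long phys_addr_t;

#define BLOCK_SIZE	(2ULL << 20)	/* stand-in for one entry's coverage */

/* Same idea as kvm_pmd_addr_end(): next block boundary, clamped to end. */
static phys_addr_t block_addr_end(phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t boundary = (addr + BLOCK_SIZE) & ~(BLOCK_SIZE - 1);

	return boundary - 1 < end - 1 ? boundary : end;
}

int main(void)
{
	phys_addr_t addr = 0x1ff000, end = 0x600000, next;

	/* Walk [addr, end) one aligned chunk at a time, like the loops in
	 * unmap_range()/unmap_puds()/unmap_pmds()/unmap_ptes(). */
	do {
		next = block_addr_end(addr, end);
		printf("chunk: %#llx to %#llx\n", addr, next);
	} while (addr = next, addr != end);

	return 0;
}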
@@ -748,6 +759,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
748 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; 759 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
749 struct vm_area_struct *vma; 760 struct vm_area_struct *vma;
750 pfn_t pfn; 761 pfn_t pfn;
762 pgprot_t mem_type = PAGE_S2;
751 763
752 write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu)); 764 write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
753 if (fault_status == FSC_PERM && !write_fault) { 765 if (fault_status == FSC_PERM && !write_fault) {
@@ -798,6 +810,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
798 if (is_error_pfn(pfn)) 810 if (is_error_pfn(pfn))
799 return -EFAULT; 811 return -EFAULT;
800 812
813 if (kvm_is_mmio_pfn(pfn))
814 mem_type = PAGE_S2_DEVICE;
815
801 spin_lock(&kvm->mmu_lock); 816 spin_lock(&kvm->mmu_lock);
802 if (mmu_notifier_retry(kvm, mmu_seq)) 817 if (mmu_notifier_retry(kvm, mmu_seq))
803 goto out_unlock; 818 goto out_unlock;
@@ -805,7 +820,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
805 hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa); 820 hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
806 821
807 if (hugetlb) { 822 if (hugetlb) {
808 pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2); 823 pmd_t new_pmd = pfn_pmd(pfn, mem_type);
809 new_pmd = pmd_mkhuge(new_pmd); 824 new_pmd = pmd_mkhuge(new_pmd);
810 if (writable) { 825 if (writable) {
811 kvm_set_s2pmd_writable(&new_pmd); 826 kvm_set_s2pmd_writable(&new_pmd);
@@ -814,13 +829,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
814 coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE); 829 coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
815 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); 830 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
816 } else { 831 } else {
817 pte_t new_pte = pfn_pte(pfn, PAGE_S2); 832 pte_t new_pte = pfn_pte(pfn, mem_type);
818 if (writable) { 833 if (writable) {
819 kvm_set_s2pte_writable(&new_pte); 834 kvm_set_s2pte_writable(&new_pte);
820 kvm_set_pfn_dirty(pfn); 835 kvm_set_pfn_dirty(pfn);
821 } 836 }
822 coherent_cache_guest_page(vcpu, hva, PAGE_SIZE); 837 coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
823 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false); 838 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
839 mem_type == PAGE_S2_DEVICE);
824 } 840 }
825 841
826 842
@@ -1100,3 +1116,49 @@ out:
1100 free_hyp_pgds(); 1116 free_hyp_pgds();
1101 return err; 1117 return err;
1102} 1118}
1119
1120void kvm_arch_commit_memory_region(struct kvm *kvm,
1121 struct kvm_userspace_memory_region *mem,
1122 const struct kvm_memory_slot *old,
1123 enum kvm_mr_change change)
1124{
1125 gpa_t gpa = old->base_gfn << PAGE_SHIFT;
1126 phys_addr_t size = old->npages << PAGE_SHIFT;
1127 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1128 spin_lock(&kvm->mmu_lock);
1129 unmap_stage2_range(kvm, gpa, size);
1130 spin_unlock(&kvm->mmu_lock);
1131 }
1132}
1133
1134int kvm_arch_prepare_memory_region(struct kvm *kvm,
1135 struct kvm_memory_slot *memslot,
1136 struct kvm_userspace_memory_region *mem,
1137 enum kvm_mr_change change)
1138{
1139 return 0;
1140}
1141
1142void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1143 struct kvm_memory_slot *dont)
1144{
1145}
1146
1147int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
1148 unsigned long npages)
1149{
1150 return 0;
1151}
1152
1153void kvm_arch_memslots_updated(struct kvm *kvm)
1154{
1155}
1156
1157void kvm_arch_flush_shadow_all(struct kvm *kvm)
1158{
1159}
1160
1161void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1162 struct kvm_memory_slot *slot)
1163{
1164}
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 6e9b5b36921c..7fb343779498 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -18,6 +18,15 @@
18 18
19#ifdef __KERNEL__ 19#ifdef __KERNEL__
20 20
21/* Low-level stepping controls. */
22#define DBG_MDSCR_SS (1 << 0)
23#define DBG_SPSR_SS (1 << 21)
24
25/* MDSCR_EL1 enabling bits */
26#define DBG_MDSCR_KDE (1 << 13)
27#define DBG_MDSCR_MDE (1 << 15)
28#define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
29
21#define DBG_ESR_EVT(x) (((x) >> 27) & 0x7) 30#define DBG_ESR_EVT(x) (((x) >> 27) & 0x7)
22 31
23/* AArch64 */ 32/* AArch64 */
@@ -73,11 +82,6 @@
73 82
74#define CACHE_FLUSH_IS_SAFE 1 83#define CACHE_FLUSH_IS_SAFE 1
75 84
76enum debug_el {
77 DBG_ACTIVE_EL0 = 0,
78 DBG_ACTIVE_EL1,
79};
80
81/* AArch32 */ 85/* AArch32 */
82#define DBG_ESR_EVT_BKPT 0x4 86#define DBG_ESR_EVT_BKPT 0x4
83#define DBG_ESR_EVT_VECC 0x5 87#define DBG_ESR_EVT_VECC 0x5
@@ -115,6 +119,11 @@ void unregister_break_hook(struct break_hook *hook);
115 119
116u8 debug_monitors_arch(void); 120u8 debug_monitors_arch(void);
117 121
122enum debug_el {
123 DBG_ACTIVE_EL0 = 0,
124 DBG_ACTIVE_EL1,
125};
126
118void enable_debug_monitors(enum debug_el el); 127void enable_debug_monitors(enum debug_el el);
119void disable_debug_monitors(enum debug_el el); 128void disable_debug_monitors(enum debug_el el);
120 129
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 3d6903006a8a..cc83520459ed 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -76,9 +76,10 @@
76 */ 76 */
77#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ 77#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
78 HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \ 78 HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
79 HCR_AMO | HCR_IMO | HCR_FMO | \ 79 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
80 HCR_SWIO | HCR_TIDCP | HCR_RW)
81#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) 80#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
81#define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
82
82 83
83/* Hyp System Control Register (SCTLR_EL2) bits */ 84/* Hyp System Control Register (SCTLR_EL2) bits */
84#define SCTLR_EL2_EE (1 << 25) 85#define SCTLR_EL2_EE (1 << 25)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 9fcd54b1e16d..483842180f8f 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -18,6 +18,8 @@
18#ifndef __ARM_KVM_ASM_H__ 18#ifndef __ARM_KVM_ASM_H__
19#define __ARM_KVM_ASM_H__ 19#define __ARM_KVM_ASM_H__
20 20
21#include <asm/virt.h>
22
21/* 23/*
22 * 0 is reserved as an invalid value. 24 * 0 is reserved as an invalid value.
23 * Order *must* be kept in sync with the hyp switch code. 25 * Order *must* be kept in sync with the hyp switch code.
@@ -43,14 +45,25 @@
43#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */ 45#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
44#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ 46#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
45#define PAR_EL1 21 /* Physical Address Register */ 47#define PAR_EL1 21 /* Physical Address Register */
48#define MDSCR_EL1 22 /* Monitor Debug System Control Register */
49#define DBGBCR0_EL1 23 /* Debug Breakpoint Control Registers (0-15) */
50#define DBGBCR15_EL1 38
51#define DBGBVR0_EL1 39 /* Debug Breakpoint Value Registers (0-15) */
52#define DBGBVR15_EL1 54
53#define DBGWCR0_EL1 55 /* Debug Watchpoint Control Registers (0-15) */
54#define DBGWCR15_EL1 70
55#define DBGWVR0_EL1 71 /* Debug Watchpoint Value Registers (0-15) */
56#define DBGWVR15_EL1 86
57#define MDCCINT_EL1 87 /* Monitor Debug Comms Channel Interrupt Enable Reg */
58
46/* 32bit specific registers. Keep them at the end of the range */ 59/* 32bit specific registers. Keep them at the end of the range */
47#define DACR32_EL2 22 /* Domain Access Control Register */ 60#define DACR32_EL2 88 /* Domain Access Control Register */
48#define IFSR32_EL2 23 /* Instruction Fault Status Register */ 61#define IFSR32_EL2 89 /* Instruction Fault Status Register */
49#define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */ 62#define FPEXC32_EL2 90 /* Floating-Point Exception Control Register */
50#define DBGVCR32_EL2 25 /* Debug Vector Catch Register */ 63#define DBGVCR32_EL2 91 /* Debug Vector Catch Register */
51#define TEECR32_EL1 26 /* ThumbEE Configuration Register */ 64#define TEECR32_EL1 92 /* ThumbEE Configuration Register */
52#define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */ 65#define TEEHBR32_EL1 93 /* ThumbEE Handler Base Register */
53#define NR_SYS_REGS 28 66#define NR_SYS_REGS 94
54 67
55/* 32bit mapping */ 68/* 32bit mapping */
56#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ 69#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
@@ -82,11 +95,23 @@
82#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */ 95#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
83#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */ 96#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
84#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */ 97#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
85#define NR_CP15_REGS (NR_SYS_REGS * 2) 98
99#define cp14_DBGDSCRext (MDSCR_EL1 * 2)
100#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2)
101#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2)
102#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1)
103#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
104#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
105#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
106
107#define NR_COPRO_REGS (NR_SYS_REGS * 2)
86 108
87#define ARM_EXCEPTION_IRQ 0 109#define ARM_EXCEPTION_IRQ 0
88#define ARM_EXCEPTION_TRAP 1 110#define ARM_EXCEPTION_TRAP 1
89 111
112#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
113#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
114
90#ifndef __ASSEMBLY__ 115#ifndef __ASSEMBLY__
91struct kvm; 116struct kvm;
92struct kvm_vcpu; 117struct kvm_vcpu;
@@ -96,13 +121,21 @@ extern char __kvm_hyp_init_end[];
96 121
97extern char __kvm_hyp_vector[]; 122extern char __kvm_hyp_vector[];
98 123
99extern char __kvm_hyp_code_start[]; 124#define __kvm_hyp_code_start __hyp_text_start
100extern char __kvm_hyp_code_end[]; 125#define __kvm_hyp_code_end __hyp_text_end
101 126
102extern void __kvm_flush_vm_context(void); 127extern void __kvm_flush_vm_context(void);
103extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); 128extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
104 129
105extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); 130extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
131
132extern u64 __vgic_v3_get_ich_vtr_el2(void);
133
134extern char __save_vgic_v2_state[];
135extern char __restore_vgic_v2_state[];
136extern char __save_vgic_v3_state[];
137extern char __restore_vgic_v3_state[];
138
106#endif 139#endif
107 140
108#endif /* __ARM_KVM_ASM_H__ */ 141#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_coproc.h b/arch/arm64/include/asm/kvm_coproc.h
index 9a59301cd014..0b52377a6c11 100644
--- a/arch/arm64/include/asm/kvm_coproc.h
+++ b/arch/arm64/include/asm/kvm_coproc.h
@@ -39,7 +39,8 @@ void kvm_register_target_sys_reg_table(unsigned int target,
39 struct kvm_sys_reg_target_table *table); 39 struct kvm_sys_reg_target_table *table);
40 40
41int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); 41int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
42int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); 42int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
43int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
43int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); 44int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
44int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); 45int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
45int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run); 46int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index dd8ecfc3f995..fdc3e21abd8d 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -213,6 +213,17 @@ static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
213 default: 213 default:
214 return be64_to_cpu(data); 214 return be64_to_cpu(data);
215 } 215 }
216 } else {
217 switch (len) {
218 case 1:
219 return data & 0xff;
220 case 2:
221 return le16_to_cpu(data & 0xffff);
222 case 4:
223 return le32_to_cpu(data & 0xffffffff);
224 default:
225 return le64_to_cpu(data);
226 }
216 } 227 }
217 228
218 return data; /* Leave LE untouched */ 229 return data; /* Leave LE untouched */
@@ -233,6 +244,17 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
233 default: 244 default:
234 return cpu_to_be64(data); 245 return cpu_to_be64(data);
235 } 246 }
247 } else {
248 switch (len) {
249 case 1:
250 return data & 0xff;
251 case 2:
252 return cpu_to_le16(data & 0xffff);
253 case 4:
254 return cpu_to_le32(data & 0xffffffff);
255 default:
256 return cpu_to_le64(data);
257 }
236 } 258 }
237 259
238 return data; /* Leave LE untouched */ 260 return data; /* Leave LE untouched */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 92242ce06309..e10c45a578e3 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -86,7 +86,7 @@ struct kvm_cpu_context {
86 struct kvm_regs gp_regs; 86 struct kvm_regs gp_regs;
87 union { 87 union {
88 u64 sys_regs[NR_SYS_REGS]; 88 u64 sys_regs[NR_SYS_REGS];
89 u32 cp15[NR_CP15_REGS]; 89 u32 copro[NR_COPRO_REGS];
90 }; 90 };
91}; 91};
92 92
@@ -101,6 +101,9 @@ struct kvm_vcpu_arch {
101 /* Exception Information */ 101 /* Exception Information */
102 struct kvm_vcpu_fault_info fault; 102 struct kvm_vcpu_fault_info fault;
103 103
104 /* Debug state */
105 u64 debug_flags;
106
104 /* Pointer to host CPU context */ 107 /* Pointer to host CPU context */
105 kvm_cpu_context_t *host_cpu_context; 108 kvm_cpu_context_t *host_cpu_context;
106 109
@@ -138,7 +141,20 @@ struct kvm_vcpu_arch {
138 141
139#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) 142#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
140#define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)]) 143#define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)])
141#define vcpu_cp15(v,r) ((v)->arch.ctxt.cp15[(r)]) 144/*
145 * CP14 and CP15 live in the same array, as they are backed by the
146 * same system registers.
147 */
148#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)])
149#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)])
150
151#ifdef CONFIG_CPU_BIG_ENDIAN
152#define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r))
153#define vcpu_cp15_64_low(v,r) vcpu_cp15((v),(r) + 1)
154#else
155#define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r) + 1)
156#define vcpu_cp15_64_low(v,r) vcpu_cp15((v),(r))
157#endif
142 158
143struct kvm_vm_stat { 159struct kvm_vm_stat {
144 u32 remote_tlb_flush; 160 u32 remote_tlb_flush;
@@ -200,4 +216,32 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
200 hyp_stack_ptr, vector_ptr); 216 hyp_stack_ptr, vector_ptr);
201} 217}
202 218
219struct vgic_sr_vectors {
220 void *save_vgic;
221 void *restore_vgic;
222};
223
224static inline void vgic_arch_setup(const struct vgic_params *vgic)
225{
226 extern struct vgic_sr_vectors __vgic_sr_vectors;
227
228 switch(vgic->type)
229 {
230 case VGIC_V2:
231 __vgic_sr_vectors.save_vgic = __save_vgic_v2_state;
232 __vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state;
233 break;
234
235#ifdef CONFIG_ARM_GIC_V3
236 case VGIC_V3:
237 __vgic_sr_vectors.save_vgic = __save_vgic_v3_state;
238 __vgic_sr_vectors.restore_vgic = __restore_vgic_v3_state;
239 break;
240#endif
241
242 default:
243 BUG();
244 }
245}
246
203#endif /* __ARM64_KVM_HOST_H__ */ 247#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 7d29847a893b..8e138c7c53ac 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -125,6 +125,21 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
125#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end) 125#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end)
126#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end) 126#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end)
127 127
128static inline bool kvm_page_empty(void *ptr)
129{
130 struct page *ptr_page = virt_to_page(ptr);
131 return page_count(ptr_page) == 1;
132}
133
134#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
135#ifndef CONFIG_ARM64_64K_PAGES
136#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
137#else
138#define kvm_pmd_table_empty(pmdp) (0)
139#endif
140#define kvm_pud_table_empty(pudp) (0)
141
142
128struct kvm; 143struct kvm;
129 144
130#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) 145#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 215ad4649dd7..7a5df5252dd7 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -50,6 +50,10 @@ static inline bool is_hyp_mode_mismatched(void)
50 return __boot_cpu_mode[0] != __boot_cpu_mode[1]; 50 return __boot_cpu_mode[0] != __boot_cpu_mode[1];
51} 51}
52 52
53/* The section containing the hypervisor text */
54extern char __hyp_text_start[];
55extern char __hyp_text_end[];
56
53#endif /* __ASSEMBLY__ */ 57#endif /* __ASSEMBLY__ */
54 58
55#endif /* ! __ASM__VIRT_H */ 59#endif /* ! __ASM__VIRT_H */
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 646f888387cd..9a9fce090d58 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -120,6 +120,7 @@ int main(void)
120 DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2)); 120 DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2));
121 DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2)); 121 DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2));
122 DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2)); 122 DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
123 DEFINE(VCPU_DEBUG_FLAGS, offsetof(struct kvm_vcpu, arch.debug_flags));
123 DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); 124 DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
124 DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); 125 DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
125 DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); 126 DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
@@ -129,13 +130,24 @@ int main(void)
129 DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled)); 130 DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
130 DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); 131 DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
131 DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); 132 DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
132 DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); 133 DEFINE(VGIC_SAVE_FN, offsetof(struct vgic_sr_vectors, save_vgic));
133 DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr)); 134 DEFINE(VGIC_RESTORE_FN, offsetof(struct vgic_sr_vectors, restore_vgic));
134 DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr)); 135 DEFINE(VGIC_SR_VECTOR_SZ, sizeof(struct vgic_sr_vectors));
135 DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr)); 136 DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
136 DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr)); 137 DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
137 DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr)); 138 DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
138 DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr)); 139 DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
140 DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
141 DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
142 DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
143 DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
144 DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
145 DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
146 DEFINE(VGIC_V3_CPU_EISR, offsetof(struct vgic_cpu, vgic_v3.vgic_eisr));
147 DEFINE(VGIC_V3_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr));
148 DEFINE(VGIC_V3_CPU_AP0R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r));
149 DEFINE(VGIC_V3_CPU_AP1R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r));
150 DEFINE(VGIC_V3_CPU_LR, offsetof(struct vgic_cpu, vgic_v3.vgic_lr));
139 DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); 151 DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
140 DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); 152 DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
141 DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); 153 DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
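
The asm-offsets.c entries above turn offsetof() expressions into assembler-visible constants, so hyp.S can address individual vgic_cpu fields as "base register + immediate". A small sketch of what such a constant buys, done in C through a byte pointer (struct layout and field names here are illustrative, not the kernel's):

/*
 * Illustrative sketch, not kernel code: accessing a field at
 * "base + offsetof(...)", which is what the generated constants let
 * the assembly do.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct vgic_v2_state {
	uint32_t hcr;
	uint32_t vmcr;
};

struct cpu_if {
	uint64_t pad;
	struct vgic_v2_state vgic_v2;
};

#define EX_VGIC_V2_CPU_VMCR offsetof(struct cpu_if, vgic_v2.vmcr)

int main(void)
{
	struct cpu_if cpu;
	uint32_t val = 0xcafe;

	memset(&cpu, 0, sizeof(cpu));
	/* roughly "str w0, [x3, #VGIC_V2_CPU_VMCR]" expressed in C: */
	memcpy((char *)&cpu + EX_VGIC_V2_CPU_VMCR, &val, sizeof(val));

	printf("vmcr = %#x\n", cpu.vgic_v2.vmcr);
	return 0;
}
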
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index fe5b94078d82..b056369fd47d 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -30,15 +30,6 @@
30#include <asm/cputype.h> 30#include <asm/cputype.h>
31#include <asm/system_misc.h> 31#include <asm/system_misc.h>
32 32
33/* Low-level stepping controls. */
34#define DBG_MDSCR_SS (1 << 0)
35#define DBG_SPSR_SS (1 << 21)
36
37/* MDSCR_EL1 enabling bits */
38#define DBG_MDSCR_KDE (1 << 13)
39#define DBG_MDSCR_MDE (1 << 15)
40#define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
41
42/* Determine debug architecture. */ 33/* Determine debug architecture. */
43u8 debug_monitors_arch(void) 34u8 debug_monitors_arch(void)
44{ 35{
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 72a9fd583ad3..32a096174b94 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -20,4 +20,8 @@ kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
20kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o 20kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
21 21
22kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o 22kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
23kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
24kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
25kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
26kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
23kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o 27kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 60b5c31f3c10..8d1ec2887a26 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -136,13 +136,67 @@ static unsigned long num_core_regs(void)
136} 136}
137 137
138/** 138/**
139 * ARM64 versions of the TIMER registers, always available on arm64
140 */
141
142#define NUM_TIMER_REGS 3
143
144static bool is_timer_reg(u64 index)
145{
146 switch (index) {
147 case KVM_REG_ARM_TIMER_CTL:
148 case KVM_REG_ARM_TIMER_CNT:
149 case KVM_REG_ARM_TIMER_CVAL:
150 return true;
151 }
152 return false;
153}
154
155static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
156{
157 if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
158 return -EFAULT;
159 uindices++;
160 if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
161 return -EFAULT;
162 uindices++;
163 if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
164 return -EFAULT;
165
166 return 0;
167}
168
169static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
170{
171 void __user *uaddr = (void __user *)(long)reg->addr;
172 u64 val;
173 int ret;
174
175 ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
176 if (ret != 0)
177 return ret;
178
179 return kvm_arm_timer_set_reg(vcpu, reg->id, val);
180}
181
182static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
183{
184 void __user *uaddr = (void __user *)(long)reg->addr;
185 u64 val;
186
187 val = kvm_arm_timer_get_reg(vcpu, reg->id);
188 return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
189}
190
191/**
139 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG 192 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
140 * 193 *
141 * This is for all registers. 194 * This is for all registers.
142 */ 195 */
143unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) 196unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
144{ 197{
145 return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu); 198 return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
199 + NUM_TIMER_REGS;
146} 200}
147 201
148/** 202/**
@@ -154,6 +208,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
154{ 208{
155 unsigned int i; 209 unsigned int i;
156 const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE; 210 const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
211 int ret;
157 212
158 for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) { 213 for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
159 if (put_user(core_reg | i, uindices)) 214 if (put_user(core_reg | i, uindices))
@@ -161,6 +216,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
161 uindices++; 216 uindices++;
162 } 217 }
163 218
219 ret = copy_timer_indices(vcpu, uindices);
220 if (ret)
221 return ret;
222 uindices += NUM_TIMER_REGS;
223
164 return kvm_arm_copy_sys_reg_indices(vcpu, uindices); 224 return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
165} 225}
166 226
@@ -174,6 +234,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
174 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 234 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
175 return get_core_reg(vcpu, reg); 235 return get_core_reg(vcpu, reg);
176 236
237 if (is_timer_reg(reg->id))
238 return get_timer_reg(vcpu, reg);
239
177 return kvm_arm_sys_reg_get_reg(vcpu, reg); 240 return kvm_arm_sys_reg_get_reg(vcpu, reg);
178} 241}
179 242
@@ -187,6 +250,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
187 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 250 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
188 return set_core_reg(vcpu, reg); 251 return set_core_reg(vcpu, reg);
189 252
253 if (is_timer_reg(reg->id))
254 return set_timer_reg(vcpu, reg);
255
190 return kvm_arm_sys_reg_set_reg(vcpu, reg); 256 return kvm_arm_sys_reg_set_reg(vcpu, reg);
191} 257}
192 258
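
With the guest.c hunk above, the three architected timer registers become visible through the usual KVM_GET_ONE_REG/KVM_SET_ONE_REG interface. A hedged userspace sketch of reading the virtual counter, assuming an arm64 host whose UAPI headers define the timer register IDs and a vcpu_fd obtained elsewhere via KVM_CREATE_VM and KVM_CREATE_VCPU:

/*
 * Illustrative sketch, not part of the patch: read KVM_REG_ARM_TIMER_CNT
 * from userspace. vcpu_fd must be a real vCPU file descriptor.
 */
#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int read_timer_cnt(int vcpu_fd, uint64_t *cnt)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_TIMER_CNT,
		.addr = (uint64_t)(unsigned long)cnt,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

int main(void)
{
	uint64_t cnt;
	int vcpu_fd = -1;	/* placeholder: obtain a real vCPU fd first */

	if (read_timer_cnt(vcpu_fd, &cnt) == 0)
		printf("CNTV: %llu\n", (unsigned long long)cnt);
	else
		perror("KVM_GET_ONE_REG");
	return 0;
}
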
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 182415e1a952..e28be510380c 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -73,9 +73,9 @@ static exit_handle_fn arm_exit_handlers[] = {
73 [ESR_EL2_EC_WFI] = kvm_handle_wfx, 73 [ESR_EL2_EC_WFI] = kvm_handle_wfx,
74 [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32, 74 [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32,
75 [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64, 75 [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64,
76 [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_access, 76 [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_32,
77 [ESR_EL2_EC_CP14_LS] = kvm_handle_cp14_load_store, 77 [ESR_EL2_EC_CP14_LS] = kvm_handle_cp14_load_store,
78 [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_access, 78 [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_64,
79 [ESR_EL2_EC_HVC32] = handle_hvc, 79 [ESR_EL2_EC_HVC32] = handle_hvc,
80 [ESR_EL2_EC_SMC32] = handle_smc, 80 [ESR_EL2_EC_SMC32] = handle_smc,
81 [ESR_EL2_EC_HVC64] = handle_hvc, 81 [ESR_EL2_EC_HVC64] = handle_hvc,
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index b0d1512acf08..b72aa9f9215c 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -16,11 +16,11 @@
16 */ 16 */
17 17
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19#include <linux/irqchip/arm-gic.h>
20 19
21#include <asm/assembler.h> 20#include <asm/assembler.h>
22#include <asm/memory.h> 21#include <asm/memory.h>
23#include <asm/asm-offsets.h> 22#include <asm/asm-offsets.h>
23#include <asm/debug-monitors.h>
24#include <asm/fpsimdmacros.h> 24#include <asm/fpsimdmacros.h>
25#include <asm/kvm.h> 25#include <asm/kvm.h>
26#include <asm/kvm_asm.h> 26#include <asm/kvm_asm.h>
@@ -36,9 +36,6 @@
36 .pushsection .hyp.text, "ax" 36 .pushsection .hyp.text, "ax"
37 .align PAGE_SHIFT 37 .align PAGE_SHIFT
38 38
39__kvm_hyp_code_start:
40 .globl __kvm_hyp_code_start
41
42.macro save_common_regs 39.macro save_common_regs
43 // x2: base address for cpu context 40 // x2: base address for cpu context
44 // x3: tmp register 41 // x3: tmp register
@@ -215,6 +212,7 @@ __kvm_hyp_code_start:
215 mrs x22, amair_el1 212 mrs x22, amair_el1
216 mrs x23, cntkctl_el1 213 mrs x23, cntkctl_el1
217 mrs x24, par_el1 214 mrs x24, par_el1
215 mrs x25, mdscr_el1
218 216
219 stp x4, x5, [x3] 217 stp x4, x5, [x3]
220 stp x6, x7, [x3, #16] 218 stp x6, x7, [x3, #16]
@@ -226,7 +224,202 @@ __kvm_hyp_code_start:
226 stp x18, x19, [x3, #112] 224 stp x18, x19, [x3, #112]
227 stp x20, x21, [x3, #128] 225 stp x20, x21, [x3, #128]
228 stp x22, x23, [x3, #144] 226 stp x22, x23, [x3, #144]
229 str x24, [x3, #160] 227 stp x24, x25, [x3, #160]
228.endm
229
230.macro save_debug
231 // x2: base address for cpu context
232 // x3: tmp register
233
234 mrs x26, id_aa64dfr0_el1
235 ubfx x24, x26, #12, #4 // Extract BRPs
236 ubfx x25, x26, #20, #4 // Extract WRPs
237 mov w26, #15
238 sub w24, w26, w24 // How many BPs to skip
239 sub w25, w26, w25 // How many WPs to skip
240
241 add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
242
243 adr x26, 1f
244 add x26, x26, x24, lsl #2
245 br x26
2461:
247 mrs x20, dbgbcr15_el1
248 mrs x19, dbgbcr14_el1
249 mrs x18, dbgbcr13_el1
250 mrs x17, dbgbcr12_el1
251 mrs x16, dbgbcr11_el1
252 mrs x15, dbgbcr10_el1
253 mrs x14, dbgbcr9_el1
254 mrs x13, dbgbcr8_el1
255 mrs x12, dbgbcr7_el1
256 mrs x11, dbgbcr6_el1
257 mrs x10, dbgbcr5_el1
258 mrs x9, dbgbcr4_el1
259 mrs x8, dbgbcr3_el1
260 mrs x7, dbgbcr2_el1
261 mrs x6, dbgbcr1_el1
262 mrs x5, dbgbcr0_el1
263
264 adr x26, 1f
265 add x26, x26, x24, lsl #2
266 br x26
267
2681:
269 str x20, [x3, #(15 * 8)]
270 str x19, [x3, #(14 * 8)]
271 str x18, [x3, #(13 * 8)]
272 str x17, [x3, #(12 * 8)]
273 str x16, [x3, #(11 * 8)]
274 str x15, [x3, #(10 * 8)]
275 str x14, [x3, #(9 * 8)]
276 str x13, [x3, #(8 * 8)]
277 str x12, [x3, #(7 * 8)]
278 str x11, [x3, #(6 * 8)]
279 str x10, [x3, #(5 * 8)]
280 str x9, [x3, #(4 * 8)]
281 str x8, [x3, #(3 * 8)]
282 str x7, [x3, #(2 * 8)]
283 str x6, [x3, #(1 * 8)]
284 str x5, [x3, #(0 * 8)]
285
286 add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
287
288 adr x26, 1f
289 add x26, x26, x24, lsl #2
290 br x26
2911:
292 mrs x20, dbgbvr15_el1
293 mrs x19, dbgbvr14_el1
294 mrs x18, dbgbvr13_el1
295 mrs x17, dbgbvr12_el1
296 mrs x16, dbgbvr11_el1
297 mrs x15, dbgbvr10_el1
298 mrs x14, dbgbvr9_el1
299 mrs x13, dbgbvr8_el1
300 mrs x12, dbgbvr7_el1
301 mrs x11, dbgbvr6_el1
302 mrs x10, dbgbvr5_el1
303 mrs x9, dbgbvr4_el1
304 mrs x8, dbgbvr3_el1
305 mrs x7, dbgbvr2_el1
306 mrs x6, dbgbvr1_el1
307 mrs x5, dbgbvr0_el1
308
309 adr x26, 1f
310 add x26, x26, x24, lsl #2
311 br x26
312
3131:
314 str x20, [x3, #(15 * 8)]
315 str x19, [x3, #(14 * 8)]
316 str x18, [x3, #(13 * 8)]
317 str x17, [x3, #(12 * 8)]
318 str x16, [x3, #(11 * 8)]
319 str x15, [x3, #(10 * 8)]
320 str x14, [x3, #(9 * 8)]
321 str x13, [x3, #(8 * 8)]
322 str x12, [x3, #(7 * 8)]
323 str x11, [x3, #(6 * 8)]
324 str x10, [x3, #(5 * 8)]
325 str x9, [x3, #(4 * 8)]
326 str x8, [x3, #(3 * 8)]
327 str x7, [x3, #(2 * 8)]
328 str x6, [x3, #(1 * 8)]
329 str x5, [x3, #(0 * 8)]
330
331 add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
332
333 adr x26, 1f
334 add x26, x26, x25, lsl #2
335 br x26
3361:
337 mrs x20, dbgwcr15_el1
338 mrs x19, dbgwcr14_el1
339 mrs x18, dbgwcr13_el1
340 mrs x17, dbgwcr12_el1
341 mrs x16, dbgwcr11_el1
342 mrs x15, dbgwcr10_el1
343 mrs x14, dbgwcr9_el1
344 mrs x13, dbgwcr8_el1
345 mrs x12, dbgwcr7_el1
346 mrs x11, dbgwcr6_el1
347 mrs x10, dbgwcr5_el1
348 mrs x9, dbgwcr4_el1
349 mrs x8, dbgwcr3_el1
350 mrs x7, dbgwcr2_el1
351 mrs x6, dbgwcr1_el1
352 mrs x5, dbgwcr0_el1
353
354 adr x26, 1f
355 add x26, x26, x25, lsl #2
356 br x26
357
3581:
359 str x20, [x3, #(15 * 8)]
360 str x19, [x3, #(14 * 8)]
361 str x18, [x3, #(13 * 8)]
362 str x17, [x3, #(12 * 8)]
363 str x16, [x3, #(11 * 8)]
364 str x15, [x3, #(10 * 8)]
365 str x14, [x3, #(9 * 8)]
366 str x13, [x3, #(8 * 8)]
367 str x12, [x3, #(7 * 8)]
368 str x11, [x3, #(6 * 8)]
369 str x10, [x3, #(5 * 8)]
370 str x9, [x3, #(4 * 8)]
371 str x8, [x3, #(3 * 8)]
372 str x7, [x3, #(2 * 8)]
373 str x6, [x3, #(1 * 8)]
374 str x5, [x3, #(0 * 8)]
375
376 add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
377
378 adr x26, 1f
379 add x26, x26, x25, lsl #2
380 br x26
3811:
382 mrs x20, dbgwvr15_el1
383 mrs x19, dbgwvr14_el1
384 mrs x18, dbgwvr13_el1
385 mrs x17, dbgwvr12_el1
386 mrs x16, dbgwvr11_el1
387 mrs x15, dbgwvr10_el1
388 mrs x14, dbgwvr9_el1
389 mrs x13, dbgwvr8_el1
390 mrs x12, dbgwvr7_el1
391 mrs x11, dbgwvr6_el1
392 mrs x10, dbgwvr5_el1
393 mrs x9, dbgwvr4_el1
394 mrs x8, dbgwvr3_el1
395 mrs x7, dbgwvr2_el1
396 mrs x6, dbgwvr1_el1
397 mrs x5, dbgwvr0_el1
398
399 adr x26, 1f
400 add x26, x26, x25, lsl #2
401 br x26
402
4031:
404 str x20, [x3, #(15 * 8)]
405 str x19, [x3, #(14 * 8)]
406 str x18, [x3, #(13 * 8)]
407 str x17, [x3, #(12 * 8)]
408 str x16, [x3, #(11 * 8)]
409 str x15, [x3, #(10 * 8)]
410 str x14, [x3, #(9 * 8)]
411 str x13, [x3, #(8 * 8)]
412 str x12, [x3, #(7 * 8)]
413 str x11, [x3, #(6 * 8)]
414 str x10, [x3, #(5 * 8)]
415 str x9, [x3, #(4 * 8)]
416 str x8, [x3, #(3 * 8)]
417 str x7, [x3, #(2 * 8)]
418 str x6, [x3, #(1 * 8)]
419 str x5, [x3, #(0 * 8)]
420
421 mrs x21, mdccint_el1
422 str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
230.endm 423.endm
231 424
232.macro restore_sysregs 425.macro restore_sysregs
@@ -245,7 +438,7 @@ __kvm_hyp_code_start:
245 ldp x18, x19, [x3, #112] 438 ldp x18, x19, [x3, #112]
246 ldp x20, x21, [x3, #128] 439 ldp x20, x21, [x3, #128]
247 ldp x22, x23, [x3, #144] 440 ldp x22, x23, [x3, #144]
248 ldr x24, [x3, #160] 441 ldp x24, x25, [x3, #160]
249 442
250 msr vmpidr_el2, x4 443 msr vmpidr_el2, x4
251 msr csselr_el1, x5 444 msr csselr_el1, x5
@@ -268,6 +461,198 @@ __kvm_hyp_code_start:
268 msr amair_el1, x22 461 msr amair_el1, x22
269 msr cntkctl_el1, x23 462 msr cntkctl_el1, x23
270 msr par_el1, x24 463 msr par_el1, x24
464 msr mdscr_el1, x25
465.endm
466
467.macro restore_debug
468 // x2: base address for cpu context
469 // x3: tmp register
470
471 mrs x26, id_aa64dfr0_el1
472 ubfx x24, x26, #12, #4 // Extract BRPs
473 ubfx x25, x26, #20, #4 // Extract WRPs
474 mov w26, #15
475 sub w24, w26, w24 // How many BPs to skip
476 sub w25, w26, w25 // How many WPs to skip
477
478 add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
479
480 adr x26, 1f
481 add x26, x26, x24, lsl #2
482 br x26
4831:
484 ldr x20, [x3, #(15 * 8)]
485 ldr x19, [x3, #(14 * 8)]
486 ldr x18, [x3, #(13 * 8)]
487 ldr x17, [x3, #(12 * 8)]
488 ldr x16, [x3, #(11 * 8)]
489 ldr x15, [x3, #(10 * 8)]
490 ldr x14, [x3, #(9 * 8)]
491 ldr x13, [x3, #(8 * 8)]
492 ldr x12, [x3, #(7 * 8)]
493 ldr x11, [x3, #(6 * 8)]
494 ldr x10, [x3, #(5 * 8)]
495 ldr x9, [x3, #(4 * 8)]
496 ldr x8, [x3, #(3 * 8)]
497 ldr x7, [x3, #(2 * 8)]
498 ldr x6, [x3, #(1 * 8)]
499 ldr x5, [x3, #(0 * 8)]
500
501 adr x26, 1f
502 add x26, x26, x24, lsl #2
503 br x26
5041:
505 msr dbgbcr15_el1, x20
506 msr dbgbcr14_el1, x19
507 msr dbgbcr13_el1, x18
508 msr dbgbcr12_el1, x17
509 msr dbgbcr11_el1, x16
510 msr dbgbcr10_el1, x15
511 msr dbgbcr9_el1, x14
512 msr dbgbcr8_el1, x13
513 msr dbgbcr7_el1, x12
514 msr dbgbcr6_el1, x11
515 msr dbgbcr5_el1, x10
516 msr dbgbcr4_el1, x9
517 msr dbgbcr3_el1, x8
518 msr dbgbcr2_el1, x7
519 msr dbgbcr1_el1, x6
520 msr dbgbcr0_el1, x5
521
522 add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
523
524 adr x26, 1f
525 add x26, x26, x24, lsl #2
526 br x26
5271:
528 ldr x20, [x3, #(15 * 8)]
529 ldr x19, [x3, #(14 * 8)]
530 ldr x18, [x3, #(13 * 8)]
531 ldr x17, [x3, #(12 * 8)]
532 ldr x16, [x3, #(11 * 8)]
533 ldr x15, [x3, #(10 * 8)]
534 ldr x14, [x3, #(9 * 8)]
535 ldr x13, [x3, #(8 * 8)]
536 ldr x12, [x3, #(7 * 8)]
537 ldr x11, [x3, #(6 * 8)]
538 ldr x10, [x3, #(5 * 8)]
539 ldr x9, [x3, #(4 * 8)]
540 ldr x8, [x3, #(3 * 8)]
541 ldr x7, [x3, #(2 * 8)]
542 ldr x6, [x3, #(1 * 8)]
543 ldr x5, [x3, #(0 * 8)]
544
545 adr x26, 1f
546 add x26, x26, x24, lsl #2
547 br x26
5481:
549 msr dbgbvr15_el1, x20
550 msr dbgbvr14_el1, x19
551 msr dbgbvr13_el1, x18
552 msr dbgbvr12_el1, x17
553 msr dbgbvr11_el1, x16
554 msr dbgbvr10_el1, x15
555 msr dbgbvr9_el1, x14
556 msr dbgbvr8_el1, x13
557 msr dbgbvr7_el1, x12
558 msr dbgbvr6_el1, x11
559 msr dbgbvr5_el1, x10
560 msr dbgbvr4_el1, x9
561 msr dbgbvr3_el1, x8
562 msr dbgbvr2_el1, x7
563 msr dbgbvr1_el1, x6
564 msr dbgbvr0_el1, x5
565
566 add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
567
568 adr x26, 1f
569 add x26, x26, x25, lsl #2
570 br x26
5711:
572 ldr x20, [x3, #(15 * 8)]
573 ldr x19, [x3, #(14 * 8)]
574 ldr x18, [x3, #(13 * 8)]
575 ldr x17, [x3, #(12 * 8)]
576 ldr x16, [x3, #(11 * 8)]
577 ldr x15, [x3, #(10 * 8)]
578 ldr x14, [x3, #(9 * 8)]
579 ldr x13, [x3, #(8 * 8)]
580 ldr x12, [x3, #(7 * 8)]
581 ldr x11, [x3, #(6 * 8)]
582 ldr x10, [x3, #(5 * 8)]
583 ldr x9, [x3, #(4 * 8)]
584 ldr x8, [x3, #(3 * 8)]
585 ldr x7, [x3, #(2 * 8)]
586 ldr x6, [x3, #(1 * 8)]
587 ldr x5, [x3, #(0 * 8)]
588
589 adr x26, 1f
590 add x26, x26, x25, lsl #2
591 br x26
5921:
593 msr dbgwcr15_el1, x20
594 msr dbgwcr14_el1, x19
595 msr dbgwcr13_el1, x18
596 msr dbgwcr12_el1, x17
597 msr dbgwcr11_el1, x16
598 msr dbgwcr10_el1, x15
599 msr dbgwcr9_el1, x14
600 msr dbgwcr8_el1, x13
601 msr dbgwcr7_el1, x12
602 msr dbgwcr6_el1, x11
603 msr dbgwcr5_el1, x10
604 msr dbgwcr4_el1, x9
605 msr dbgwcr3_el1, x8
606 msr dbgwcr2_el1, x7
607 msr dbgwcr1_el1, x6
608 msr dbgwcr0_el1, x5
609
610 add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
611
612 adr x26, 1f
613 add x26, x26, x25, lsl #2
614 br x26
6151:
616 ldr x20, [x3, #(15 * 8)]
617 ldr x19, [x3, #(14 * 8)]
618 ldr x18, [x3, #(13 * 8)]
619 ldr x17, [x3, #(12 * 8)]
620 ldr x16, [x3, #(11 * 8)]
621 ldr x15, [x3, #(10 * 8)]
622 ldr x14, [x3, #(9 * 8)]
623 ldr x13, [x3, #(8 * 8)]
624 ldr x12, [x3, #(7 * 8)]
625 ldr x11, [x3, #(6 * 8)]
626 ldr x10, [x3, #(5 * 8)]
627 ldr x9, [x3, #(4 * 8)]
628 ldr x8, [x3, #(3 * 8)]
629 ldr x7, [x3, #(2 * 8)]
630 ldr x6, [x3, #(1 * 8)]
631 ldr x5, [x3, #(0 * 8)]
632
633 adr x26, 1f
634 add x26, x26, x25, lsl #2
635 br x26
6361:
637 msr dbgwvr15_el1, x20
638 msr dbgwvr14_el1, x19
639 msr dbgwvr13_el1, x18
640 msr dbgwvr12_el1, x17
641 msr dbgwvr11_el1, x16
642 msr dbgwvr10_el1, x15
643 msr dbgwvr9_el1, x14
644 msr dbgwvr8_el1, x13
645 msr dbgwvr7_el1, x12
646 msr dbgwvr6_el1, x11
647 msr dbgwvr5_el1, x10
648 msr dbgwvr4_el1, x9
649 msr dbgwvr3_el1, x8
650 msr dbgwvr2_el1, x7
651 msr dbgwvr1_el1, x6
652 msr dbgwvr0_el1, x5
653
654 ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
655 msr mdccint_el1, x21
271.endm 656.endm
272 657
273.macro skip_32bit_state tmp, target 658.macro skip_32bit_state tmp, target
@@ -282,6 +667,35 @@ __kvm_hyp_code_start:
282 tbz \tmp, #12, \target 667 tbz \tmp, #12, \target
283.endm 668.endm
284 669
670.macro skip_debug_state tmp, target
671 ldr \tmp, [x0, #VCPU_DEBUG_FLAGS]
672 tbz \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
673.endm
674
675.macro compute_debug_state target
676 // Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
677 // is set, we do a full save/restore cycle and disable trapping.

678 add x25, x0, #VCPU_CONTEXT
679
680 // Check the state of MDSCR_EL1
681 ldr x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
682 and x26, x25, #DBG_MDSCR_KDE
683 and x25, x25, #DBG_MDSCR_MDE
684 adds xzr, x25, x26
685 b.eq 9998f // Nothing to see there
686
 687	// If any interesting bits were set, we must set the flag
688 mov x26, #KVM_ARM64_DEBUG_DIRTY
689 str x26, [x0, #VCPU_DEBUG_FLAGS]
690 b 9999f // Don't skip restore
691
6929998:
693 // Otherwise load the flags from memory in case we recently
694 // trapped
695 skip_debug_state x25, \target
6969999:
697.endm
698
285.macro save_guest_32bit_state 699.macro save_guest_32bit_state
286 skip_32bit_state x3, 1f 700 skip_32bit_state x3, 1f
287 701
@@ -297,10 +711,13 @@ __kvm_hyp_code_start:
297 mrs x4, dacr32_el2 711 mrs x4, dacr32_el2
298 mrs x5, ifsr32_el2 712 mrs x5, ifsr32_el2
299 mrs x6, fpexc32_el2 713 mrs x6, fpexc32_el2
300 mrs x7, dbgvcr32_el2
301 stp x4, x5, [x3] 714 stp x4, x5, [x3]
302 stp x6, x7, [x3, #16] 715 str x6, [x3, #16]
303 716
717 skip_debug_state x8, 2f
718 mrs x7, dbgvcr32_el2
719 str x7, [x3, #24]
7202:
304 skip_tee_state x8, 1f 721 skip_tee_state x8, 1f
305 722
306 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1) 723 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
@@ -323,12 +740,15 @@ __kvm_hyp_code_start:
323 740
324 add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2) 741 add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
325 ldp x4, x5, [x3] 742 ldp x4, x5, [x3]
326 ldp x6, x7, [x3, #16] 743 ldr x6, [x3, #16]
327 msr dacr32_el2, x4 744 msr dacr32_el2, x4
328 msr ifsr32_el2, x5 745 msr ifsr32_el2, x5
329 msr fpexc32_el2, x6 746 msr fpexc32_el2, x6
330 msr dbgvcr32_el2, x7
331 747
748 skip_debug_state x8, 2f
749 ldr x7, [x3, #24]
750 msr dbgvcr32_el2, x7
7512:
332 skip_tee_state x8, 1f 752 skip_tee_state x8, 1f
333 753
334 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1) 754 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
@@ -339,11 +759,8 @@ __kvm_hyp_code_start:
339.endm 759.endm
340 760
341.macro activate_traps 761.macro activate_traps
342 ldr x2, [x0, #VCPU_IRQ_LINES] 762 ldr x2, [x0, #VCPU_HCR_EL2]
343 ldr x1, [x0, #VCPU_HCR_EL2] 763 msr hcr_el2, x2
344 orr x2, x2, x1
345 msr hcr_el2, x2
346
347 ldr x2, =(CPTR_EL2_TTA) 764 ldr x2, =(CPTR_EL2_TTA)
348 msr cptr_el2, x2 765 msr cptr_el2, x2
349 766
@@ -353,6 +770,14 @@ __kvm_hyp_code_start:
353 mrs x2, mdcr_el2 770 mrs x2, mdcr_el2
354 and x2, x2, #MDCR_EL2_HPMN_MASK 771 and x2, x2, #MDCR_EL2_HPMN_MASK
355 orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR) 772 orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
773 orr x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)
774
775 // Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
776 // if not dirty.
777 ldr x3, [x0, #VCPU_DEBUG_FLAGS]
778 tbnz x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
779 orr x2, x2, #MDCR_EL2_TDA
7801:
356 msr mdcr_el2, x2 781 msr mdcr_el2, x2
357.endm 782.endm
358 783
@@ -379,100 +804,33 @@ __kvm_hyp_code_start:
379.endm 804.endm
380 805
381/* 806/*
382 * Save the VGIC CPU state into memory 807 * Call into the vgic backend for state saving
383 * x0: Register pointing to VCPU struct
384 * Do not corrupt x1!!!
385 */ 808 */
386.macro save_vgic_state 809.macro save_vgic_state
387 /* Get VGIC VCTRL base into x2 */ 810 adr x24, __vgic_sr_vectors
388 ldr x2, [x0, #VCPU_KVM] 811 ldr x24, [x24, VGIC_SAVE_FN]
389 kern_hyp_va x2 812 kern_hyp_va x24
390 ldr x2, [x2, #KVM_VGIC_VCTRL] 813 blr x24
391 kern_hyp_va x2 814 mrs x24, hcr_el2
392 cbz x2, 2f // disabled 815 mov x25, #HCR_INT_OVERRIDE
393 816 neg x25, x25
394 /* Compute the address of struct vgic_cpu */ 817 and x24, x24, x25
395 add x3, x0, #VCPU_VGIC_CPU 818 msr hcr_el2, x24
396
397 /* Save all interesting registers */
398 ldr w4, [x2, #GICH_HCR]
399 ldr w5, [x2, #GICH_VMCR]
400 ldr w6, [x2, #GICH_MISR]
401 ldr w7, [x2, #GICH_EISR0]
402 ldr w8, [x2, #GICH_EISR1]
403 ldr w9, [x2, #GICH_ELRSR0]
404 ldr w10, [x2, #GICH_ELRSR1]
405 ldr w11, [x2, #GICH_APR]
406CPU_BE( rev w4, w4 )
407CPU_BE( rev w5, w5 )
408CPU_BE( rev w6, w6 )
409CPU_BE( rev w7, w7 )
410CPU_BE( rev w8, w8 )
411CPU_BE( rev w9, w9 )
412CPU_BE( rev w10, w10 )
413CPU_BE( rev w11, w11 )
414
415 str w4, [x3, #VGIC_CPU_HCR]
416 str w5, [x3, #VGIC_CPU_VMCR]
417 str w6, [x3, #VGIC_CPU_MISR]
418 str w7, [x3, #VGIC_CPU_EISR]
419 str w8, [x3, #(VGIC_CPU_EISR + 4)]
420 str w9, [x3, #VGIC_CPU_ELRSR]
421 str w10, [x3, #(VGIC_CPU_ELRSR + 4)]
422 str w11, [x3, #VGIC_CPU_APR]
423
424 /* Clear GICH_HCR */
425 str wzr, [x2, #GICH_HCR]
426
427 /* Save list registers */
428 add x2, x2, #GICH_LR0
429 ldr w4, [x3, #VGIC_CPU_NR_LR]
430 add x3, x3, #VGIC_CPU_LR
4311: ldr w5, [x2], #4
432CPU_BE( rev w5, w5 )
433 str w5, [x3], #4
434 sub w4, w4, #1
435 cbnz w4, 1b
4362:
437.endm 819.endm
438 820
439/* 821/*
440 * Restore the VGIC CPU state from memory 822 * Call into the vgic backend for state restoring
441 * x0: Register pointing to VCPU struct
442 */ 823 */
443.macro restore_vgic_state 824.macro restore_vgic_state
444 /* Get VGIC VCTRL base into x2 */ 825 mrs x24, hcr_el2
445 ldr x2, [x0, #VCPU_KVM] 826 ldr x25, [x0, #VCPU_IRQ_LINES]
446 kern_hyp_va x2 827 orr x24, x24, #HCR_INT_OVERRIDE
447 ldr x2, [x2, #KVM_VGIC_VCTRL] 828 orr x24, x24, x25
448 kern_hyp_va x2 829 msr hcr_el2, x24
449 cbz x2, 2f // disabled 830 adr x24, __vgic_sr_vectors
450 831 ldr x24, [x24, #VGIC_RESTORE_FN]
451 /* Compute the address of struct vgic_cpu */ 832 kern_hyp_va x24
452 add x3, x0, #VCPU_VGIC_CPU 833 blr x24
453
454 /* We only restore a minimal set of registers */
455 ldr w4, [x3, #VGIC_CPU_HCR]
456 ldr w5, [x3, #VGIC_CPU_VMCR]
457 ldr w6, [x3, #VGIC_CPU_APR]
458CPU_BE( rev w4, w4 )
459CPU_BE( rev w5, w5 )
460CPU_BE( rev w6, w6 )
461
462 str w4, [x2, #GICH_HCR]
463 str w5, [x2, #GICH_VMCR]
464 str w6, [x2, #GICH_APR]
465
466 /* Restore list registers */
467 add x2, x2, #GICH_LR0
468 ldr w4, [x3, #VGIC_CPU_NR_LR]
469 add x3, x3, #VGIC_CPU_LR
4701: ldr w5, [x3], #4
471CPU_BE( rev w5, w5 )
472 str w5, [x2], #4
473 sub w4, w4, #1
474 cbnz w4, 1b
4752:
476.endm 834.endm
477 835
478.macro save_timer_state 836.macro save_timer_state
@@ -537,6 +895,14 @@ __restore_sysregs:
537 restore_sysregs 895 restore_sysregs
538 ret 896 ret
539 897
898__save_debug:
899 save_debug
900 ret
901
902__restore_debug:
903 restore_debug
904 ret
905
540__save_fpsimd: 906__save_fpsimd:
541 save_fpsimd 907 save_fpsimd
542 ret 908 ret
@@ -568,6 +934,9 @@ ENTRY(__kvm_vcpu_run)
568 bl __save_fpsimd 934 bl __save_fpsimd
569 bl __save_sysregs 935 bl __save_sysregs
570 936
937 compute_debug_state 1f
938 bl __save_debug
9391:
571 activate_traps 940 activate_traps
572 activate_vm 941 activate_vm
573 942
@@ -579,6 +948,10 @@ ENTRY(__kvm_vcpu_run)
579 948
580 bl __restore_sysregs 949 bl __restore_sysregs
581 bl __restore_fpsimd 950 bl __restore_fpsimd
951
952 skip_debug_state x3, 1f
953 bl __restore_debug
9541:
582 restore_guest_32bit_state 955 restore_guest_32bit_state
583 restore_guest_regs 956 restore_guest_regs
584 957
@@ -595,6 +968,10 @@ __kvm_vcpu_return:
595 save_guest_regs 968 save_guest_regs
596 bl __save_fpsimd 969 bl __save_fpsimd
597 bl __save_sysregs 970 bl __save_sysregs
971
972 skip_debug_state x3, 1f
973 bl __save_debug
9741:
598 save_guest_32bit_state 975 save_guest_32bit_state
599 976
600 save_timer_state 977 save_timer_state
@@ -609,6 +986,14 @@ __kvm_vcpu_return:
609 986
610 bl __restore_sysregs 987 bl __restore_sysregs
611 bl __restore_fpsimd 988 bl __restore_fpsimd
989
990 skip_debug_state x3, 1f
991 // Clear the dirty flag for the next run, as all the state has
992 // already been saved. Note that we nuke the whole 64bit word.
993 // If we ever add more flags, we'll have to be more careful...
994 str xzr, [x0, #VCPU_DEBUG_FLAGS]
995 bl __restore_debug
9961:
612 restore_host_regs 997 restore_host_regs
613 998
614 mov x0, x1 999 mov x0, x1
@@ -653,6 +1038,12 @@ ENTRY(__kvm_flush_vm_context)
653 ret 1038 ret
654ENDPROC(__kvm_flush_vm_context) 1039ENDPROC(__kvm_flush_vm_context)
655 1040
 1041	// struct vgic_sr_vectors __vgic_sr_vectors;
1042 .align 3
1043ENTRY(__vgic_sr_vectors)
1044 .skip VGIC_SR_VECTOR_SZ
1045ENDPROC(__vgic_sr_vectors)
1046
656__kvm_hyp_panic: 1047__kvm_hyp_panic:
657 // Guess the context by looking at VTTBR: 1048 // Guess the context by looking at VTTBR:
658 // If zero, then we're already a host. 1049 // If zero, then we're already a host.
@@ -830,7 +1221,7 @@ el1_trap:
830 mrs x2, far_el2 1221 mrs x2, far_el2
831 1222
8322: mrs x0, tpidr_el2 12232: mrs x0, tpidr_el2
833 str x1, [x0, #VCPU_ESR_EL2] 1224 str w1, [x0, #VCPU_ESR_EL2]
834 str x2, [x0, #VCPU_FAR_EL2] 1225 str x2, [x0, #VCPU_FAR_EL2]
835 str x3, [x0, #VCPU_HPFAR_EL2] 1226 str x3, [x0, #VCPU_HPFAR_EL2]
836 1227
@@ -880,7 +1271,4 @@ ENTRY(__kvm_hyp_vector)
880 ventry el1_error_invalid // Error 32-bit EL1 1271 ventry el1_error_invalid // Error 32-bit EL1
881ENDPROC(__kvm_hyp_vector) 1272ENDPROC(__kvm_hyp_vector)
882 1273
883__kvm_hyp_code_end:
884 .globl __kvm_hyp_code_end
885
886 .popsection 1274 .popsection
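
Taken together, the hyp.S changes above implement the lazy debug switch driven by the per-vcpu dirty flag: compute_debug_state on entry decides whether a full save/restore is needed (or leaves MDCR_EL2.TDA set so guest accesses trap), and the exit path only saves guest state and restores host state when the flag is set, clearing it for the next run. A hedged C restatement of that control flow (flag and helper names are illustrative only):

/*
 * Illustrative sketch, not kernel code: the lazy debug save/restore flow
 * that compute_debug_state/skip_debug_state implement in assembly.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEBUG_DIRTY (1u << 0)

struct vcpu {
	unsigned int debug_flags;
	bool guest_debug_enabled;	/* guest has KDE or MDE set in MDSCR */
};

static void save_host_debug(void)     { puts("save host debug regs"); }
static void restore_guest_debug(void) { puts("restore guest debug regs"); }
static void save_guest_debug(void)    { puts("save guest debug regs"); }
static void restore_host_debug(void)  { puts("restore host debug regs"); }

static void guest_entry(struct vcpu *v)
{
	/* compute_debug_state: active guest debug forces a full switch */
	if (v->guest_debug_enabled)
		v->debug_flags |= DEBUG_DIRTY;

	if (v->debug_flags & DEBUG_DIRTY) {
		save_host_debug();
		restore_guest_debug();
	}
	/* otherwise leave the traps enabled and skip the switch entirely */
}

static void guest_exit(struct vcpu *v)
{
	if (v->debug_flags & DEBUG_DIRTY) {
		save_guest_debug();
		restore_host_debug();
		v->debug_flags = 0;	/* clear the flag for the next run */
	}
}

int main(void)
{
	struct vcpu v = { .debug_flags = DEBUG_DIRTY };

	guest_entry(&v);
	guest_exit(&v);
	return 0;
}
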
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c59a1bdab5eb..5805e7c4a4dd 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -30,6 +30,7 @@
30#include <asm/kvm_mmu.h> 30#include <asm/kvm_mmu.h>
31#include <asm/cacheflush.h> 31#include <asm/cacheflush.h>
32#include <asm/cputype.h> 32#include <asm/cputype.h>
33#include <asm/debug-monitors.h>
33#include <trace/events/kvm.h> 34#include <trace/events/kvm.h>
34 35
35#include "sys_regs.h" 36#include "sys_regs.h"
@@ -137,10 +138,11 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
137 if (!p->is_aarch32) { 138 if (!p->is_aarch32) {
138 vcpu_sys_reg(vcpu, r->reg) = val; 139 vcpu_sys_reg(vcpu, r->reg) = val;
139 } else { 140 } else {
140 vcpu_cp15(vcpu, r->reg) = val & 0xffffffffUL;
141 if (!p->is_32bit) 141 if (!p->is_32bit)
142 vcpu_cp15(vcpu, r->reg + 1) = val >> 32; 142 vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
143 vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
143 } 144 }
145
144 return true; 146 return true;
145} 147}
146 148
@@ -163,18 +165,9 @@ static bool access_sctlr(struct kvm_vcpu *vcpu,
163 return true; 165 return true;
164} 166}
165 167
166/* 168static bool trap_raz_wi(struct kvm_vcpu *vcpu,
167 * We could trap ID_DFR0 and tell the guest we don't support performance 169 const struct sys_reg_params *p,
168 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was 170 const struct sys_reg_desc *r)
169 * NAKed, so it will read the PMCR anyway.
170 *
171 * Therefore we tell the guest we have 0 counters. Unfortunately, we
172 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
173 * all PM registers, which doesn't crash the guest kernel at least.
174 */
175static bool pm_fake(struct kvm_vcpu *vcpu,
176 const struct sys_reg_params *p,
177 const struct sys_reg_desc *r)
178{ 171{
179 if (p->is_write) 172 if (p->is_write)
180 return ignore_write(vcpu, p); 173 return ignore_write(vcpu, p);
@@ -182,6 +175,73 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
182 return read_zero(vcpu, p); 175 return read_zero(vcpu, p);
183} 176}
184 177
178static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
179 const struct sys_reg_params *p,
180 const struct sys_reg_desc *r)
181{
182 if (p->is_write) {
183 return ignore_write(vcpu, p);
184 } else {
185 *vcpu_reg(vcpu, p->Rt) = (1 << 3);
186 return true;
187 }
188}
189
190static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
191 const struct sys_reg_params *p,
192 const struct sys_reg_desc *r)
193{
194 if (p->is_write) {
195 return ignore_write(vcpu, p);
196 } else {
197 u32 val;
198 asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
199 *vcpu_reg(vcpu, p->Rt) = val;
200 return true;
201 }
202}
203
204/*
205 * We want to avoid world-switching all the DBG registers all the
206 * time:
207 *
208 * - If we've touched any debug register, it is likely that we're
209 * going to touch more of them. It then makes sense to disable the
210 * traps and start doing the save/restore dance
211 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
212 * then mandatory to save/restore the registers, as the guest
213 * depends on them.
214 *
215 * For this, we use a DIRTY bit, indicating the guest has modified the
 216 * debug registers, used as follows:
217 *
218 * On guest entry:
219 * - If the dirty bit is set (because we're coming back from trapping),
220 * disable the traps, save host registers, restore guest registers.
221 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
222 * set the dirty bit, disable the traps, save host registers,
223 * restore guest registers.
224 * - Otherwise, enable the traps
225 *
226 * On guest exit:
227 * - If the dirty bit is set, save guest registers, restore host
 228 * registers and clear the dirty bit. This ensures that the host can
229 * now use the debug registers.
230 */
231static bool trap_debug_regs(struct kvm_vcpu *vcpu,
232 const struct sys_reg_params *p,
233 const struct sys_reg_desc *r)
234{
235 if (p->is_write) {
236 vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
237 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
238 } else {
239 *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
240 }
241
242 return true;
243}
244
185static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 245static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
186{ 246{
187 u64 amair; 247 u64 amair;
@@ -198,9 +258,39 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
198 vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff); 258 vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
199} 259}
200 260
261/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
262#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
263 /* DBGBVRn_EL1 */ \
264 { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100), \
265 trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 }, \
266 /* DBGBCRn_EL1 */ \
267 { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101), \
268 trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 }, \
269 /* DBGWVRn_EL1 */ \
270 { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110), \
271 trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 }, \
272 /* DBGWCRn_EL1 */ \
273 { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \
274 trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }
275
201/* 276/*
202 * Architected system registers. 277 * Architected system registers.
203 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 278 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
279 *
280 * We could trap ID_DFR0 and tell the guest we don't support performance
281 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
282 * NAKed, so it will read the PMCR anyway.
283 *
284 * Therefore we tell the guest we have 0 counters. Unfortunately, we
285 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
286 * all PM registers, which doesn't crash the guest kernel at least.
287 *
 288 * Debug handling: We do trap most, if not all, debug-related system
289 * registers. The implementation is good enough to ensure that a guest
290 * can use these with minimal performance degradation. The drawback is
 291 * that we don't implement any of the external debug, nor the
292 * OSlock protocol. This should be revisited if we ever encounter a
293 * more demanding guest...
204 */ 294 */
205static const struct sys_reg_desc sys_reg_descs[] = { 295static const struct sys_reg_desc sys_reg_descs[] = {
206 /* DC ISW */ 296 /* DC ISW */
@@ -213,12 +303,71 @@ static const struct sys_reg_desc sys_reg_descs[] = {
213 { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010), 303 { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
214 access_dcsw }, 304 access_dcsw },
215 305
306 DBG_BCR_BVR_WCR_WVR_EL1(0),
307 DBG_BCR_BVR_WCR_WVR_EL1(1),
308 /* MDCCINT_EL1 */
309 { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
310 trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
311 /* MDSCR_EL1 */
312 { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
313 trap_debug_regs, reset_val, MDSCR_EL1, 0 },
314 DBG_BCR_BVR_WCR_WVR_EL1(2),
315 DBG_BCR_BVR_WCR_WVR_EL1(3),
316 DBG_BCR_BVR_WCR_WVR_EL1(4),
317 DBG_BCR_BVR_WCR_WVR_EL1(5),
318 DBG_BCR_BVR_WCR_WVR_EL1(6),
319 DBG_BCR_BVR_WCR_WVR_EL1(7),
320 DBG_BCR_BVR_WCR_WVR_EL1(8),
321 DBG_BCR_BVR_WCR_WVR_EL1(9),
322 DBG_BCR_BVR_WCR_WVR_EL1(10),
323 DBG_BCR_BVR_WCR_WVR_EL1(11),
324 DBG_BCR_BVR_WCR_WVR_EL1(12),
325 DBG_BCR_BVR_WCR_WVR_EL1(13),
326 DBG_BCR_BVR_WCR_WVR_EL1(14),
327 DBG_BCR_BVR_WCR_WVR_EL1(15),
328
329 /* MDRAR_EL1 */
330 { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
331 trap_raz_wi },
332 /* OSLAR_EL1 */
333 { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
334 trap_raz_wi },
335 /* OSLSR_EL1 */
336 { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
337 trap_oslsr_el1 },
338 /* OSDLR_EL1 */
339 { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
340 trap_raz_wi },
341 /* DBGPRCR_EL1 */
342 { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
343 trap_raz_wi },
344 /* DBGCLAIMSET_EL1 */
345 { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
346 trap_raz_wi },
347 /* DBGCLAIMCLR_EL1 */
348 { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
349 trap_raz_wi },
350 /* DBGAUTHSTATUS_EL1 */
351 { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
352 trap_dbgauthstatus_el1 },
353
216 /* TEECR32_EL1 */ 354 /* TEECR32_EL1 */
217 { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000), 355 { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
218 NULL, reset_val, TEECR32_EL1, 0 }, 356 NULL, reset_val, TEECR32_EL1, 0 },
219 /* TEEHBR32_EL1 */ 357 /* TEEHBR32_EL1 */
220 { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000), 358 { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
221 NULL, reset_val, TEEHBR32_EL1, 0 }, 359 NULL, reset_val, TEEHBR32_EL1, 0 },
360
361 /* MDCCSR_EL1 */
362 { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
363 trap_raz_wi },
364 /* DBGDTR_EL0 */
365 { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
366 trap_raz_wi },
367 /* DBGDTR[TR]X_EL0 */
368 { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
369 trap_raz_wi },
370
222 /* DBGVCR32_EL2 */ 371 /* DBGVCR32_EL2 */
223 { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000), 372 { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
224 NULL, reset_val, DBGVCR32_EL2, 0 }, 373 NULL, reset_val, DBGVCR32_EL2, 0 },
@@ -260,10 +409,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
260 409
261 /* PMINTENSET_EL1 */ 410 /* PMINTENSET_EL1 */
262 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), 411 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
263 pm_fake }, 412 trap_raz_wi },
264 /* PMINTENCLR_EL1 */ 413 /* PMINTENCLR_EL1 */
265 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010), 414 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
266 pm_fake }, 415 trap_raz_wi },
267 416
268 /* MAIR_EL1 */ 417 /* MAIR_EL1 */
269 { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000), 418 { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
@@ -292,43 +441,43 @@ static const struct sys_reg_desc sys_reg_descs[] = {
292 441
293 /* PMCR_EL0 */ 442 /* PMCR_EL0 */
294 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000), 443 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
295 pm_fake }, 444 trap_raz_wi },
296 /* PMCNTENSET_EL0 */ 445 /* PMCNTENSET_EL0 */
297 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001), 446 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
298 pm_fake }, 447 trap_raz_wi },
299 /* PMCNTENCLR_EL0 */ 448 /* PMCNTENCLR_EL0 */
300 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010), 449 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
301 pm_fake }, 450 trap_raz_wi },
302 /* PMOVSCLR_EL0 */ 451 /* PMOVSCLR_EL0 */
303 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011), 452 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
304 pm_fake }, 453 trap_raz_wi },
305 /* PMSWINC_EL0 */ 454 /* PMSWINC_EL0 */
306 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100), 455 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
307 pm_fake }, 456 trap_raz_wi },
308 /* PMSELR_EL0 */ 457 /* PMSELR_EL0 */
309 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101), 458 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
310 pm_fake }, 459 trap_raz_wi },
311 /* PMCEID0_EL0 */ 460 /* PMCEID0_EL0 */
312 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110), 461 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
313 pm_fake }, 462 trap_raz_wi },
314 /* PMCEID1_EL0 */ 463 /* PMCEID1_EL0 */
315 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111), 464 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
316 pm_fake }, 465 trap_raz_wi },
317 /* PMCCNTR_EL0 */ 466 /* PMCCNTR_EL0 */
318 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000), 467 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
319 pm_fake }, 468 trap_raz_wi },
320 /* PMXEVTYPER_EL0 */ 469 /* PMXEVTYPER_EL0 */
321 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001), 470 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
322 pm_fake }, 471 trap_raz_wi },
323 /* PMXEVCNTR_EL0 */ 472 /* PMXEVCNTR_EL0 */
324 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010), 473 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
325 pm_fake }, 474 trap_raz_wi },
326 /* PMUSERENR_EL0 */ 475 /* PMUSERENR_EL0 */
327 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000), 476 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
328 pm_fake }, 477 trap_raz_wi },
329 /* PMOVSSET_EL0 */ 478 /* PMOVSSET_EL0 */
330 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011), 479 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
331 pm_fake }, 480 trap_raz_wi },
332 481
333 /* TPIDR_EL0 */ 482 /* TPIDR_EL0 */
334 { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010), 483 { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
@@ -348,13 +497,161 @@ static const struct sys_reg_desc sys_reg_descs[] = {
348 NULL, reset_val, FPEXC32_EL2, 0x70 }, 497 NULL, reset_val, FPEXC32_EL2, 0x70 },
349}; 498};
350 499
500static bool trap_dbgidr(struct kvm_vcpu *vcpu,
501 const struct sys_reg_params *p,
502 const struct sys_reg_desc *r)
503{
504 if (p->is_write) {
505 return ignore_write(vcpu, p);
506 } else {
507 u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
508 u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
509 u32 el3 = !!((pfr >> 12) & 0xf);
510
511 *vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) |
512 (((dfr >> 12) & 0xf) << 24) |
513 (((dfr >> 28) & 0xf) << 20) |
514 (6 << 16) | (el3 << 14) | (el3 << 12));
515 return true;
516 }
517}
518
519static bool trap_debug32(struct kvm_vcpu *vcpu,
520 const struct sys_reg_params *p,
521 const struct sys_reg_desc *r)
522{
523 if (p->is_write) {
524 vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
525 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
526 } else {
527 *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
528 }
529
530 return true;
531}
532
533#define DBG_BCR_BVR_WCR_WVR(n) \
534 /* DBGBVRn */ \
535 { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32, \
536 NULL, (cp14_DBGBVR0 + (n) * 2) }, \
537 /* DBGBCRn */ \
538 { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32, \
539 NULL, (cp14_DBGBCR0 + (n) * 2) }, \
540 /* DBGWVRn */ \
541 { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32, \
542 NULL, (cp14_DBGWVR0 + (n) * 2) }, \
543 /* DBGWCRn */ \
544 { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32, \
545 NULL, (cp14_DBGWCR0 + (n) * 2) }
546
547#define DBGBXVR(n) \
548 { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32, \
549 NULL, cp14_DBGBXVR0 + n * 2 }
550
551/*
552 * Trapped cp14 registers. We generally ignore most of the external
553 * debug, on the principle that they don't really make sense to a
 554 * guest. Revisit this one day, should this principle change.
555 */
556static const struct sys_reg_desc cp14_regs[] = {
557 /* DBGIDR */
558 { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
559 /* DBGDTRRXext */
560 { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
561
562 DBG_BCR_BVR_WCR_WVR(0),
563 /* DBGDSCRint */
564 { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
565 DBG_BCR_BVR_WCR_WVR(1),
566 /* DBGDCCINT */
567 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
568 /* DBGDSCRext */
569 { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
570 DBG_BCR_BVR_WCR_WVR(2),
571 /* DBGDTR[RT]Xint */
572 { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
573 /* DBGDTR[RT]Xext */
574 { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
575 DBG_BCR_BVR_WCR_WVR(3),
576 DBG_BCR_BVR_WCR_WVR(4),
577 DBG_BCR_BVR_WCR_WVR(5),
578 /* DBGWFAR */
579 { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
580 /* DBGOSECCR */
581 { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
582 DBG_BCR_BVR_WCR_WVR(6),
583 /* DBGVCR */
584 { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
585 DBG_BCR_BVR_WCR_WVR(7),
586 DBG_BCR_BVR_WCR_WVR(8),
587 DBG_BCR_BVR_WCR_WVR(9),
588 DBG_BCR_BVR_WCR_WVR(10),
589 DBG_BCR_BVR_WCR_WVR(11),
590 DBG_BCR_BVR_WCR_WVR(12),
591 DBG_BCR_BVR_WCR_WVR(13),
592 DBG_BCR_BVR_WCR_WVR(14),
593 DBG_BCR_BVR_WCR_WVR(15),
594
595 /* DBGDRAR (32bit) */
596 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
597
598 DBGBXVR(0),
599 /* DBGOSLAR */
600 { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
601 DBGBXVR(1),
602 /* DBGOSLSR */
603 { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
604 DBGBXVR(2),
605 DBGBXVR(3),
606 /* DBGOSDLR */
607 { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
608 DBGBXVR(4),
609 /* DBGPRCR */
610 { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
611 DBGBXVR(5),
612 DBGBXVR(6),
613 DBGBXVR(7),
614 DBGBXVR(8),
615 DBGBXVR(9),
616 DBGBXVR(10),
617 DBGBXVR(11),
618 DBGBXVR(12),
619 DBGBXVR(13),
620 DBGBXVR(14),
621 DBGBXVR(15),
622
623 /* DBGDSAR (32bit) */
624 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
625
626 /* DBGDEVID2 */
627 { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
628 /* DBGDEVID1 */
629 { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
630 /* DBGDEVID */
631 { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
632 /* DBGCLAIMSET */
633 { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
634 /* DBGCLAIMCLR */
635 { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
636 /* DBGAUTHSTATUS */
637 { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
638};
639
640/* Trapped cp14 64bit registers */
641static const struct sys_reg_desc cp14_64_regs[] = {
642 /* DBGDRAR (64bit) */
643 { Op1( 0), CRm( 1), .access = trap_raz_wi },
644
645 /* DBGDSAR (64bit) */
646 { Op1( 0), CRm( 2), .access = trap_raz_wi },
647};
648
351/* 649/*
352 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, 650 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
353 * depending on the way they are accessed (as a 32bit or a 64bit 651 * depending on the way they are accessed (as a 32bit or a 64bit
354 * register). 652 * register).
355 */ 653 */
356static const struct sys_reg_desc cp15_regs[] = { 654static const struct sys_reg_desc cp15_regs[] = {
357 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
358 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR }, 655 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
359 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, 656 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
360 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, 657 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
@@ -374,26 +671,30 @@ static const struct sys_reg_desc cp15_regs[] = {
374 { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw }, 671 { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
375 { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, 672 { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
376 673
377 { Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake }, 674 /* PMU */
378 { Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake }, 675 { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
379 { Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake }, 676 { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
380 { Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake }, 677 { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
381 { Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake }, 678 { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
382 { Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake }, 679 { Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
383 { Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake }, 680 { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
384 { Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake }, 681 { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
385 { Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake }, 682 { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
386 { Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake }, 683 { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
387 { Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake }, 684 { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
388 { Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake }, 685 { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
389 { Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake }, 686 { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
687 { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },
390 688
391 { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR }, 689 { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
392 { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, 690 { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
393 { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 }, 691 { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
394 { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 }, 692 { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
395 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, 693 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
694};
396 695
696static const struct sys_reg_desc cp15_64_regs[] = {
697 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
397 { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 }, 698 { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
398}; 699};
399 700
@@ -454,26 +755,29 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
454 return 1; 755 return 1;
455} 756}
456 757
457int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) 758/*
458{ 759 * emulate_cp -- tries to match a sys_reg access in a handling table, and
459 kvm_inject_undefined(vcpu); 760 * call the corresponding trap handler.
460 return 1; 761 *
461} 762 * @params: pointer to the descriptor of the access
462 763 * @table: array of trap descriptors
463static void emulate_cp15(struct kvm_vcpu *vcpu, 764 * @num: size of the trap descriptor array
464 const struct sys_reg_params *params) 765 *
766 * Return 0 if the access has been handled, and -1 if not.
767 */
768static int emulate_cp(struct kvm_vcpu *vcpu,
769 const struct sys_reg_params *params,
770 const struct sys_reg_desc *table,
771 size_t num)
465{ 772{
466 size_t num; 773 const struct sys_reg_desc *r;
467 const struct sys_reg_desc *table, *r;
468 774
469 table = get_target_table(vcpu->arch.target, false, &num); 775 if (!table)
776 return -1; /* Not handled */
470 777
471 /* Search target-specific then generic table. */
472 r = find_reg(params, table, num); 778 r = find_reg(params, table, num);
473 if (!r)
474 r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));
475 779
476 if (likely(r)) { 780 if (r) {
477 /* 781 /*
478 * Not having an accessor means that we have 782 * Not having an accessor means that we have
479 * configured a trap that we don't know how to 783 * configured a trap that we don't know how to
@@ -485,22 +789,51 @@ static void emulate_cp15(struct kvm_vcpu *vcpu,
485 if (likely(r->access(vcpu, params, r))) { 789 if (likely(r->access(vcpu, params, r))) {
486 /* Skip instruction, since it was emulated */ 790 /* Skip instruction, since it was emulated */
487 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); 791 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
488 return;
489 } 792 }
490 /* If access function fails, it should complain. */ 793
794 /* Handled */
795 return 0;
491 } 796 }
492 797
493 kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu)); 798 /* Not handled */
799 return -1;
800}
801
802static void unhandled_cp_access(struct kvm_vcpu *vcpu,
803 struct sys_reg_params *params)
804{
805 u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
806 int cp;
807
808 switch(hsr_ec) {
809 case ESR_EL2_EC_CP15_32:
810 case ESR_EL2_EC_CP15_64:
811 cp = 15;
812 break;
813 case ESR_EL2_EC_CP14_MR:
814 case ESR_EL2_EC_CP14_64:
815 cp = 14;
816 break;
817 default:
818 WARN_ON((cp = -1));
819 }
820
821 kvm_err("Unsupported guest CP%d access at: %08lx\n",
822 cp, *vcpu_pc(vcpu));
494 print_sys_reg_instr(params); 823 print_sys_reg_instr(params);
495 kvm_inject_undefined(vcpu); 824 kvm_inject_undefined(vcpu);
496} 825}
497 826
498/** 827/**
499 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access 828 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access
500 * @vcpu: The VCPU pointer 829 * @vcpu: The VCPU pointer
501 * @run: The kvm_run struct 830 * @run: The kvm_run struct
502 */ 831 */
503int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) 832static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
833 const struct sys_reg_desc *global,
834 size_t nr_global,
835 const struct sys_reg_desc *target_specific,
836 size_t nr_specific)
504{ 837{
505 struct sys_reg_params params; 838 struct sys_reg_params params;
506 u32 hsr = kvm_vcpu_get_hsr(vcpu); 839 u32 hsr = kvm_vcpu_get_hsr(vcpu);
@@ -529,8 +862,14 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
529 *vcpu_reg(vcpu, params.Rt) = val; 862 *vcpu_reg(vcpu, params.Rt) = val;
530 } 863 }
531 864
532 emulate_cp15(vcpu, &params); 865 if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
866 goto out;
867 if (!emulate_cp(vcpu, &params, global, nr_global))
868 goto out;
533 869
870 unhandled_cp_access(vcpu, &params);
871
872out:
534 /* Do the opposite hack for the read side */ 873 /* Do the opposite hack for the read side */
535 if (!params.is_write) { 874 if (!params.is_write) {
536 u64 val = *vcpu_reg(vcpu, params.Rt); 875 u64 val = *vcpu_reg(vcpu, params.Rt);
@@ -546,7 +885,11 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
546 * @vcpu: The VCPU pointer 885 * @vcpu: The VCPU pointer
547 * @run: The kvm_run struct 886 * @run: The kvm_run struct
548 */ 887 */
549int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) 888static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
889 const struct sys_reg_desc *global,
890 size_t nr_global,
891 const struct sys_reg_desc *target_specific,
892 size_t nr_specific)
550{ 893{
551 struct sys_reg_params params; 894 struct sys_reg_params params;
552 u32 hsr = kvm_vcpu_get_hsr(vcpu); 895 u32 hsr = kvm_vcpu_get_hsr(vcpu);
@@ -561,10 +904,51 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
561 params.Op1 = (hsr >> 14) & 0x7; 904 params.Op1 = (hsr >> 14) & 0x7;
562 params.Op2 = (hsr >> 17) & 0x7; 905 params.Op2 = (hsr >> 17) & 0x7;
563 906
564 emulate_cp15(vcpu, &params); 907 if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
908 return 1;
909 if (!emulate_cp(vcpu, &params, global, nr_global))
910 return 1;
911
912 unhandled_cp_access(vcpu, &params);
565 return 1; 913 return 1;
566} 914}
567 915
916int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
917{
918 const struct sys_reg_desc *target_specific;
919 size_t num;
920
921 target_specific = get_target_table(vcpu->arch.target, false, &num);
922 return kvm_handle_cp_64(vcpu,
923 cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
924 target_specific, num);
925}
926
927int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
928{
929 const struct sys_reg_desc *target_specific;
930 size_t num;
931
932 target_specific = get_target_table(vcpu->arch.target, false, &num);
933 return kvm_handle_cp_32(vcpu,
934 cp15_regs, ARRAY_SIZE(cp15_regs),
935 target_specific, num);
936}
937
938int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
939{
940 return kvm_handle_cp_64(vcpu,
941 cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
942 NULL, 0);
943}
944
945int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
946{
947 return kvm_handle_cp_32(vcpu,
948 cp14_regs, ARRAY_SIZE(cp14_regs),
949 NULL, 0);
950}
951
568static int emulate_sys_reg(struct kvm_vcpu *vcpu, 952static int emulate_sys_reg(struct kvm_vcpu *vcpu,
569 const struct sys_reg_params *params) 953 const struct sys_reg_params *params)
570{ 954{
@@ -776,17 +1160,15 @@ static struct sys_reg_desc invariant_sys_regs[] = {
776 NULL, get_ctr_el0 }, 1160 NULL, get_ctr_el0 },
777}; 1161};
778 1162
779static int reg_from_user(void *val, const void __user *uaddr, u64 id) 1163static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
780{ 1164{
781 /* This Just Works because we are little endian. */
782 if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) 1165 if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
783 return -EFAULT; 1166 return -EFAULT;
784 return 0; 1167 return 0;
785} 1168}
786 1169
787static int reg_to_user(void __user *uaddr, const void *val, u64 id) 1170static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
788{ 1171{
789 /* This Just Works because we are little endian. */
790 if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) 1172 if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
791 return -EFAULT; 1173 return -EFAULT;
792 return 0; 1174 return 0;
@@ -962,7 +1344,7 @@ static unsigned int num_demux_regs(void)
962 1344
963static int write_demux_regids(u64 __user *uindices) 1345static int write_demux_regids(u64 __user *uindices)
964{ 1346{
965 u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; 1347 u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
966 unsigned int i; 1348 unsigned int i;
967 1349
968 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; 1350 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
@@ -1069,14 +1451,32 @@ int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
1069 return write_demux_regids(uindices); 1451 return write_demux_regids(uindices);
1070} 1452}
1071 1453
1454static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
1455{
1456 unsigned int i;
1457
1458 for (i = 1; i < n; i++) {
1459 if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
1460 kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
1461 return 1;
1462 }
1463 }
1464
1465 return 0;
1466}
1467
1072void kvm_sys_reg_table_init(void) 1468void kvm_sys_reg_table_init(void)
1073{ 1469{
1074 unsigned int i; 1470 unsigned int i;
1075 struct sys_reg_desc clidr; 1471 struct sys_reg_desc clidr;
1076 1472
1077 /* Make sure tables are unique and in order. */ 1473 /* Make sure tables are unique and in order. */
1078 for (i = 1; i < ARRAY_SIZE(sys_reg_descs); i++) 1474 BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
1079 BUG_ON(cmp_sys_reg(&sys_reg_descs[i-1], &sys_reg_descs[i]) >= 0); 1475 BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
1476 BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
1477 BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
1478 BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
1479 BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));
1080 1480
1081 /* We abuse the reset function to overwrite the table itself. */ 1481 /* We abuse the reset function to overwrite the table itself. */
1082 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) 1482 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
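Note on the sys_regs.c rework above: it boils down to two ideas — every descriptor table must be sorted (now verified once at init by check_sysreg_table() instead of an open-coded loop over sys_reg_descs only), and a trapped access is resolved by searching the target-specific table before the generic one. Below is a minimal userspace C sketch of that lookup discipline; the struct fields, table contents and names are invented for illustration and are not the kernel's sys_reg_desc.

#include <stdio.h>
#include <stddef.h>

/* Toy stand-in for struct sys_reg_desc: encoding fields plus a name. */
struct reg_desc {
        int op1, crn, crm, op2;
        const char *name;
};

/* Same idea as cmp_sys_reg(): lexicographic compare of the encoding. */
static int cmp_reg(const struct reg_desc *a, const struct reg_desc *b)
{
        if (a->op1 != b->op1) return a->op1 - b->op1;
        if (a->crn != b->crn) return a->crn - b->crn;
        if (a->crm != b->crm) return a->crm - b->crm;
        return a->op2 - b->op2;
}

/* Mirror of check_sysreg_table(): entries must be strictly increasing. */
static int check_table(const struct reg_desc *t, size_t n)
{
        for (size_t i = 1; i < n; i++)
                if (cmp_reg(&t[i - 1], &t[i]) >= 0)
                        return -1;      /* out of order or duplicate */
        return 0;
}

static const struct reg_desc *find_reg(const struct reg_desc *key,
                                       const struct reg_desc *t, size_t n)
{
        for (size_t i = 0; i < n; i++)
                if (!cmp_reg(key, &t[i]))
                        return &t[i];
        return NULL;
}

int main(void)
{
        /* Invented tables: per-target entries override the global ones. */
        const struct reg_desc target[] = { { 0, 1, 0, 1, "target-specific quirk" } };
        const struct reg_desc global[] = { { 0, 1, 0, 0, "generic reg A" },
                                           { 0, 1, 0, 1, "generic reg B" } };
        const struct reg_desc key = { 0, 1, 0, 1, NULL };
        const struct reg_desc *r;

        if (check_table(target, 1) || check_table(global, 2))
                return 1;

        /* Same order as kvm_handle_cp_32(): target-specific first, then global. */
        r = find_reg(&key, target, 1);
        if (!r)
                r = find_reg(&key, global, 2);
        printf("resolved to: %s\n", r ? r->name : "unhandled");
        return 0;
}

The ordering check matters because a sorted, duplicate-free table is what makes the "target-specific overrides global" search well defined; a bad entry is now caught by BUG_ON() in kvm_sys_reg_table_init() instead of silently shadowing another register.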
diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
new file mode 100644
index 000000000000..ae211772f991
--- /dev/null
+++ b/arch/arm64/kvm/vgic-v2-switch.S
@@ -0,0 +1,133 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/linkage.h>
19#include <linux/irqchip/arm-gic.h>
20
21#include <asm/assembler.h>
22#include <asm/memory.h>
23#include <asm/asm-offsets.h>
24#include <asm/kvm.h>
25#include <asm/kvm_asm.h>
26#include <asm/kvm_arm.h>
27#include <asm/kvm_mmu.h>
28
29 .text
30 .pushsection .hyp.text, "ax"
31
32/*
33 * Save the VGIC CPU state into memory
34 * x0: Register pointing to VCPU struct
35 * Do not corrupt x1!!!
36 */
37ENTRY(__save_vgic_v2_state)
38__save_vgic_v2_state:
39 /* Get VGIC VCTRL base into x2 */
40 ldr x2, [x0, #VCPU_KVM]
41 kern_hyp_va x2
42 ldr x2, [x2, #KVM_VGIC_VCTRL]
43 kern_hyp_va x2
44 cbz x2, 2f // disabled
45
46 /* Compute the address of struct vgic_cpu */
47 add x3, x0, #VCPU_VGIC_CPU
48
49 /* Save all interesting registers */
50 ldr w4, [x2, #GICH_HCR]
51 ldr w5, [x2, #GICH_VMCR]
52 ldr w6, [x2, #GICH_MISR]
53 ldr w7, [x2, #GICH_EISR0]
54 ldr w8, [x2, #GICH_EISR1]
55 ldr w9, [x2, #GICH_ELRSR0]
56 ldr w10, [x2, #GICH_ELRSR1]
57 ldr w11, [x2, #GICH_APR]
58CPU_BE( rev w4, w4 )
59CPU_BE( rev w5, w5 )
60CPU_BE( rev w6, w6 )
61CPU_BE( rev w7, w7 )
62CPU_BE( rev w8, w8 )
63CPU_BE( rev w9, w9 )
64CPU_BE( rev w10, w10 )
65CPU_BE( rev w11, w11 )
66
67 str w4, [x3, #VGIC_V2_CPU_HCR]
68 str w5, [x3, #VGIC_V2_CPU_VMCR]
69 str w6, [x3, #VGIC_V2_CPU_MISR]
70 str w7, [x3, #VGIC_V2_CPU_EISR]
71 str w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
72 str w9, [x3, #VGIC_V2_CPU_ELRSR]
73 str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
74 str w11, [x3, #VGIC_V2_CPU_APR]
75
76 /* Clear GICH_HCR */
77 str wzr, [x2, #GICH_HCR]
78
79 /* Save list registers */
80 add x2, x2, #GICH_LR0
81 ldr w4, [x3, #VGIC_CPU_NR_LR]
82 add x3, x3, #VGIC_V2_CPU_LR
831: ldr w5, [x2], #4
84CPU_BE( rev w5, w5 )
85 str w5, [x3], #4
86 sub w4, w4, #1
87 cbnz w4, 1b
882:
89 ret
90ENDPROC(__save_vgic_v2_state)
91
92/*
93 * Restore the VGIC CPU state from memory
94 * x0: Register pointing to VCPU struct
95 */
96ENTRY(__restore_vgic_v2_state)
97__restore_vgic_v2_state:
98 /* Get VGIC VCTRL base into x2 */
99 ldr x2, [x0, #VCPU_KVM]
100 kern_hyp_va x2
101 ldr x2, [x2, #KVM_VGIC_VCTRL]
102 kern_hyp_va x2
103 cbz x2, 2f // disabled
104
105 /* Compute the address of struct vgic_cpu */
106 add x3, x0, #VCPU_VGIC_CPU
107
108 /* We only restore a minimal set of registers */
109 ldr w4, [x3, #VGIC_V2_CPU_HCR]
110 ldr w5, [x3, #VGIC_V2_CPU_VMCR]
111 ldr w6, [x3, #VGIC_V2_CPU_APR]
112CPU_BE( rev w4, w4 )
113CPU_BE( rev w5, w5 )
114CPU_BE( rev w6, w6 )
115
116 str w4, [x2, #GICH_HCR]
117 str w5, [x2, #GICH_VMCR]
118 str w6, [x2, #GICH_APR]
119
120 /* Restore list registers */
121 add x2, x2, #GICH_LR0
122 ldr w4, [x3, #VGIC_CPU_NR_LR]
123 add x3, x3, #VGIC_V2_CPU_LR
1241: ldr w5, [x3], #4
125CPU_BE( rev w5, w5 )
126 str w5, [x2], #4
127 sub w4, w4, #1
128 cbnz w4, 1b
1292:
130 ret
131ENDPROC(__restore_vgic_v2_state)
132
133 .popsection
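One detail worth calling out in the save/restore code above is the CPU_BE(rev ...) lines. The GIC register file is little-endian MMIO, so on a big-endian host a plain 32-bit load comes back byte-reversed, and the rev puts the value into native order before it is stored into struct vgic_v2_cpu_if (and symmetrically on the restore path). A tiny host-independent sketch of the same idea in plain userspace C, using an invented register value:

#include <stdint.h>
#include <stdio.h>

/*
 * A little-endian device register is just a byte sequence; assembling the
 * value byte-by-byte gives the same result on LE and BE hosts, which is
 * what a plain load plus CPU_BE(rev ...) achieves on a big-endian kernel.
 */
static uint32_t read_le32(const uint8_t *mmio)
{
        return (uint32_t)mmio[0] |
               ((uint32_t)mmio[1] << 8) |
               ((uint32_t)mmio[2] << 16) |
               ((uint32_t)mmio[3] << 24);
}

int main(void)
{
        /* Pretend GICH_VMCR holds 0x11223344, stored as little-endian bytes. */
        uint8_t fake_gich_vmcr[4] = { 0x44, 0x33, 0x22, 0x11 };

        printf("value = 0x%08x\n", read_le32(fake_gich_vmcr));
        return 0;
}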
diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S
new file mode 100644
index 000000000000..d16046999e06
--- /dev/null
+++ b/arch/arm64/kvm/vgic-v3-switch.S
@@ -0,0 +1,267 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/linkage.h>
19#include <linux/irqchip/arm-gic-v3.h>
20
21#include <asm/assembler.h>
22#include <asm/memory.h>
23#include <asm/asm-offsets.h>
24#include <asm/kvm.h>
25#include <asm/kvm_asm.h>
26#include <asm/kvm_arm.h>
27
28 .text
29 .pushsection .hyp.text, "ax"
30
31/*
32 * We store LRs in reverse order to let the CPU deal with streaming
33 * access. Use this macro to make it look saner...
34 */
35#define LR_OFFSET(n) (VGIC_V3_CPU_LR + (15 - n) * 8)
36
37/*
38 * Save the VGIC CPU state into memory
39 * x0: Register pointing to VCPU struct
40 * Do not corrupt x1!!!
41 */
42.macro save_vgic_v3_state
43 // Compute the address of struct vgic_cpu
44 add x3, x0, #VCPU_VGIC_CPU
45
46 // Make sure stores to the GIC via the memory mapped interface
47 // are now visible to the system register interface
48 dsb st
49
50 // Save all interesting registers
51 mrs_s x4, ICH_HCR_EL2
52 mrs_s x5, ICH_VMCR_EL2
53 mrs_s x6, ICH_MISR_EL2
54 mrs_s x7, ICH_EISR_EL2
55 mrs_s x8, ICH_ELSR_EL2
56
57 str w4, [x3, #VGIC_V3_CPU_HCR]
58 str w5, [x3, #VGIC_V3_CPU_VMCR]
59 str w6, [x3, #VGIC_V3_CPU_MISR]
60 str w7, [x3, #VGIC_V3_CPU_EISR]
61 str w8, [x3, #VGIC_V3_CPU_ELRSR]
62
63 msr_s ICH_HCR_EL2, xzr
64
65 mrs_s x21, ICH_VTR_EL2
66 mvn w22, w21
67 ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
68
69 adr x24, 1f
70 add x24, x24, x23
71 br x24
72
731:
74 mrs_s x20, ICH_LR15_EL2
75 mrs_s x19, ICH_LR14_EL2
76 mrs_s x18, ICH_LR13_EL2
77 mrs_s x17, ICH_LR12_EL2
78 mrs_s x16, ICH_LR11_EL2
79 mrs_s x15, ICH_LR10_EL2
80 mrs_s x14, ICH_LR9_EL2
81 mrs_s x13, ICH_LR8_EL2
82 mrs_s x12, ICH_LR7_EL2
83 mrs_s x11, ICH_LR6_EL2
84 mrs_s x10, ICH_LR5_EL2
85 mrs_s x9, ICH_LR4_EL2
86 mrs_s x8, ICH_LR3_EL2
87 mrs_s x7, ICH_LR2_EL2
88 mrs_s x6, ICH_LR1_EL2
89 mrs_s x5, ICH_LR0_EL2
90
91 adr x24, 1f
92 add x24, x24, x23
93 br x24
94
951:
96 str x20, [x3, #LR_OFFSET(15)]
97 str x19, [x3, #LR_OFFSET(14)]
98 str x18, [x3, #LR_OFFSET(13)]
99 str x17, [x3, #LR_OFFSET(12)]
100 str x16, [x3, #LR_OFFSET(11)]
101 str x15, [x3, #LR_OFFSET(10)]
102 str x14, [x3, #LR_OFFSET(9)]
103 str x13, [x3, #LR_OFFSET(8)]
104 str x12, [x3, #LR_OFFSET(7)]
105 str x11, [x3, #LR_OFFSET(6)]
106 str x10, [x3, #LR_OFFSET(5)]
107 str x9, [x3, #LR_OFFSET(4)]
108 str x8, [x3, #LR_OFFSET(3)]
109 str x7, [x3, #LR_OFFSET(2)]
110 str x6, [x3, #LR_OFFSET(1)]
111 str x5, [x3, #LR_OFFSET(0)]
112
113 tbnz w21, #29, 6f // 6 bits
114 tbz w21, #30, 5f // 5 bits
115 // 7 bits
116 mrs_s x20, ICH_AP0R3_EL2
117 str w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
118 mrs_s x19, ICH_AP0R2_EL2
119 str w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
1206: mrs_s x18, ICH_AP0R1_EL2
121 str w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
1225: mrs_s x17, ICH_AP0R0_EL2
123 str w17, [x3, #VGIC_V3_CPU_AP0R]
124
125 tbnz w21, #29, 6f // 6 bits
126 tbz w21, #30, 5f // 5 bits
127 // 7 bits
128 mrs_s x20, ICH_AP1R3_EL2
129 str w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
130 mrs_s x19, ICH_AP1R2_EL2
131 str w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
1326: mrs_s x18, ICH_AP1R1_EL2
133 str w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
1345: mrs_s x17, ICH_AP1R0_EL2
135 str w17, [x3, #VGIC_V3_CPU_AP1R]
136
137 // Restore SRE_EL1 access and re-enable SRE at EL1.
138 mrs_s x5, ICC_SRE_EL2
139 orr x5, x5, #ICC_SRE_EL2_ENABLE
140 msr_s ICC_SRE_EL2, x5
141 isb
142 mov x5, #1
143 msr_s ICC_SRE_EL1, x5
144.endm
145
146/*
147 * Restore the VGIC CPU state from memory
148 * x0: Register pointing to VCPU struct
149 */
150.macro restore_vgic_v3_state
151 // Disable SRE_EL1 access. Necessary, otherwise
152 // ICH_VMCR_EL2.VFIQEn becomes one, and FIQ happens...
153 msr_s ICC_SRE_EL1, xzr
154 isb
155
156 // Compute the address of struct vgic_cpu
157 add x3, x0, #VCPU_VGIC_CPU
158
159 // Restore all interesting registers
160 ldr w4, [x3, #VGIC_V3_CPU_HCR]
161 ldr w5, [x3, #VGIC_V3_CPU_VMCR]
162
163 msr_s ICH_HCR_EL2, x4
164 msr_s ICH_VMCR_EL2, x5
165
166 mrs_s x21, ICH_VTR_EL2
167
168 tbnz w21, #29, 6f // 6 bits
169 tbz w21, #30, 5f // 5 bits
170 // 7 bits
171 ldr w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
172 msr_s ICH_AP1R3_EL2, x20
173 ldr w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
174 msr_s ICH_AP1R2_EL2, x19
1756: ldr w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
176 msr_s ICH_AP1R1_EL2, x18
1775: ldr w17, [x3, #VGIC_V3_CPU_AP1R]
178 msr_s ICH_AP1R0_EL2, x17
179
180 tbnz w21, #29, 6f // 6 bits
181 tbz w21, #30, 5f // 5 bits
182 // 7 bits
183 ldr w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
184 msr_s ICH_AP0R3_EL2, x20
185 ldr w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
186 msr_s ICH_AP0R2_EL2, x19
1876: ldr w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
188 msr_s ICH_AP0R1_EL2, x18
1895: ldr w17, [x3, #VGIC_V3_CPU_AP0R]
190 msr_s ICH_AP0R0_EL2, x17
191
192 and w22, w21, #0xf
193 mvn w22, w21
194 ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
195
196 adr x24, 1f
197 add x24, x24, x23
198 br x24
199
2001:
201 ldr x20, [x3, #LR_OFFSET(15)]
202 ldr x19, [x3, #LR_OFFSET(14)]
203 ldr x18, [x3, #LR_OFFSET(13)]
204 ldr x17, [x3, #LR_OFFSET(12)]
205 ldr x16, [x3, #LR_OFFSET(11)]
206 ldr x15, [x3, #LR_OFFSET(10)]
207 ldr x14, [x3, #LR_OFFSET(9)]
208 ldr x13, [x3, #LR_OFFSET(8)]
209 ldr x12, [x3, #LR_OFFSET(7)]
210 ldr x11, [x3, #LR_OFFSET(6)]
211 ldr x10, [x3, #LR_OFFSET(5)]
212 ldr x9, [x3, #LR_OFFSET(4)]
213 ldr x8, [x3, #LR_OFFSET(3)]
214 ldr x7, [x3, #LR_OFFSET(2)]
215 ldr x6, [x3, #LR_OFFSET(1)]
216 ldr x5, [x3, #LR_OFFSET(0)]
217
218 adr x24, 1f
219 add x24, x24, x23
220 br x24
221
2221:
223 msr_s ICH_LR15_EL2, x20
224 msr_s ICH_LR14_EL2, x19
225 msr_s ICH_LR13_EL2, x18
226 msr_s ICH_LR12_EL2, x17
227 msr_s ICH_LR11_EL2, x16
228 msr_s ICH_LR10_EL2, x15
229 msr_s ICH_LR9_EL2, x14
230 msr_s ICH_LR8_EL2, x13
231 msr_s ICH_LR7_EL2, x12
232 msr_s ICH_LR6_EL2, x11
233 msr_s ICH_LR5_EL2, x10
234 msr_s ICH_LR4_EL2, x9
235 msr_s ICH_LR3_EL2, x8
236 msr_s ICH_LR2_EL2, x7
237 msr_s ICH_LR1_EL2, x6
238 msr_s ICH_LR0_EL2, x5
239
240 // Ensure that the above will have reached the
241 // (re)distributors. This ensures the guest will read
242 // the correct values from the memory-mapped interface.
243 isb
244 dsb sy
245
246 // Prevent the guest from touching the GIC system registers
247 mrs_s x5, ICC_SRE_EL2
248 and x5, x5, #~ICC_SRE_EL2_ENABLE
249 msr_s ICC_SRE_EL2, x5
250.endm
251
252ENTRY(__save_vgic_v3_state)
253 save_vgic_v3_state
254 ret
255ENDPROC(__save_vgic_v3_state)
256
257ENTRY(__restore_vgic_v3_state)
258 restore_vgic_v3_state
259 ret
260ENDPROC(__restore_vgic_v3_state)
261
262ENTRY(__vgic_v3_get_ich_vtr_el2)
263 mrs_s x0, ICH_VTR_EL2
264 ret
265ENDPROC(__vgic_v3_get_ich_vtr_el2)
266
267 .popsection
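Two pieces of arithmetic in the GICv3 world switch above are easy to misread. First, LRs are saved in reverse order — LR_OFFSET(n) is VGIC_V3_CPU_LR + (15 - n) * 8 — and this has to agree with the LR_INDEX() macro the C side in vgic-v3.c uses. Second, the mvn/ubfiz pair computes (~ICH_VTR & 0xf) << 2, i.e. (15 - ListRegs) * 4: the number of bytes of 4-byte mrs_s/str instructions to branch over for unimplemented list registers. A small standalone C check of both identities (the VGIC_V3_CPU_LR base is dropped here since it is only an additive constant):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VGIC_V3_MAX_LRS 16

/* Reverse storage used by the hyp code and by vgic-v3.c, minus the base offset. */
#define LR_OFFSET(n)    ((15 - (n)) * 8)                /* byte offset in the save area */
#define LR_INDEX(lr)    (VGIC_V3_MAX_LRS - 1 - (lr))    /* array index used from C */

int main(void)
{
        /* The two views must agree: array index * 8 == byte offset. */
        for (int lr = 0; lr < VGIC_V3_MAX_LRS; lr++)
                assert(LR_INDEX(lr) * 8 == LR_OFFSET(lr));

        /* mvn + ubfiz trick: (~vtr & 0xf) << 2 == (15 - ListRegs) * 4. */
        for (uint32_t vtr = 0; vtr < 16; vtr++) {
                uint32_t listregs = vtr & 0xf;
                assert(((~vtr & 0xf) << 2) == (15 - listregs) * 4);
        }

        printf("reverse LR layout and branch-offset math check out\n");
        return 0;
}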
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 6d9aeddc09bf..ad9db6045b2f 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -67,6 +67,10 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
67void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu); 67void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
68void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu); 68void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
69void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu); 69void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
70
71u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
72int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
73
70#else 74#else
71static inline int kvm_timer_hyp_init(void) 75static inline int kvm_timer_hyp_init(void)
72{ 76{
@@ -84,6 +88,16 @@ static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
84static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {} 88static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {}
85static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {} 89static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {}
86static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {} 90static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {}
91
92static inline int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
93{
94 return 0;
95}
96
97static inline u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
98{
99 return 0;
100}
87#endif 101#endif
88 102
89#endif 103#endif
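The arm_arch_timer.h change follows the usual compile-out pattern: real prototypes when the timer support is built in, static inline no-op stubs otherwise, so code that calls kvm_arm_timer_get_reg()/set_reg() compiles either way. A trivial standalone illustration of the pattern — the HAVE_TIMER macro and the function name are invented:

#include <stdio.h>

/* Toggle to mimic the timer config option being enabled or not. */
#define HAVE_TIMER 0

#if HAVE_TIMER
unsigned long long timer_get_reg(int regid);    /* real implementation elsewhere */
#else
/* Compiled-out case: a static inline stub keeps every caller building. */
static inline unsigned long long timer_get_reg(int regid)
{
        (void)regid;
        return 0;
}
#endif

int main(void)
{
        printf("timer reg 0 = %llu\n", timer_get_reg(0));
        return 0;
}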
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index f27000f55a83..35b0c121bb65 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -24,7 +24,6 @@
24#include <linux/irqreturn.h> 24#include <linux/irqreturn.h>
25#include <linux/spinlock.h> 25#include <linux/spinlock.h>
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/irqchip/arm-gic.h>
28 27
29#define VGIC_NR_IRQS 256 28#define VGIC_NR_IRQS 256
30#define VGIC_NR_SGIS 16 29#define VGIC_NR_SGIS 16
@@ -32,7 +31,9 @@
32#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS) 31#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS)
33#define VGIC_NR_SHARED_IRQS (VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS) 32#define VGIC_NR_SHARED_IRQS (VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS)
34#define VGIC_MAX_CPUS KVM_MAX_VCPUS 33#define VGIC_MAX_CPUS KVM_MAX_VCPUS
35#define VGIC_MAX_LRS (1 << 6) 34
35#define VGIC_V2_MAX_LRS (1 << 6)
36#define VGIC_V3_MAX_LRS 16
36 37
37/* Sanity checks... */ 38/* Sanity checks... */
38#if (VGIC_MAX_CPUS > 8) 39#if (VGIC_MAX_CPUS > 8)
@@ -68,9 +69,62 @@ struct vgic_bytemap {
68 u32 shared[VGIC_NR_SHARED_IRQS / 4]; 69 u32 shared[VGIC_NR_SHARED_IRQS / 4];
69}; 70};
70 71
72struct kvm_vcpu;
73
74enum vgic_type {
75 VGIC_V2, /* Good ol' GICv2 */
76 VGIC_V3, /* New fancy GICv3 */
77};
78
79#define LR_STATE_PENDING (1 << 0)
80#define LR_STATE_ACTIVE (1 << 1)
81#define LR_STATE_MASK (3 << 0)
82#define LR_EOI_INT (1 << 2)
83
84struct vgic_lr {
85 u16 irq;
86 u8 source;
87 u8 state;
88};
89
90struct vgic_vmcr {
91 u32 ctlr;
92 u32 abpr;
93 u32 bpr;
94 u32 pmr;
95};
96
97struct vgic_ops {
98 struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
99 void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
100 void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
101 u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
102 u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
103 u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
104 void (*enable_underflow)(struct kvm_vcpu *vcpu);
105 void (*disable_underflow)(struct kvm_vcpu *vcpu);
106 void (*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
107 void (*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
108 void (*enable)(struct kvm_vcpu *vcpu);
109};
110
111struct vgic_params {
112 /* vgic type */
113 enum vgic_type type;
114 /* Physical address of vgic virtual cpu interface */
115 phys_addr_t vcpu_base;
116 /* Number of list registers */
117 u32 nr_lr;
118 /* Interrupt number */
119 unsigned int maint_irq;
120 /* Virtual control interface base address */
121 void __iomem *vctrl_base;
122};
123
71struct vgic_dist { 124struct vgic_dist {
72#ifdef CONFIG_KVM_ARM_VGIC 125#ifdef CONFIG_KVM_ARM_VGIC
73 spinlock_t lock; 126 spinlock_t lock;
127 bool in_kernel;
74 bool ready; 128 bool ready;
75 129
76 /* Virtual control interface mapping */ 130 /* Virtual control interface mapping */
@@ -110,6 +164,29 @@ struct vgic_dist {
110#endif 164#endif
111}; 165};
112 166
167struct vgic_v2_cpu_if {
168 u32 vgic_hcr;
169 u32 vgic_vmcr;
170 u32 vgic_misr; /* Saved only */
171 u32 vgic_eisr[2]; /* Saved only */
172 u32 vgic_elrsr[2]; /* Saved only */
173 u32 vgic_apr;
174 u32 vgic_lr[VGIC_V2_MAX_LRS];
175};
176
177struct vgic_v3_cpu_if {
178#ifdef CONFIG_ARM_GIC_V3
179 u32 vgic_hcr;
180 u32 vgic_vmcr;
181 u32 vgic_misr; /* Saved only */
182 u32 vgic_eisr; /* Saved only */
183 u32 vgic_elrsr; /* Saved only */
184 u32 vgic_ap0r[4];
185 u32 vgic_ap1r[4];
186 u64 vgic_lr[VGIC_V3_MAX_LRS];
187#endif
188};
189
113struct vgic_cpu { 190struct vgic_cpu {
114#ifdef CONFIG_KVM_ARM_VGIC 191#ifdef CONFIG_KVM_ARM_VGIC
115 /* per IRQ to LR mapping */ 192 /* per IRQ to LR mapping */
@@ -120,24 +197,24 @@ struct vgic_cpu {
120 DECLARE_BITMAP( pending_shared, VGIC_NR_SHARED_IRQS); 197 DECLARE_BITMAP( pending_shared, VGIC_NR_SHARED_IRQS);
121 198
122 /* Bitmap of used/free list registers */ 199 /* Bitmap of used/free list registers */
123 DECLARE_BITMAP( lr_used, VGIC_MAX_LRS); 200 DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS);
124 201
125 /* Number of list registers on this CPU */ 202 /* Number of list registers on this CPU */
126 int nr_lr; 203 int nr_lr;
127 204
128 /* CPU vif control registers for world switch */ 205 /* CPU vif control registers for world switch */
129 u32 vgic_hcr; 206 union {
130 u32 vgic_vmcr; 207 struct vgic_v2_cpu_if vgic_v2;
131 u32 vgic_misr; /* Saved only */ 208 struct vgic_v3_cpu_if vgic_v3;
132 u32 vgic_eisr[2]; /* Saved only */ 209 };
133 u32 vgic_elrsr[2]; /* Saved only */
134 u32 vgic_apr;
135 u32 vgic_lr[VGIC_MAX_LRS];
136#endif 210#endif
137}; 211};
138 212
139#define LR_EMPTY 0xff 213#define LR_EMPTY 0xff
140 214
215#define INT_STATUS_EOI (1 << 0)
216#define INT_STATUS_UNDERFLOW (1 << 1)
217
141struct kvm; 218struct kvm;
142struct kvm_vcpu; 219struct kvm_vcpu;
143struct kvm_run; 220struct kvm_run;
@@ -157,9 +234,25 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
157bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, 234bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
158 struct kvm_exit_mmio *mmio); 235 struct kvm_exit_mmio *mmio);
159 236
160#define irqchip_in_kernel(k) (!!((k)->arch.vgic.vctrl_base)) 237#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
161#define vgic_initialized(k) ((k)->arch.vgic.ready) 238#define vgic_initialized(k) ((k)->arch.vgic.ready)
162 239
240int vgic_v2_probe(struct device_node *vgic_node,
241 const struct vgic_ops **ops,
242 const struct vgic_params **params);
243#ifdef CONFIG_ARM_GIC_V3
244int vgic_v3_probe(struct device_node *vgic_node,
245 const struct vgic_ops **ops,
246 const struct vgic_params **params);
247#else
248static inline int vgic_v3_probe(struct device_node *vgic_node,
249 const struct vgic_ops **ops,
250 const struct vgic_params **params)
251{
252 return -ENODEV;
253}
254#endif
255
163#else 256#else
164static inline int kvm_vgic_hyp_init(void) 257static inline int kvm_vgic_hyp_init(void)
165{ 258{
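The new struct vgic_ops / struct vgic_params pair is the heart of "VGIC v2 emulation on GICv3 hardware": the distributor emulation in vgic.c only ever touches list registers and maintenance status through a function-pointer table, and the probe step fills in either the GICv2 or the GICv3 backend. A stripped-down sketch of that indirection in standalone C — the struct layout, names and values are invented stand-ins, not the kernel's:

#include <stdio.h>

/* Cut-down analogue of struct vgic_ops: one indirection table per GIC flavour. */
struct toy_vgic_ops {
        const char *name;
        unsigned int (*get_lr)(int lr);
        void (*enable)(void);
};

static unsigned int v2_get_lr(int lr) { return 0x20000000u | lr; }
static void v2_enable(void) { puts("GICv2 backend enabled"); }

static unsigned int v3_get_lr(int lr) { return 0x30000000u | lr; }
static void v3_enable(void) { puts("GICv3 backend enabled"); }

static const struct toy_vgic_ops v2_ops = { "vgic-v2", v2_get_lr, v2_enable };
static const struct toy_vgic_ops v3_ops = { "vgic-v3", v3_get_lr, v3_enable };

/* Stands in for the probe step: pick exactly one backend, then the core
 * code only ever calls through the pointer, as virt/kvm/arm/vgic.c now does. */
static const struct toy_vgic_ops *vgic_ops;

int main(void)
{
        int have_gicv3 = 0;     /* pretend the DT matched the GICv2 compatible */

        vgic_ops = have_gicv3 ? &v3_ops : &v2_ops;
        vgic_ops->enable();
        printf("%s: LR0 = %#x\n", vgic_ops->name, vgic_ops->get_lr(0));
        return 0;
}

The same shape shows up again below in vgic.c, where vgic_get_lr(), vgic_set_lr() and friends are thin wrappers around vgic_ops.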
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
new file mode 100644
index 000000000000..01124ef3690a
--- /dev/null
+++ b/virt/kvm/arm/vgic-v2.c
@@ -0,0 +1,265 @@
1/*
2 * Copyright (C) 2012,2013 ARM Limited, All Rights Reserved.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/cpu.h>
19#include <linux/kvm.h>
20#include <linux/kvm_host.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/of.h>
24#include <linux/of_address.h>
25#include <linux/of_irq.h>
26
27#include <linux/irqchip/arm-gic.h>
28
29#include <asm/kvm_emulate.h>
30#include <asm/kvm_arm.h>
31#include <asm/kvm_mmu.h>
32
33static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
34{
35 struct vgic_lr lr_desc;
36 u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];
37
38 lr_desc.irq = val & GICH_LR_VIRTUALID;
39 if (lr_desc.irq <= 15)
40 lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
41 else
42 lr_desc.source = 0;
43 lr_desc.state = 0;
44
45 if (val & GICH_LR_PENDING_BIT)
46 lr_desc.state |= LR_STATE_PENDING;
47 if (val & GICH_LR_ACTIVE_BIT)
48 lr_desc.state |= LR_STATE_ACTIVE;
49 if (val & GICH_LR_EOI)
50 lr_desc.state |= LR_EOI_INT;
51
52 return lr_desc;
53}
54
55static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
56 struct vgic_lr lr_desc)
57{
58 u32 lr_val = (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) | lr_desc.irq;
59
60 if (lr_desc.state & LR_STATE_PENDING)
61 lr_val |= GICH_LR_PENDING_BIT;
62 if (lr_desc.state & LR_STATE_ACTIVE)
63 lr_val |= GICH_LR_ACTIVE_BIT;
64 if (lr_desc.state & LR_EOI_INT)
65 lr_val |= GICH_LR_EOI;
66
67 vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
68}
69
70static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
71 struct vgic_lr lr_desc)
72{
73 if (!(lr_desc.state & LR_STATE_MASK))
74 set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
75}
76
77static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
78{
79 u64 val;
80
81#if BITS_PER_LONG == 64
82 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[1];
83 val <<= 32;
84 val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[0];
85#else
86 val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
87#endif
88 return val;
89}
90
91static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
92{
93 u64 val;
94
95#if BITS_PER_LONG == 64
96 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[1];
97 val <<= 32;
98 val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[0];
99#else
100 val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
101#endif
102 return val;
103}
104
105static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
106{
107 u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
108 u32 ret = 0;
109
110 if (misr & GICH_MISR_EOI)
111 ret |= INT_STATUS_EOI;
112 if (misr & GICH_MISR_U)
113 ret |= INT_STATUS_UNDERFLOW;
114
115 return ret;
116}
117
118static void vgic_v2_enable_underflow(struct kvm_vcpu *vcpu)
119{
120 vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE;
121}
122
123static void vgic_v2_disable_underflow(struct kvm_vcpu *vcpu)
124{
125 vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
126}
127
128static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
129{
130 u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
131
132 vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
133 vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
134 vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
135 vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
136}
137
138static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
139{
140 u32 vmcr;
141
142 vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
143 vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
144 vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
145 vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
146
147 vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
148}
149
150static void vgic_v2_enable(struct kvm_vcpu *vcpu)
151{
152 /*
153 * By forcing VMCR to zero, the GIC will restore the binary
154 * points to their reset values. Anything else resets to zero
155 * anyway.
156 */
157 vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
158
159 /* Get the show on the road... */
160 vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
161}
162
163static const struct vgic_ops vgic_v2_ops = {
164 .get_lr = vgic_v2_get_lr,
165 .set_lr = vgic_v2_set_lr,
166 .sync_lr_elrsr = vgic_v2_sync_lr_elrsr,
167 .get_elrsr = vgic_v2_get_elrsr,
168 .get_eisr = vgic_v2_get_eisr,
169 .get_interrupt_status = vgic_v2_get_interrupt_status,
170 .enable_underflow = vgic_v2_enable_underflow,
171 .disable_underflow = vgic_v2_disable_underflow,
172 .get_vmcr = vgic_v2_get_vmcr,
173 .set_vmcr = vgic_v2_set_vmcr,
174 .enable = vgic_v2_enable,
175};
176
177static struct vgic_params vgic_v2_params;
178
179/**
180 * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT
181 * @node: pointer to the DT node
182 * @ops: address of a pointer to the GICv2 operations
183 * @params: address of a pointer to HW-specific parameters
184 *
185 * Returns 0 if a GICv2 has been found, with the low level operations
186 * in *ops and the HW parameters in *params. Returns an error code
187 * otherwise.
188 */
189int vgic_v2_probe(struct device_node *vgic_node,
190 const struct vgic_ops **ops,
191 const struct vgic_params **params)
192{
193 int ret;
194 struct resource vctrl_res;
195 struct resource vcpu_res;
196 struct vgic_params *vgic = &vgic_v2_params;
197
198 vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0);
199 if (!vgic->maint_irq) {
200 kvm_err("error getting vgic maintenance irq from DT\n");
201 ret = -ENXIO;
202 goto out;
203 }
204
205 ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
206 if (ret) {
207 kvm_err("Cannot obtain GICH resource\n");
208 goto out;
209 }
210
211 vgic->vctrl_base = of_iomap(vgic_node, 2);
212 if (!vgic->vctrl_base) {
213 kvm_err("Cannot ioremap GICH\n");
214 ret = -ENOMEM;
215 goto out;
216 }
217
218 vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR);
219 vgic->nr_lr = (vgic->nr_lr & 0x3f) + 1;
220
221 ret = create_hyp_io_mappings(vgic->vctrl_base,
222 vgic->vctrl_base + resource_size(&vctrl_res),
223 vctrl_res.start);
224 if (ret) {
225 kvm_err("Cannot map VCTRL into hyp\n");
226 goto out_unmap;
227 }
228
229 if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
230 kvm_err("Cannot obtain GICV resource\n");
231 ret = -ENXIO;
232 goto out_unmap;
233 }
234
235 if (!PAGE_ALIGNED(vcpu_res.start)) {
236 kvm_err("GICV physical address 0x%llx not page aligned\n",
237 (unsigned long long)vcpu_res.start);
238 ret = -ENXIO;
239 goto out_unmap;
240 }
241
242 if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
243 kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
244 (unsigned long long)resource_size(&vcpu_res),
245 PAGE_SIZE);
246 ret = -ENXIO;
247 goto out_unmap;
248 }
249
250 vgic->vcpu_base = vcpu_res.start;
251
252 kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
253 vctrl_res.start, vgic->maint_irq);
254
255 vgic->type = VGIC_V2;
256 *ops = &vgic_v2_ops;
257 *params = vgic;
258 goto out;
259
260out_unmap:
261 iounmap(vgic->vctrl_base);
262out:
263 of_node_put(vgic_node);
264 return ret;
265}
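Two small but easy-to-miss details in the GICv2 probe above: GICH_VTR encodes the number of list registers minus one in its low six bits, hence the (val & 0x3f) + 1; and the GICV region must be page aligned in both start and size because it is mapped straight through to the guest at stage 2. A quick standalone check of both, using made-up addresses:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* GICH_VTR.ListRegs is "number of list registers minus one" in bits [5:0]. */
static unsigned int gich_vtr_to_nr_lr(uint32_t vtr)
{
        return (vtr & 0x3f) + 1;
}

/* The probe's PAGE_ALIGNED() checks enforce exactly this on start and size. */
static int region_usable(uint64_t start, uint64_t size, uint64_t page_size)
{
        return (start % page_size) == 0 && (size % page_size) == 0;
}

int main(void)
{
        assert(gich_vtr_to_nr_lr(0x3) == 4);    /* 4 LRs -> VTR[5:0] = 3 */
        assert(region_usable(0x2c02f000ULL, 0x2000, 4096));
        assert(!region_usable(0x2c02f800ULL, 0x2000, 4096));
        printf("GICH_VTR decode and GICV alignment checks behave as expected\n");
        return 0;
}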
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
new file mode 100644
index 000000000000..1c2c8eef0599
--- /dev/null
+++ b/virt/kvm/arm/vgic-v3.c
@@ -0,0 +1,247 @@
1/*
2 * Copyright (C) 2013 ARM Limited, All Rights Reserved.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/cpu.h>
19#include <linux/kvm.h>
20#include <linux/kvm_host.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/of.h>
24#include <linux/of_address.h>
25#include <linux/of_irq.h>
26
27#include <linux/irqchip/arm-gic-v3.h>
28
29#include <asm/kvm_emulate.h>
30#include <asm/kvm_arm.h>
31#include <asm/kvm_mmu.h>
32
33/* These are for GICv2 emulation only */
34#define GICH_LR_VIRTUALID (0x3ffUL << 0)
35#define GICH_LR_PHYSID_CPUID_SHIFT (10)
36#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
37
38/*
39 * LRs are stored in reverse order in memory. Make sure we index them
40 * correctly.
41 */
42#define LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr)
43
44static u32 ich_vtr_el2;
45
46static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
47{
48 struct vgic_lr lr_desc;
49 u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)];
50
51 lr_desc.irq = val & GICH_LR_VIRTUALID;
52 if (lr_desc.irq <= 15)
53 lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
54 else
55 lr_desc.source = 0;
56 lr_desc.state = 0;
57
58 if (val & ICH_LR_PENDING_BIT)
59 lr_desc.state |= LR_STATE_PENDING;
60 if (val & ICH_LR_ACTIVE_BIT)
61 lr_desc.state |= LR_STATE_ACTIVE;
62 if (val & ICH_LR_EOI)
63 lr_desc.state |= LR_EOI_INT;
64
65 return lr_desc;
66}
67
68static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
69 struct vgic_lr lr_desc)
70{
71 u64 lr_val = (((u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) |
72 lr_desc.irq);
73
74 if (lr_desc.state & LR_STATE_PENDING)
75 lr_val |= ICH_LR_PENDING_BIT;
76 if (lr_desc.state & LR_STATE_ACTIVE)
77 lr_val |= ICH_LR_ACTIVE_BIT;
78 if (lr_desc.state & LR_EOI_INT)
79 lr_val |= ICH_LR_EOI;
80
81 vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
82}
83
84static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
85 struct vgic_lr lr_desc)
86{
87 if (!(lr_desc.state & LR_STATE_MASK))
88 vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
89}
90
91static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu)
92{
93 return vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr;
94}
95
96static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu)
97{
98 return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr;
99}
100
101static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu)
102{
103 u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr;
104 u32 ret = 0;
105
106 if (misr & ICH_MISR_EOI)
107 ret |= INT_STATUS_EOI;
108 if (misr & ICH_MISR_U)
109 ret |= INT_STATUS_UNDERFLOW;
110
111 return ret;
112}
113
114static void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
115{
116 u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr;
117
118 vmcrp->ctlr = (vmcr & ICH_VMCR_CTLR_MASK) >> ICH_VMCR_CTLR_SHIFT;
119 vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
120 vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
121 vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
122}
123
124static void vgic_v3_enable_underflow(struct kvm_vcpu *vcpu)
125{
126 vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr |= ICH_HCR_UIE;
127}
128
129static void vgic_v3_disable_underflow(struct kvm_vcpu *vcpu)
130{
131 vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr &= ~ICH_HCR_UIE;
132}
133
134static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
135{
136 u32 vmcr;
137
138 vmcr = (vmcrp->ctlr << ICH_VMCR_CTLR_SHIFT) & ICH_VMCR_CTLR_MASK;
139 vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
140 vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
141 vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
142
143 vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr;
144}
145
146static void vgic_v3_enable(struct kvm_vcpu *vcpu)
147{
148 /*
149 * By forcing VMCR to zero, the GIC will restore the binary
150 * points to their reset values. Anything else resets to zero
151 * anyway.
152 */
153 vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = 0;
154
155 /* Get the show on the road... */
156 vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr = ICH_HCR_EN;
157}
158
159static const struct vgic_ops vgic_v3_ops = {
160 .get_lr = vgic_v3_get_lr,
161 .set_lr = vgic_v3_set_lr,
162 .sync_lr_elrsr = vgic_v3_sync_lr_elrsr,
163 .get_elrsr = vgic_v3_get_elrsr,
164 .get_eisr = vgic_v3_get_eisr,
165 .get_interrupt_status = vgic_v3_get_interrupt_status,
166 .enable_underflow = vgic_v3_enable_underflow,
167 .disable_underflow = vgic_v3_disable_underflow,
168 .get_vmcr = vgic_v3_get_vmcr,
169 .set_vmcr = vgic_v3_set_vmcr,
170 .enable = vgic_v3_enable,
171};
172
173static struct vgic_params vgic_v3_params;
174
175/**
176 * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
177 * @node: pointer to the DT node
178 * @ops: address of a pointer to the GICv3 operations
179 * @params: address of a pointer to HW-specific parameters
180 *
181 * Returns 0 if a GICv3 has been found, with the low level operations
182 * in *ops and the HW parameters in *params. Returns an error code
183 * otherwise.
184 */
185int vgic_v3_probe(struct device_node *vgic_node,
186 const struct vgic_ops **ops,
187 const struct vgic_params **params)
188{
189 int ret = 0;
190 u32 gicv_idx;
191 struct resource vcpu_res;
192 struct vgic_params *vgic = &vgic_v3_params;
193
194 vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0);
195 if (!vgic->maint_irq) {
196 kvm_err("error getting vgic maintenance irq from DT\n");
197 ret = -ENXIO;
198 goto out;
199 }
200
201 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
202
203 /*
204 * The ListRegs field is 5 bits, but there is an architectural
205 * maximum of 16 list registers. Just ignore bit 4...
206 */
207 vgic->nr_lr = (ich_vtr_el2 & 0xf) + 1;
208
209 if (of_property_read_u32(vgic_node, "#redistributor-regions", &gicv_idx))
210 gicv_idx = 1;
211
212 gicv_idx += 3; /* Also skip GICD, GICC, GICH */
213 if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) {
214 kvm_err("Cannot obtain GICV region\n");
215 ret = -ENXIO;
216 goto out;
217 }
218
219 if (!PAGE_ALIGNED(vcpu_res.start)) {
220 kvm_err("GICV physical address 0x%llx not page aligned\n",
221 (unsigned long long)vcpu_res.start);
222 ret = -ENXIO;
223 goto out;
224 }
225
226 if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
227 kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
228 (unsigned long long)resource_size(&vcpu_res),
229 PAGE_SIZE);
230 ret = -ENXIO;
231 goto out;
232 }
233
234 vgic->vcpu_base = vcpu_res.start;
235 vgic->vctrl_base = NULL;
236 vgic->type = VGIC_V3;
237
238 kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
239 vcpu_res.start, vgic->maint_irq);
240
241 *ops = &vgic_v3_ops;
242 *params = vgic;
243
244out:
245 of_node_put(vgic_node);
246 return ret;
247}
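vgic-v3.c converts between the generic struct vgic_lr (irq, source, state flags) and the 64-bit hardware list-register value; vgic-v2.c does the same against a 32-bit GICH_LR. The conversion is just a pack/unpack pair, sketched below in standalone C — the TOY_LR_* bit positions are invented placeholders, not the real ICH_LR<n>_EL2 layout, while the LR_STATE_* flags match the ones added to arm_vgic.h above:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Software view, as introduced in include/kvm/arm_vgic.h above. */
#define LR_STATE_PENDING        (1 << 0)
#define LR_STATE_ACTIVE         (1 << 1)
#define LR_EOI_INT              (1 << 2)

struct vgic_lr {
        uint16_t irq;
        uint8_t source;
        uint8_t state;
};

/* Toy hardware layout: invented bit positions, for illustration only. */
#define TOY_LR_VIRTUALID        0x3ffULL
#define TOY_LR_CPUID_SHIFT      10
#define TOY_LR_PENDING          (1ULL << 62)
#define TOY_LR_ACTIVE           (1ULL << 63)
#define TOY_LR_EOI              (1ULL << 41)

static uint64_t pack_lr(struct vgic_lr lr)
{
        uint64_t val = ((uint64_t)lr.source << TOY_LR_CPUID_SHIFT) | lr.irq;

        if (lr.state & LR_STATE_PENDING)
                val |= TOY_LR_PENDING;
        if (lr.state & LR_STATE_ACTIVE)
                val |= TOY_LR_ACTIVE;
        if (lr.state & LR_EOI_INT)
                val |= TOY_LR_EOI;
        return val;
}

static struct vgic_lr unpack_lr(uint64_t val)
{
        struct vgic_lr lr = {
                .irq = val & TOY_LR_VIRTUALID,
                .source = (val >> TOY_LR_CPUID_SHIFT) & 0x7,
                .state = 0,
        };

        if (val & TOY_LR_PENDING)
                lr.state |= LR_STATE_PENDING;
        if (val & TOY_LR_ACTIVE)
                lr.state |= LR_STATE_ACTIVE;
        if (val & TOY_LR_EOI)
                lr.state |= LR_EOI_INT;
        return lr;
}

int main(void)
{
        struct vgic_lr in = { .irq = 27, .source = 0,
                              .state = LR_STATE_PENDING | LR_EOI_INT };
        struct vgic_lr out = unpack_lr(pack_lr(in));

        assert(in.irq == out.irq && in.source == out.source && in.state == out.state);
        printf("vgic_lr pack/unpack round trip OK\n");
        return 0;
}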
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 476d3bf540a8..73eba793b17f 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -76,14 +76,6 @@
76#define IMPLEMENTER_ARM 0x43b 76#define IMPLEMENTER_ARM 0x43b
77#define GICC_ARCH_VERSION_V2 0x2 77#define GICC_ARCH_VERSION_V2 0x2
78 78
79/* Physical address of vgic virtual cpu interface */
80static phys_addr_t vgic_vcpu_base;
81
82/* Virtual control interface base address */
83static void __iomem *vgic_vctrl_base;
84
85static struct device_node *vgic_node;
86
87#define ACCESS_READ_VALUE (1 << 0) 79#define ACCESS_READ_VALUE (1 << 0)
88#define ACCESS_READ_RAZ (0 << 0) 80#define ACCESS_READ_RAZ (0 << 0)
89#define ACCESS_READ_MASK(x) ((x) & (1 << 0)) 81#define ACCESS_READ_MASK(x) ((x) & (1 << 0))
@@ -94,21 +86,46 @@ static struct device_node *vgic_node;
94#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1)) 86#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
95 87
96static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu); 88static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
89static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
97static void vgic_update_state(struct kvm *kvm); 90static void vgic_update_state(struct kvm *kvm);
98static void vgic_kick_vcpus(struct kvm *kvm); 91static void vgic_kick_vcpus(struct kvm *kvm);
99static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg); 92static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
100static u32 vgic_nr_lr; 93static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
94static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
95static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
96static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
101 97
102static unsigned int vgic_maint_irq; 98static const struct vgic_ops *vgic_ops;
99static const struct vgic_params *vgic;
100
101/*
102 * struct vgic_bitmap contains unions that provide two views of
103 * the same data. In one case it is an array of registers of
104 * u32's, and in the other case it is a bitmap of unsigned
105 * longs.
106 *
107 * This does not work on 64-bit BE systems, because the bitmap access
108 * will store two consecutive 32-bit words with the higher-addressed
109 * register's bits at the lower index and the lower-addressed register's
110 * bits at the higher index.
111 *
112 * Therefore, swizzle the register index when accessing the 32-bit word
113 * registers to access the right register's value.
114 */
115#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
116#define REG_OFFSET_SWIZZLE 1
117#else
118#define REG_OFFSET_SWIZZLE 0
119#endif
103 120
104static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, 121static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
105 int cpuid, u32 offset) 122 int cpuid, u32 offset)
106{ 123{
107 offset >>= 2; 124 offset >>= 2;
108 if (!offset) 125 if (!offset)
109 return x->percpu[cpuid].reg; 126 return x->percpu[cpuid].reg + (offset ^ REG_OFFSET_SWIZZLE);
110 else 127 else
111 return x->shared.reg + offset - 1; 128 return x->shared.reg + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
112} 129}
113 130
114static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x, 131static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
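The REG_OFFSET_SWIZZLE comment above is worth working through once with concrete bytes: on a 64-bit big-endian host, bit 0 of an unsigned-long bitmap lands in the higher-addressed 32-bit word, so the u32 "register" view must XOR the word index with 1 to find it. A portable userspace demonstration that builds the big-endian memory image by hand (runs on any host, no BE hardware needed):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Store a 64-bit value as it would sit in memory on a big-endian host. */
static void store_be64(uint8_t *p, uint64_t v)
{
        for (int i = 0; i < 8; i++)
                p[i] = v >> (56 - 8 * i);
}

/* Read 32-bit word number 'idx' of that memory, again big-endian. */
static uint32_t load_be32(const uint8_t *p, int idx)
{
        const uint8_t *q = p + 4 * idx;

        return ((uint32_t)q[0] << 24) | ((uint32_t)q[1] << 16) |
               ((uint32_t)q[2] << 8) | q[3];
}

int main(void)
{
        uint8_t mem[8];

        /* set_bit(0, bitmap) on a 64-bit BE host: bit 0 of the u64. */
        store_be64(mem, 1ULL << 0);

        /* Viewed as u32 registers, the bit shows up in word 1, not word 0... */
        assert(load_be32(mem, 0) == 0);
        assert(load_be32(mem, 1) == 1);

        /* ...so the register accessor must XOR the word index by 1. */
        int swizzle = 1;                        /* REG_OFFSET_SWIZZLE on BE64 */
        assert(load_be32(mem, 0 ^ swizzle) == 1);

        printf("register 0 of the bitmap lives at u32 index %d on BE64\n", 0 ^ swizzle);
        return 0;
}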
@@ -241,12 +258,12 @@ static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
241 258
242static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask) 259static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
243{ 260{
244 return *((u32 *)mmio->data) & mask; 261 return le32_to_cpu(*((u32 *)mmio->data)) & mask;
245} 262}
246 263
247static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value) 264static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
248{ 265{
249 *((u32 *)mmio->data) = value & mask; 266 *((u32 *)mmio->data) = cpu_to_le32(value) & mask;
250} 267}
251 268
252/** 269/**
@@ -593,18 +610,6 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
593 return false; 610 return false;
594} 611}
595 612
596#define LR_CPUID(lr) \
597 (((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
598#define LR_IRQID(lr) \
599 ((lr) & GICH_LR_VIRTUALID)
600
601static void vgic_retire_lr(int lr_nr, int irq, struct vgic_cpu *vgic_cpu)
602{
603 clear_bit(lr_nr, vgic_cpu->lr_used);
604 vgic_cpu->vgic_lr[lr_nr] &= ~GICH_LR_STATE;
605 vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
606}
607
608/** 613/**
609 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor 614 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
610 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs 615 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
@@ -622,13 +627,10 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
622 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 627 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
623 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 628 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
624 int vcpu_id = vcpu->vcpu_id; 629 int vcpu_id = vcpu->vcpu_id;
625 int i, irq, source_cpu; 630 int i;
626 u32 *lr;
627 631
628 for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) { 632 for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
629 lr = &vgic_cpu->vgic_lr[i]; 633 struct vgic_lr lr = vgic_get_lr(vcpu, i);
630 irq = LR_IRQID(*lr);
631 source_cpu = LR_CPUID(*lr);
632 634
633 /* 635 /*
634 * There are three options for the state bits: 636 * There are three options for the state bits:
@@ -640,7 +642,7 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
640 * If the LR holds only an active interrupt (not pending) then 642 * If the LR holds only an active interrupt (not pending) then
641 * just leave it alone. 643 * just leave it alone.
642 */ 644 */
643 if ((*lr & GICH_LR_STATE) == GICH_LR_ACTIVE_BIT) 645 if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
644 continue; 646 continue;
645 647
646 /* 648 /*
@@ -649,18 +651,19 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
649 * is fine, then we are only setting a few bits that were 651 * is fine, then we are only setting a few bits that were
650 * already set. 652 * already set.
651 */ 653 */
652 vgic_dist_irq_set(vcpu, irq); 654 vgic_dist_irq_set(vcpu, lr.irq);
653 if (irq < VGIC_NR_SGIS) 655 if (lr.irq < VGIC_NR_SGIS)
654 dist->irq_sgi_sources[vcpu_id][irq] |= 1 << source_cpu; 656 dist->irq_sgi_sources[vcpu_id][lr.irq] |= 1 << lr.source;
655 *lr &= ~GICH_LR_PENDING_BIT; 657 lr.state &= ~LR_STATE_PENDING;
658 vgic_set_lr(vcpu, i, lr);
656 659
657 /* 660 /*
658 * If there's no state left on the LR (it could still be 661 * If there's no state left on the LR (it could still be
659 * active), then the LR does not hold any useful info and can 662 * active), then the LR does not hold any useful info and can
660 * be marked as free for other use. 663 * be marked as free for other use.
661 */ 664 */
662 if (!(*lr & GICH_LR_STATE)) 665 if (!(lr.state & LR_STATE_MASK))
663 vgic_retire_lr(i, irq, vgic_cpu); 666 vgic_retire_lr(i, lr.irq, vcpu);
664 667
665 /* Finally update the VGIC state. */ 668 /* Finally update the VGIC state. */
666 vgic_update_state(vcpu->kvm); 669 vgic_update_state(vcpu->kvm);
@@ -989,8 +992,73 @@ static void vgic_update_state(struct kvm *kvm)
989 } 992 }
990} 993}
991 994
992#define MK_LR_PEND(src, irq) \ 995static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
993 (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq)) 996{
997 return vgic_ops->get_lr(vcpu, lr);
998}
999
1000static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
1001 struct vgic_lr vlr)
1002{
1003 vgic_ops->set_lr(vcpu, lr, vlr);
1004}
1005
1006static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
1007 struct vgic_lr vlr)
1008{
1009 vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
1010}
1011
1012static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
1013{
1014 return vgic_ops->get_elrsr(vcpu);
1015}
1016
1017static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
1018{
1019 return vgic_ops->get_eisr(vcpu);
1020}
1021
1022static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
1023{
1024 return vgic_ops->get_interrupt_status(vcpu);
1025}
1026
1027static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
1028{
1029 vgic_ops->enable_underflow(vcpu);
1030}
1031
1032static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
1033{
1034 vgic_ops->disable_underflow(vcpu);
1035}
1036
1037static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
1038{
1039 vgic_ops->get_vmcr(vcpu, vmcr);
1040}
1041
1042static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
1043{
1044 vgic_ops->set_vmcr(vcpu, vmcr);
1045}
1046
1047static inline void vgic_enable(struct kvm_vcpu *vcpu)
1048{
1049 vgic_ops->enable(vcpu);
1050}
1051
1052static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
1053{
1054 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1055 struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
1056
1057 vlr.state = 0;
1058 vgic_set_lr(vcpu, lr_nr, vlr);
1059 clear_bit(lr_nr, vgic_cpu->lr_used);
1060 vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
1061}
994 1062
995/* 1063/*
996 * An interrupt may have been disabled after being made pending on the 1064 * An interrupt may have been disabled after being made pending on the
@@ -1006,13 +1074,13 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
1006 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1074 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1007 int lr; 1075 int lr;
1008 1076
1009 for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) { 1077 for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
1010 int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID; 1078 struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
1011 1079
1012 if (!vgic_irq_is_enabled(vcpu, irq)) { 1080 if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
1013 vgic_retire_lr(lr, irq, vgic_cpu); 1081 vgic_retire_lr(lr, vlr.irq, vcpu);
1014 if (vgic_irq_is_active(vcpu, irq)) 1082 if (vgic_irq_is_active(vcpu, vlr.irq))
1015 vgic_irq_clear_active(vcpu, irq); 1083 vgic_irq_clear_active(vcpu, vlr.irq);
1016 } 1084 }
1017 } 1085 }
1018} 1086}
@@ -1024,6 +1092,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
1024static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) 1092static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
1025{ 1093{
1026 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1094 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1095 struct vgic_lr vlr;
1027 int lr; 1096 int lr;
1028 1097
1029 /* Sanitize the input... */ 1098 /* Sanitize the input... */
@@ -1036,28 +1105,34 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
1036 lr = vgic_cpu->vgic_irq_lr_map[irq]; 1105 lr = vgic_cpu->vgic_irq_lr_map[irq];
1037 1106
1038 /* Do we have an active interrupt for the same CPUID? */ 1107 /* Do we have an active interrupt for the same CPUID? */
1039 if (lr != LR_EMPTY && 1108 if (lr != LR_EMPTY) {
1040 (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) { 1109 vlr = vgic_get_lr(vcpu, lr);
1041 kvm_debug("LR%d piggyback for IRQ%d %x\n", 1110 if (vlr.source == sgi_source_id) {
1042 lr, irq, vgic_cpu->vgic_lr[lr]); 1111 kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
1043 BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); 1112 BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
1044 vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT; 1113 vlr.state |= LR_STATE_PENDING;
1045 return true; 1114 vgic_set_lr(vcpu, lr, vlr);
1115 return true;
1116 }
1046 } 1117 }
1047 1118
1048 /* Try to use another LR for this interrupt */ 1119 /* Try to use another LR for this interrupt */
1049 lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used, 1120 lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
1050 vgic_cpu->nr_lr); 1121 vgic->nr_lr);
1051 if (lr >= vgic_cpu->nr_lr) 1122 if (lr >= vgic->nr_lr)
1052 return false; 1123 return false;
1053 1124
1054 kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id); 1125 kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
1055 vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
1056 vgic_cpu->vgic_irq_lr_map[irq] = lr; 1126 vgic_cpu->vgic_irq_lr_map[irq] = lr;
1057 set_bit(lr, vgic_cpu->lr_used); 1127 set_bit(lr, vgic_cpu->lr_used);
1058 1128
1129 vlr.irq = irq;
1130 vlr.source = sgi_source_id;
1131 vlr.state = LR_STATE_PENDING;
1059 if (!vgic_irq_is_edge(vcpu, irq)) 1132 if (!vgic_irq_is_edge(vcpu, irq))
1060 vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI; 1133 vlr.state |= LR_EOI_INT;
1134
1135 vgic_set_lr(vcpu, lr, vlr);
1061 1136
1062 return true; 1137 return true;
1063} 1138}
@@ -1155,9 +1230,9 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1155 1230
1156epilog: 1231epilog:
1157 if (overflow) { 1232 if (overflow) {
1158 vgic_cpu->vgic_hcr |= GICH_HCR_UIE; 1233 vgic_enable_underflow(vcpu);
1159 } else { 1234 } else {
1160 vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE; 1235 vgic_disable_underflow(vcpu);
1161 /* 1236 /*
1162 * We're about to run this VCPU, and we've consumed 1237 * We're about to run this VCPU, and we've consumed
1163 * everything the distributor had in store for 1238 * everything the distributor had in store for
@@ -1170,44 +1245,46 @@ epilog:
1170 1245
1171static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) 1246static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1172{ 1247{
1173 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1248 u32 status = vgic_get_interrupt_status(vcpu);
1174 bool level_pending = false; 1249 bool level_pending = false;
1175 1250
1176 kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr); 1251 kvm_debug("STATUS = %08x\n", status);
1177 1252
1178 if (vgic_cpu->vgic_misr & GICH_MISR_EOI) { 1253 if (status & INT_STATUS_EOI) {
1179 /* 1254 /*
1180 * Some level interrupts have been EOIed. Clear their 1255 * Some level interrupts have been EOIed. Clear their
1181 * active bit. 1256 * active bit.
1182 */ 1257 */
1183 int lr, irq; 1258 u64 eisr = vgic_get_eisr(vcpu);
1259 unsigned long *eisr_ptr = (unsigned long *)&eisr;
1260 int lr;
1184 1261
1185 for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr, 1262 for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
1186 vgic_cpu->nr_lr) { 1263 struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
1187 irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
1188 1264
1189 vgic_irq_clear_active(vcpu, irq); 1265 vgic_irq_clear_active(vcpu, vlr.irq);
1190 vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI; 1266 WARN_ON(vlr.state & LR_STATE_MASK);
1267 vlr.state = 0;
1268 vgic_set_lr(vcpu, lr, vlr);
1191 1269
1192 /* Any additional pending interrupt? */ 1270 /* Any additional pending interrupt? */
1193 if (vgic_dist_irq_is_pending(vcpu, irq)) { 1271 if (vgic_dist_irq_is_pending(vcpu, vlr.irq)) {
1194 vgic_cpu_irq_set(vcpu, irq); 1272 vgic_cpu_irq_set(vcpu, vlr.irq);
1195 level_pending = true; 1273 level_pending = true;
1196 } else { 1274 } else {
1197 vgic_cpu_irq_clear(vcpu, irq); 1275 vgic_cpu_irq_clear(vcpu, vlr.irq);
1198 } 1276 }
1199 1277
1200 /* 1278 /*
1201 * Despite being EOIed, the LR may not have 1279 * Despite being EOIed, the LR may not have
1202 * been marked as empty. 1280 * been marked as empty.
1203 */ 1281 */
1204 set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr); 1282 vgic_sync_lr_elrsr(vcpu, lr, vlr);
1205 vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
1206 } 1283 }
1207 } 1284 }
1208 1285
1209 if (vgic_cpu->vgic_misr & GICH_MISR_U) 1286 if (status & INT_STATUS_UNDERFLOW)
1210 vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE; 1287 vgic_disable_underflow(vcpu);
1211 1288
1212 return level_pending; 1289 return level_pending;
1213} 1290}
@@ -1220,29 +1297,31 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1220{ 1297{
1221 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1298 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1222 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 1299 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1300 u64 elrsr;
1301 unsigned long *elrsr_ptr;
1223 int lr, pending; 1302 int lr, pending;
1224 bool level_pending; 1303 bool level_pending;
1225 1304
1226 level_pending = vgic_process_maintenance(vcpu); 1305 level_pending = vgic_process_maintenance(vcpu);
1306 elrsr = vgic_get_elrsr(vcpu);
1307 elrsr_ptr = (unsigned long *)&elrsr;
1227 1308
1228 /* Clear mappings for empty LRs */ 1309 /* Clear mappings for empty LRs */
1229 for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr, 1310 for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
1230 vgic_cpu->nr_lr) { 1311 struct vgic_lr vlr;
1231 int irq;
1232 1312
1233 if (!test_and_clear_bit(lr, vgic_cpu->lr_used)) 1313 if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
1234 continue; 1314 continue;
1235 1315
1236 irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID; 1316 vlr = vgic_get_lr(vcpu, lr);
1237 1317
1238 BUG_ON(irq >= VGIC_NR_IRQS); 1318 BUG_ON(vlr.irq >= VGIC_NR_IRQS);
1239 vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; 1319 vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
1240 } 1320 }
1241 1321
1242 /* Check if we still have something up our sleeve... */ 1322 /* Check if we still have something up our sleeve... */
1243 pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr, 1323 pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
1244 vgic_cpu->nr_lr); 1324 if (level_pending || pending < vgic->nr_lr)
1245 if (level_pending || pending < vgic_cpu->nr_lr)
1246 set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu); 1325 set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
1247} 1326}
1248 1327
@@ -1432,21 +1511,20 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
1432 } 1511 }
1433 1512
1434 /* 1513 /*
1435 * By forcing VMCR to zero, the GIC will restore the binary 1514 * Store the number of LRs per vcpu, so we don't have to go
1436 * points to their reset values. Anything else resets to zero 1515 * all the way to the distributor structure to find out. Only
1437 * anyway. 1516 * assembly code should use this one.
1438 */ 1517 */
1439 vgic_cpu->vgic_vmcr = 0; 1518 vgic_cpu->nr_lr = vgic->nr_lr;
1440 1519
1441 vgic_cpu->nr_lr = vgic_nr_lr; 1520 vgic_enable(vcpu);
1442 vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */
1443 1521
1444 return 0; 1522 return 0;
1445} 1523}
1446 1524
1447static void vgic_init_maintenance_interrupt(void *info) 1525static void vgic_init_maintenance_interrupt(void *info)
1448{ 1526{
1449 enable_percpu_irq(vgic_maint_irq, 0); 1527 enable_percpu_irq(vgic->maint_irq, 0);
1450} 1528}
1451 1529
1452static int vgic_cpu_notify(struct notifier_block *self, 1530static int vgic_cpu_notify(struct notifier_block *self,
@@ -1459,7 +1537,7 @@ static int vgic_cpu_notify(struct notifier_block *self,
1459 break; 1537 break;
1460 case CPU_DYING: 1538 case CPU_DYING:
1461 case CPU_DYING_FROZEN: 1539 case CPU_DYING_FROZEN:
1462 disable_percpu_irq(vgic_maint_irq); 1540 disable_percpu_irq(vgic->maint_irq);
1463 break; 1541 break;
1464 } 1542 }
1465 1543
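Editorial note: the hunks above replace the old globals (vgic_maint_irq, vgic_nr_lr, the open-coded GICH_HCR_EN write) with a probed parameter block (vgic->maint_irq, vgic->nr_lr) and a vgic_enable() call routed through an ops table. The toy program below models that split under invented demo_* names; it is a rough sketch of the idea, not the arm_vgic.h definitions.

/*
 * Per-GIC-model "params" filled in at probe time, plus an "ops" table of
 * function pointers that the common code calls instead of touching GICH_*
 * registers directly.
 */
#include <stdio.h>

struct demo_vcpu { int id; };           /* stand-in for struct kvm_vcpu */

struct demo_vgic_params {
        unsigned int maint_irq;         /* maintenance PPI from the DT    */
        unsigned int nr_lr;             /* list registers per CPU i/face  */
};

struct demo_vgic_ops {
        void (*enable)(struct demo_vcpu *vcpu);
};

/* a toy "GICv2" back-end providing both structures */
static void demo_v2_enable(struct demo_vcpu *vcpu)
{
        printf("vcpu%d: set the GICH_HCR.EN equivalent\n", vcpu->id);
}

static const struct demo_vgic_ops demo_v2_ops = { .enable = demo_v2_enable };
static const struct demo_vgic_params demo_v2_params = { .maint_irq = 25, .nr_lr = 4 };

static const struct demo_vgic_ops *demo_ops;
static const struct demo_vgic_params *demo_vgic;

int main(void)
{
        struct demo_vcpu vcpu = { .id = 0 };

        /* what a successful model-specific probe would leave behind */
        demo_ops  = &demo_v2_ops;
        demo_vgic = &demo_v2_params;

        /* common code now only uses the indirection */
        demo_ops->enable(&vcpu);
        printf("maintenance irq %u, %u LRs\n", demo_vgic->maint_irq, demo_vgic->nr_lr);
        return 0;
}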
@@ -1470,30 +1548,37 @@ static struct notifier_block vgic_cpu_nb = {
1470 .notifier_call = vgic_cpu_notify, 1548 .notifier_call = vgic_cpu_notify,
1471}; 1549};
1472 1550
1551static const struct of_device_id vgic_ids[] = {
1552 { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
1553 { .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
1554 {},
1555};
1556
1473int kvm_vgic_hyp_init(void) 1557int kvm_vgic_hyp_init(void)
1474{ 1558{
1559 const struct of_device_id *matched_id;
 1560 int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
1561 const struct vgic_params **);
1562 struct device_node *vgic_node;
1475 int ret; 1563 int ret;
1476 struct resource vctrl_res;
1477 struct resource vcpu_res;
1478 1564
1479 vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic"); 1565 vgic_node = of_find_matching_node_and_match(NULL,
1566 vgic_ids, &matched_id);
1480 if (!vgic_node) { 1567 if (!vgic_node) {
1481 kvm_err("error: no compatible vgic node in DT\n"); 1568 kvm_err("error: no compatible GIC node found\n");
1482 return -ENODEV; 1569 return -ENODEV;
1483 } 1570 }
1484 1571
1485 vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0); 1572 vgic_probe = matched_id->data;
1486 if (!vgic_maint_irq) { 1573 ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
1487 kvm_err("error getting vgic maintenance irq from DT\n"); 1574 if (ret)
1488 ret = -ENXIO; 1575 return ret;
1489 goto out;
1490 }
1491 1576
1492 ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler, 1577 ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
1493 "vgic", kvm_get_running_vcpus()); 1578 "vgic", kvm_get_running_vcpus());
1494 if (ret) { 1579 if (ret) {
1495 kvm_err("Cannot register interrupt %d\n", vgic_maint_irq); 1580 kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
1496 goto out; 1581 return ret;
1497 } 1582 }
1498 1583
1499 ret = __register_cpu_notifier(&vgic_cpu_nb); 1584 ret = __register_cpu_notifier(&vgic_cpu_nb);
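Editorial note: kvm_vgic_hyp_init() now matches the DT node against a small table and calls whichever probe function is stored in the matching entry's .data, instead of hard-coding the GICv2 compatible string. The plain-C sketch below shows the same first-match dispatch pattern with invented demo_* names; the kernel uses struct of_device_id and of_find_matching_node_and_match() for this.

/*
 * Map a "compatible" string to a probe callback; first match wins.
 */
#include <stdio.h>
#include <string.h>

typedef int (*demo_probe_fn)(void);

static int demo_v2_probe(void) { printf("GICv2 probe\n"); return 0; }
static int demo_v3_probe(void) { printf("GICv3 probe\n"); return 0; }

static const struct {
        const char     *compatible;
        demo_probe_fn   probe;
} demo_ids[] = {
        { "arm,cortex-a15-gic", demo_v2_probe },
        { "arm,gic-v3",         demo_v3_probe },
        { NULL,                 NULL },                 /* sentinel */
};

static int demo_hyp_init(const char *dt_compatible)
{
        for (int i = 0; demo_ids[i].compatible; i++) {
                if (!strcmp(demo_ids[i].compatible, dt_compatible))
                        return demo_ids[i].probe();
        }
        return -1;      /* no compatible GIC node found */
}

int main(void)
{
        return demo_hyp_init("arm,gic-v3");
}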
@@ -1502,65 +1587,15 @@ int kvm_vgic_hyp_init(void)
1502 goto out_free_irq; 1587 goto out_free_irq;
1503 } 1588 }
1504 1589
1505 ret = of_address_to_resource(vgic_node, 2, &vctrl_res); 1590 /* Callback into arch code for setup */
1506 if (ret) { 1591 vgic_arch_setup(vgic);
1507 kvm_err("Cannot obtain VCTRL resource\n");
1508 goto out_free_irq;
1509 }
1510 1592
1511 vgic_vctrl_base = of_iomap(vgic_node, 2);
1512 if (!vgic_vctrl_base) {
1513 kvm_err("Cannot ioremap VCTRL\n");
1514 ret = -ENOMEM;
1515 goto out_free_irq;
1516 }
1517
1518 vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR);
1519 vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1;
1520
1521 ret = create_hyp_io_mappings(vgic_vctrl_base,
1522 vgic_vctrl_base + resource_size(&vctrl_res),
1523 vctrl_res.start);
1524 if (ret) {
1525 kvm_err("Cannot map VCTRL into hyp\n");
1526 goto out_unmap;
1527 }
1528
1529 if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
1530 kvm_err("Cannot obtain VCPU resource\n");
1531 ret = -ENXIO;
1532 goto out_unmap;
1533 }
1534
1535 if (!PAGE_ALIGNED(vcpu_res.start)) {
1536 kvm_err("GICV physical address 0x%llx not page aligned\n",
1537 (unsigned long long)vcpu_res.start);
1538 ret = -ENXIO;
1539 goto out_unmap;
1540 }
1541
1542 if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
1543 kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
1544 (unsigned long long)resource_size(&vcpu_res),
1545 PAGE_SIZE);
1546 ret = -ENXIO;
1547 goto out_unmap;
1548 }
1549
1550 vgic_vcpu_base = vcpu_res.start;
1551
1552 kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
1553 vctrl_res.start, vgic_maint_irq);
1554 on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1); 1593 on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
1555 1594
1556 goto out; 1595 return 0;
1557 1596
1558out_unmap:
1559 iounmap(vgic_vctrl_base);
1560out_free_irq: 1597out_free_irq:
1561 free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus()); 1598 free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
1562out:
1563 of_node_put(vgic_node);
1564 return ret; 1599 return ret;
1565} 1600}
1566 1601
@@ -1593,7 +1628,7 @@ int kvm_vgic_init(struct kvm *kvm)
1593 } 1628 }
1594 1629
1595 ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base, 1630 ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
1596 vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE); 1631 vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE);
1597 if (ret) { 1632 if (ret) {
1598 kvm_err("Unable to remap VGIC CPU to VCPU\n"); 1633 kvm_err("Unable to remap VGIC CPU to VCPU\n");
1599 goto out; 1634 goto out;
@@ -1639,7 +1674,8 @@ int kvm_vgic_create(struct kvm *kvm)
1639 } 1674 }
1640 1675
1641 spin_lock_init(&kvm->arch.vgic.lock); 1676 spin_lock_init(&kvm->arch.vgic.lock);
1642 kvm->arch.vgic.vctrl_base = vgic_vctrl_base; 1677 kvm->arch.vgic.in_kernel = true;
1678 kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
1643 kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF; 1679 kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
1644 kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF; 1680 kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
1645 1681
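Editorial note: kvm_vgic_create() now records that an in-kernel GIC exists (in_kernel = true), copies the probed vctrl_base out of the parameter block, and leaves the guest-visible MMIO bases "undefined" until userspace provides them. The fragment below is a simplified model of that per-VM state, with invented demo_* names and a stand-in sentinel value.

/*
 * Per-VM distributor state: flag the in-kernel GIC and defer the guest
 * physical bases to a later userspace call.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_ADDR_UNDEF ((uint64_t)-1)

struct demo_vgic_dist {
        bool     in_kernel;
        uint64_t vctrl_base;    /* host physical GICH base from probe     */
        uint64_t dist_base;     /* guest physical, set later by userspace */
        uint64_t cpu_base;      /* guest physical, set later by userspace */
};

static void demo_vgic_create(struct demo_vgic_dist *d, uint64_t probed_vctrl)
{
        d->in_kernel  = true;
        d->vctrl_base = probed_vctrl;
        d->dist_base  = DEMO_ADDR_UNDEF;
        d->cpu_base   = DEMO_ADDR_UNDEF;
}

int main(void)
{
        struct demo_vgic_dist d;

        demo_vgic_create(&d, 0x2c04f000);       /* pretend GICH base from probe */
        return d.in_kernel ? 0 : 1;
}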
@@ -1738,39 +1774,40 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
1738static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu, 1774static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
1739 struct kvm_exit_mmio *mmio, phys_addr_t offset) 1775 struct kvm_exit_mmio *mmio, phys_addr_t offset)
1740{ 1776{
1741 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1742 u32 reg, mask = 0, shift = 0;
1743 bool updated = false; 1777 bool updated = false;
1778 struct vgic_vmcr vmcr;
1779 u32 *vmcr_field;
1780 u32 reg;
1781
1782 vgic_get_vmcr(vcpu, &vmcr);
1744 1783
1745 switch (offset & ~0x3) { 1784 switch (offset & ~0x3) {
1746 case GIC_CPU_CTRL: 1785 case GIC_CPU_CTRL:
1747 mask = GICH_VMCR_CTRL_MASK; 1786 vmcr_field = &vmcr.ctlr;
1748 shift = GICH_VMCR_CTRL_SHIFT;
1749 break; 1787 break;
1750 case GIC_CPU_PRIMASK: 1788 case GIC_CPU_PRIMASK:
1751 mask = GICH_VMCR_PRIMASK_MASK; 1789 vmcr_field = &vmcr.pmr;
1752 shift = GICH_VMCR_PRIMASK_SHIFT;
1753 break; 1790 break;
1754 case GIC_CPU_BINPOINT: 1791 case GIC_CPU_BINPOINT:
1755 mask = GICH_VMCR_BINPOINT_MASK; 1792 vmcr_field = &vmcr.bpr;
1756 shift = GICH_VMCR_BINPOINT_SHIFT;
1757 break; 1793 break;
1758 case GIC_CPU_ALIAS_BINPOINT: 1794 case GIC_CPU_ALIAS_BINPOINT:
1759 mask = GICH_VMCR_ALIAS_BINPOINT_MASK; 1795 vmcr_field = &vmcr.abpr;
1760 shift = GICH_VMCR_ALIAS_BINPOINT_SHIFT;
1761 break; 1796 break;
1797 default:
1798 BUG();
1762 } 1799 }
1763 1800
1764 if (!mmio->is_write) { 1801 if (!mmio->is_write) {
1765 reg = (vgic_cpu->vgic_vmcr & mask) >> shift; 1802 reg = *vmcr_field;
1766 mmio_data_write(mmio, ~0, reg); 1803 mmio_data_write(mmio, ~0, reg);
1767 } else { 1804 } else {
1768 reg = mmio_data_read(mmio, ~0); 1805 reg = mmio_data_read(mmio, ~0);
1769 reg = (reg << shift) & mask; 1806 if (reg != *vmcr_field) {
1770 if (reg != (vgic_cpu->vgic_vmcr & mask)) 1807 *vmcr_field = reg;
1808 vgic_set_vmcr(vcpu, &vmcr);
1771 updated = true; 1809 updated = true;
1772 vgic_cpu->vgic_vmcr &= ~mask; 1810 }
1773 vgic_cpu->vgic_vmcr |= reg;
1774 } 1811 }
1775 return updated; 1812 return updated;
1776} 1813}
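Editorial note: in handle_cpu_mmio_misc() each GIC_CPU_* case used to carry its own mask/shift into the raw vgic_vmcr word; after the change the back-end decodes the whole register into struct vgic_vmcr once (vgic_get_vmcr()), the common code edits a plain field, and the back-end re-encodes it (vgic_set_vmcr()). The sketch below contrasts the two representations in plain C; the bit positions and demo_* names are illustrative only, not the real GICH_VMCR layout.

/*
 * Decode-modify-encode of a VMCR-like word instead of per-field mask/shift
 * poking at every call site.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_vmcr {
        uint32_t ctlr;
        uint32_t bpr;
        uint32_t pmr;
};

/* made-up packing, standing in for what a v2 back-end's get/set_vmcr do */
static struct demo_vmcr demo_decode_vmcr(uint32_t raw)
{
        return (struct demo_vmcr){
                .ctlr = raw & 0x1f,
                .bpr  = (raw >> 8)  & 0x7,
                .pmr  = (raw >> 16) & 0xff,
        };
}

static uint32_t demo_encode_vmcr(struct demo_vmcr v)
{
        return (v.ctlr & 0x1f) | ((v.bpr & 0x7) << 8) | ((v.pmr & 0xff) << 16);
}

int main(void)
{
        uint32_t raw = demo_encode_vmcr((struct demo_vmcr){ .ctlr = 1, .bpr = 2, .pmr = 0xf0 });
        struct demo_vmcr v = demo_decode_vmcr(raw);

        v.pmr = 0x80;                           /* guest writes GIC_CPU_PRIMASK */
        printf("new VMCR: 0x%08x\n", demo_encode_vmcr(v));
        return 0;
}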