author		Catalin Marinas <catalin.marinas@arm.com>	2013-06-12 11:48:38 -0400
committer	Catalin Marinas <catalin.marinas@arm.com>	2013-06-12 11:48:38 -0400
commit		63917f0b5ba2a932d4fca7f67d1a1eae9034269e (patch)
tree		2e5b219ca98b56868136e227601a91143d289daf
parent		d822d2a1e33144967b01f9535ce217639aa75279 (diff)
parent		aa4a73a0a23a65a2f531d01f1865d1e61c6acb55 (diff)
Merge branch 'kvm-arm64/kvm-for-3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into upstream
* 'kvm-arm64/kvm-for-3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms: (33 commits)
  arm64: KVM: document kernel object mappings in HYP
  arm64: KVM: MAINTAINERS update
  arm64: KVM: userspace API documentation
  arm64: KVM: enable initialization of a 32bit vcpu
  arm64: KVM: 32bit guest fault injection
  arm64: KVM: 32bit specific register world switch
  arm64: KVM: CPU specific 32bit coprocessor access
  arm64: KVM: 32bit handling of coprocessor traps
  arm64: KVM: 32bit conditional execution emulation
  arm64: KVM: 32bit GP register access
  arm64: KVM: define 32bit specific registers
  arm64: KVM: Build system integration
  arm64: KVM: PSCI implementation
  arm64: KVM: Plug the arch timer
  ARM: KVM: timer: allow DT matching for ARMv8 cores
  arm64: KVM: Plug the VGIC
  arm64: KVM: Exit handling
  arm64: KVM: HYP mode world switch implementation
  arm64: KVM: hypervisor initialization code
  arm64: KVM: guest one-reg interface
  ...

Conflicts:
	arch/arm64/Makefile
-rw-r--r--	Documentation/arm64/memory.txt			|    7
-rw-r--r--	Documentation/virtual/kvm/api.txt		|   58
-rw-r--r--	MAINTAINERS					|    9
-rw-r--r--	arch/arm/kvm/arch_timer.c			|    1
-rw-r--r--	arch/arm64/Makefile				|    3
-rw-r--r--	arch/arm64/include/asm/kvm_arm.h		|  245
-rw-r--r--	arch/arm64/include/asm/kvm_asm.h		|  104
-rw-r--r--	arch/arm64/include/asm/kvm_coproc.h		|   56
-rw-r--r--	arch/arm64/include/asm/kvm_emulate.h		|  180
-rw-r--r--	arch/arm64/include/asm/kvm_host.h		|  202
-rw-r--r--	arch/arm64/include/asm/kvm_mmio.h		|   59
-rw-r--r--	arch/arm64/include/asm/kvm_mmu.h		|  135
-rw-r--r--	arch/arm64/include/asm/kvm_psci.h		|   23
-rw-r--r--	arch/arm64/include/asm/memory.h			|    6
-rw-r--r--	arch/arm64/include/asm/pgtable-hwdef.h		|   19
-rw-r--r--	arch/arm64/include/asm/pgtable.h		|   12
-rw-r--r--	arch/arm64/include/uapi/asm/kvm.h		|  168
-rw-r--r--	arch/arm64/kernel/asm-offsets.c			|   33
-rw-r--r--	arch/arm64/kernel/vmlinux.lds.S			|   20
-rw-r--r--	arch/arm64/kvm/Makefile				|   23
-rw-r--r--	arch/arm64/kvm/emulate.c			|  158
-rw-r--r--	arch/arm64/kvm/guest.c				|  265
-rw-r--r--	arch/arm64/kvm/handle_exit.c			|  124
-rw-r--r--	arch/arm64/kvm/hyp-init.S			|  107
-rw-r--r--	arch/arm64/kvm/hyp.S				|  831
-rw-r--r--	arch/arm64/kvm/inject_fault.c			|  203
-rw-r--r--	arch/arm64/kvm/regmap.c				|  168
-rw-r--r--	arch/arm64/kvm/reset.c				|  112
-rw-r--r--	arch/arm64/kvm/sys_regs.c			| 1050
-rw-r--r--	arch/arm64/kvm/sys_regs.h			|  138
-rw-r--r--	arch/arm64/kvm/sys_regs_generic_v8.c		|   95
-rw-r--r--	include/uapi/linux/kvm.h			|    2
32 files changed, 4596 insertions, 20 deletions
diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
index 5f583af0a6e1..78a377124ef0 100644
--- a/Documentation/arm64/memory.txt
+++ b/Documentation/arm64/memory.txt
@@ -73,3 +73,10 @@ Translation table lookup with 64KB pages:
  | | +--------------------------> [41:29] L2 index (only 38:29 used)
  | +-------------------------------> [47:42] L1 index (not used)
  +-------------------------------------------------> [63] TTBR0/1
+
+When using KVM, the hypervisor maps kernel pages in EL2, at a fixed
+offset from the kernel VA (top 24bits of the kernel VA set to zero):
+
+Start			End			Size		Use
+-----------------------------------------------------------------------
+0000004000000000	0000007fffffffff	 256GB		kernel objects mapped in HYP
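
The fixed-offset mapping described above can be illustrated with a minimal
user-space sketch. It is not kernel code: the 39-bit VA_BITS configuration
and the example kernel address are assumptions, and the mask simply mirrors
the HYP_PAGE_OFFSET_MASK idea introduced in kvm_mmu.h further down.

#include <stdint.h>
#include <stdio.h>

/* Assumed 39-bit VA_BITS configuration (4KB pages). */
#define VA_BITS		39
#define HYP_VA_MASK	((UINT64_C(1) << VA_BITS) - 1)

static uint64_t kern_to_hyp(uint64_t kernel_va)
{
	/* Drop the leading ones of the kernel VA; the offset is fixed. */
	return kernel_va & HYP_VA_MASK;
}

int main(void)
{
	uint64_t kva = UINT64_C(0xffffffc000085000);	/* hypothetical kernel address */

	/* Prints 0x0000004000085000, inside the HYP range documented above. */
	printf("HYP alias of %#018llx is %#018llx\n",
	       (unsigned long long)kva, (unsigned long long)kern_to_hyp(kva));
	return 0;
}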
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 5f91eda91647..9bfadeb8be31 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -280,7 +280,7 @@ kvm_run' (see below).
 4.11 KVM_GET_REGS
 
 Capability: basic
-Architectures: all except ARM
+Architectures: all except ARM, arm64
 Type: vcpu ioctl
 Parameters: struct kvm_regs (out)
 Returns: 0 on success, -1 on error
@@ -301,7 +301,7 @@ struct kvm_regs {
 4.12 KVM_SET_REGS
 
 Capability: basic
-Architectures: all except ARM
+Architectures: all except ARM, arm64
 Type: vcpu ioctl
 Parameters: struct kvm_regs (in)
 Returns: 0 on success, -1 on error
@@ -587,7 +587,7 @@ struct kvm_fpu {
 4.24 KVM_CREATE_IRQCHIP
 
 Capability: KVM_CAP_IRQCHIP
-Architectures: x86, ia64, ARM
+Architectures: x86, ia64, ARM, arm64
 Type: vm ioctl
 Parameters: none
 Returns: 0 on success, -1 on error
@@ -595,14 +595,14 @@ Returns: 0 on success, -1 on error
 Creates an interrupt controller model in the kernel. On x86, creates a virtual
 ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
 local APIC. IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
-only go to the IOAPIC. On ia64, a IOSAPIC is created. On ARM, a GIC is
+only go to the IOAPIC. On ia64, a IOSAPIC is created. On ARM/arm64, a GIC is
 created.
 
 
 4.25 KVM_IRQ_LINE
 
 Capability: KVM_CAP_IRQCHIP
-Architectures: x86, ia64, arm
+Architectures: x86, ia64, arm, arm64
 Type: vm ioctl
 Parameters: struct kvm_irq_level
 Returns: 0 on success, -1 on error
@@ -612,9 +612,10 @@ On some architectures it is required that an interrupt controller model has
 been previously created with KVM_CREATE_IRQCHIP. Note that edge-triggered
 interrupts require the level to be set to 1 and then back to 0.
 
-ARM can signal an interrupt either at the CPU level, or at the in-kernel irqchip
-(GIC), and for in-kernel irqchip can tell the GIC to use PPIs designated for
-specific cpus. The irq field is interpreted like this:
+ARM/arm64 can signal an interrupt either at the CPU level, or at the
+in-kernel irqchip (GIC), and for in-kernel irqchip can tell the GIC to
+use PPIs designated for specific cpus. The irq field is interpreted
+like this:
 
   bits:  | 31 ... 24 | 23 ... 16 | 15 ... 0 |
  field: | irq_type | vcpu_index |  irq_id  |
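
As a rough illustration of this layout, the sketch below packs the irq field
for KVM_IRQ_LINE. The shift constants mirror the arm64 uapi header added
later in this series; the PPI number in the usage comment is only an example.

#include <stdint.h>

#define KVM_ARM_IRQ_TYPE_SHIFT	24	/* bits 31..24: irq_type */
#define KVM_ARM_IRQ_VCPU_SHIFT	16	/* bits 23..16: vcpu_index */
#define KVM_ARM_IRQ_NUM_SHIFT	0	/* bits 15..0:  irq_id */

static inline uint32_t arm_irq_field(uint32_t irq_type, uint32_t vcpu_index,
				     uint32_t irq_id)
{
	return (irq_type   << KVM_ARM_IRQ_TYPE_SHIFT) |
	       (vcpu_index << KVM_ARM_IRQ_VCPU_SHIFT) |
	       (irq_id     << KVM_ARM_IRQ_NUM_SHIFT);
}

/*
 * Example (illustrative only): raise PPI 27 on vcpu 0 through the in-kernel
 * GIC, i.e. irq_type = KVM_ARM_IRQ_TYPE_PPI (2):
 *
 *	struct kvm_irq_level irq = {
 *		.irq   = arm_irq_field(2, 0, 27),
 *		.level = 1,
 *	};
 *	ioctl(vm_fd, KVM_IRQ_LINE, &irq);
 */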
@@ -1831,6 +1832,22 @@ ARM 32-bit VFP control registers have the following id bit patterns:
 ARM 64-bit FP registers have the following id bit patterns:
   0x4030 0000 0012 0 <regno:12>
 
+
+arm64 registers are mapped using the lower 32 bits. The upper 16 of
+that is the register group type, or coprocessor number:
+
+arm64 core/FP-SIMD registers have the following id bit patterns. Note
+that the size of the access is variable, as the kvm_regs structure
+contains elements ranging from 32 to 128 bits. The index is a 32bit
+value in the kvm_regs structure seen as a 32bit array.
+  0x60x0 0000 0010 <index into the kvm_regs struct:16>
+
+arm64 CCSIDR registers are demultiplexed by CSSELR value:
+  0x6020 0000 0011 00 <csselr:8>
+
+arm64 system registers have the following id bit patterns:
+  0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
+
 4.69 KVM_GET_ONE_REG
 
 Capability: KVM_CAP_ONE_REG
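
A hedged sketch of composing a 64-bit KVM_GET_ONE_REG/KVM_SET_ONE_REG id for
an arm64 system register from the pattern above. The shifts mirror the
KVM_REG_ARM64_SYSREG_* definitions added to the arm64 uapi header in this
series; the 0x6030... prefix is KVM_REG_ARM64 | KVM_REG_SIZE_U64 plus the
sysreg coprocessor number, and the MPIDR_EL1 encoding is given only as an
illustration.

#include <stdint.h>

/* 0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3> */
#define ARM64_SYSREG_BASE	UINT64_C(0x6030000000130000)

static inline uint64_t arm64_sysreg_id(uint64_t op0, uint64_t op1,
					uint64_t crn, uint64_t crm, uint64_t op2)
{
	return ARM64_SYSREG_BASE |
	       (op0 << 14) | (op1 << 11) | (crn << 7) | (crm << 3) | (op2 << 0);
}

/*
 * Example (illustrative): MPIDR_EL1 is op0=3, op1=0, CRn=0, CRm=0, op2=5,
 * so its ONE_REG id would be arm64_sysreg_id(3, 0, 0, 0, 5).
 */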
@@ -2264,7 +2281,7 @@ current state. "addr" is ignored.
 4.77 KVM_ARM_VCPU_INIT
 
 Capability: basic
-Architectures: arm
+Architectures: arm, arm64
 Type: vcpu ioctl
 Parameters: struct struct kvm_vcpu_init (in)
 Returns: 0 on success; -1 on error
@@ -2283,12 +2300,14 @@ should be created before this ioctl is invoked.
 Possible features:
 	- KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
 	  Depends on KVM_CAP_ARM_PSCI.
+	- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
+	  Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
 
 
 4.78 KVM_GET_REG_LIST
 
 Capability: basic
-Architectures: arm
+Architectures: arm, arm64
 Type: vcpu ioctl
 Parameters: struct kvm_reg_list (in/out)
 Returns: 0 on success; -1 on error
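
To tie the two ioctls together, here is a hedged user-space sketch: vcpu_fd
and the chosen target are assumptions, error handling is omitted, and the
features bitmap only sets the flag documented above.

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void init_vcpu_and_list_regs(int vcpu_fd)
{
	struct kvm_vcpu_init init;
	struct kvm_reg_list *list;

	memset(&init, 0, sizeof(init));
	init.target = KVM_ARM_TARGET_CORTEX_A57;	/* assumed target CPU */
	init.features[0] = 1 << KVM_ARM_VCPU_POWER_OFF;	/* start powered off */
	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);

	/* With n == 0 the ioctl fails with E2BIG but reports the required count in n. */
	list = calloc(1, sizeof(*list));
	list->n = 0;
	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);

	/* Re-issue with enough room; list->reg[] then holds the ONE_REG ids. */
	list = realloc(list, sizeof(*list) + list->n * sizeof(__u64));
	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
	free(list);
}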
@@ -2308,7 +2327,7 @@ KVM_GET_ONE_REG/KVM_SET_ONE_REG calls.
 4.80 KVM_ARM_SET_DEVICE_ADDR
 
 Capability: KVM_CAP_ARM_SET_DEVICE_ADDR
-Architectures: arm
+Architectures: arm, arm64
 Type: vm ioctl
 Parameters: struct kvm_arm_device_address (in)
 Returns: 0 on success, -1 on error
@@ -2329,18 +2348,19 @@ can access emulated or directly exposed devices, which the host kernel needs
 to know about. The id field is an architecture specific identifier for a
 specific device.
 
-ARM divides the id field into two parts, a device id and an address type id
-specific to the individual device.
+ARM/arm64 divides the id field into two parts, a device id and an
+address type id specific to the individual device.
 
   bits:  | 63 ... 32 | 31 ... 16 | 15 ... 0 |
  field: | 0x00000000 | device id | addr type id |
 
-ARM currently only require this when using the in-kernel GIC support for the
-hardware VGIC features, using KVM_ARM_DEVICE_VGIC_V2 as the device id. When
-setting the base address for the guest's mapping of the VGIC virtual CPU
-and distributor interface, the ioctl must be called after calling
-KVM_CREATE_IRQCHIP, but before calling KVM_RUN on any of the VCPUs. Calling
-this ioctl twice for any of the base addresses will return -EEXIST.
+ARM/arm64 currently only require this when using the in-kernel GIC
+support for the hardware VGIC features, using KVM_ARM_DEVICE_VGIC_V2
+as the device id. When setting the base address for the guest's
+mapping of the VGIC virtual CPU and distributor interface, the ioctl
+must be called after calling KVM_CREATE_IRQCHIP, but before calling
+KVM_RUN on any of the VCPUs. Calling this ioctl twice for any of the
+base addresses will return -EEXIST.
 
 4.82 KVM_PPC_RTAS_DEFINE_TOKEN
 
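
As a rough example of the id layout above, a hedged sketch placing the VGIC
distributor: vm_fd and dist_base are assumptions, the struct name used here
(kvm_arm_device_addr) is the one from the generic uapi header rather than the
name quoted in the text, and the KVM_ARM_DEVICE_*/KVM_VGIC_V2_* constants
come from the arm64 uapi header added in this series.

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_vgic_dist_addr(int vm_fd, __u64 dist_base)
{
	struct kvm_arm_device_addr dev = {
		/* device id in bits 31..16, address type id in bits 15..0 */
		.id = ((__u64)KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) |
		      ((__u64)KVM_VGIC_V2_ADDR_TYPE_DIST << KVM_ARM_DEVICE_TYPE_SHIFT),
		.addr = dist_base,
	};

	/* Must be issued after KVM_CREATE_IRQCHIP and before the first KVM_RUN. */
	return ioctl(vm_fd, KVM_ARM_SET_DEVICE_ADDR, &dev);
}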
diff --git a/MAINTAINERS b/MAINTAINERS
index f0df6d4d492b..505efea05230 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4709,6 +4709,15 @@ F: arch/arm/include/uapi/asm/kvm*
 F:	arch/arm/include/asm/kvm*
 F:	arch/arm/kvm/
 
+KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
+M:	Marc Zyngier <marc.zyngier@arm.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:	kvmarm@lists.cs.columbia.edu
+S:	Maintained
+F:	arch/arm64/include/uapi/asm/kvm*
+F:	arch/arm64/include/asm/kvm*
+F:	arch/arm64/kvm/
+
 KEXEC
 M:	Eric Biederman <ebiederm@xmission.com>
 W:	http://kernel.org/pub/linux/utils/kernel/kexec/
diff --git a/arch/arm/kvm/arch_timer.c b/arch/arm/kvm/arch_timer.c
index c55b6089e923..49a7516d81c7 100644
--- a/arch/arm/kvm/arch_timer.c
+++ b/arch/arm/kvm/arch_timer.c
@@ -195,6 +195,7 @@ static struct notifier_block kvm_timer_cpu_nb = {
 
 static const struct of_device_id arch_timer_of_match[] = {
 	{ .compatible	= "arm,armv7-timer",	},
+	{ .compatible	= "arm,armv8-timer",	},
 	{},
 };
 
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 79dd13dc8837..741f04fd636a 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -37,7 +37,8 @@ TEXT_OFFSET := 0x00080000
 export	TEXT_OFFSET GZFLAGS
 
 core-y		+= arch/arm64/kernel/ arch/arm64/mm/
-core-$(CONFIG_XEN) += arch/arm64/xen/
+core-$(CONFIG_KVM) += arch/arm64/kvm/
+core-$(CONFIG_XEN) += arch/arm64/xen/
 libs-y		:= arch/arm64/lib/ $(libs-y)
 libs-y		+= $(LIBGCC)
 
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
new file mode 100644
index 000000000000..a5f28e2720c7
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -0,0 +1,245 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ARM64_KVM_ARM_H__
19#define __ARM64_KVM_ARM_H__
20
21#include <asm/types.h>
22
23/* Hyp Configuration Register (HCR) bits */
24#define HCR_ID (UL(1) << 33)
25#define HCR_CD (UL(1) << 32)
26#define HCR_RW_SHIFT 31
27#define HCR_RW (UL(1) << HCR_RW_SHIFT)
28#define HCR_TRVM (UL(1) << 30)
29#define HCR_HCD (UL(1) << 29)
30#define HCR_TDZ (UL(1) << 28)
31#define HCR_TGE (UL(1) << 27)
32#define HCR_TVM (UL(1) << 26)
33#define HCR_TTLB (UL(1) << 25)
34#define HCR_TPU (UL(1) << 24)
35#define HCR_TPC (UL(1) << 23)
36#define HCR_TSW (UL(1) << 22)
37#define HCR_TAC (UL(1) << 21)
38#define HCR_TIDCP (UL(1) << 20)
39#define HCR_TSC (UL(1) << 19)
40#define HCR_TID3 (UL(1) << 18)
41#define HCR_TID2 (UL(1) << 17)
42#define HCR_TID1 (UL(1) << 16)
43#define HCR_TID0 (UL(1) << 15)
44#define HCR_TWE (UL(1) << 14)
45#define HCR_TWI (UL(1) << 13)
46#define HCR_DC (UL(1) << 12)
47#define HCR_BSU (3 << 10)
48#define HCR_BSU_IS (UL(1) << 10)
49#define HCR_FB (UL(1) << 9)
50#define HCR_VA (UL(1) << 8)
51#define HCR_VI (UL(1) << 7)
52#define HCR_VF (UL(1) << 6)
53#define HCR_AMO (UL(1) << 5)
54#define HCR_IMO (UL(1) << 4)
55#define HCR_FMO (UL(1) << 3)
56#define HCR_PTW (UL(1) << 2)
57#define HCR_SWIO (UL(1) << 1)
58#define HCR_VM (UL(1) << 0)
59
60/*
61 * The bits we set in HCR:
62 * RW: 64bit by default, can be overriden for 32bit VMs
63 * TAC: Trap ACTLR
64 * TSC: Trap SMC
65 * TSW: Trap cache operations by set/way
66 * TWI: Trap WFI
67 * TIDCP: Trap L2CTLR/L2ECTLR
68 * BSU_IS: Upgrade barriers to the inner shareable domain
69 * FB: Force broadcast of all maintainance operations
70 * AMO: Override CPSR.A and enable signaling with VA
71 * IMO: Override CPSR.I and enable signaling with VI
72 * FMO: Override CPSR.F and enable signaling with VF
73 * SWIO: Turn set/way invalidates into set/way clean+invalidate
74 */
75#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
76 HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
77 HCR_SWIO | HCR_TIDCP | HCR_RW)
78#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
79
80/* Hyp System Control Register (SCTLR_EL2) bits */
81#define SCTLR_EL2_EE (1 << 25)
82#define SCTLR_EL2_WXN (1 << 19)
83#define SCTLR_EL2_I (1 << 12)
84#define SCTLR_EL2_SA (1 << 3)
85#define SCTLR_EL2_C (1 << 2)
86#define SCTLR_EL2_A (1 << 1)
87#define SCTLR_EL2_M 1
88#define SCTLR_EL2_FLAGS (SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C | \
89 SCTLR_EL2_SA | SCTLR_EL2_I)
90
91/* TCR_EL2 Registers bits */
92#define TCR_EL2_TBI (1 << 20)
93#define TCR_EL2_PS (7 << 16)
94#define TCR_EL2_PS_40B (2 << 16)
95#define TCR_EL2_TG0 (1 << 14)
96#define TCR_EL2_SH0 (3 << 12)
97#define TCR_EL2_ORGN0 (3 << 10)
98#define TCR_EL2_IRGN0 (3 << 8)
99#define TCR_EL2_T0SZ 0x3f
100#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
101 TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
102
103#define TCR_EL2_FLAGS (TCR_EL2_PS_40B)
104
105/* VTCR_EL2 Registers bits */
106#define VTCR_EL2_PS_MASK (7 << 16)
107#define VTCR_EL2_PS_40B (2 << 16)
108#define VTCR_EL2_TG0_MASK (1 << 14)
109#define VTCR_EL2_TG0_4K (0 << 14)
110#define VTCR_EL2_TG0_64K (1 << 14)
111#define VTCR_EL2_SH0_MASK (3 << 12)
112#define VTCR_EL2_SH0_INNER (3 << 12)
113#define VTCR_EL2_ORGN0_MASK (3 << 10)
114#define VTCR_EL2_ORGN0_WBWA (1 << 10)
115#define VTCR_EL2_IRGN0_MASK (3 << 8)
116#define VTCR_EL2_IRGN0_WBWA (1 << 8)
117#define VTCR_EL2_SL0_MASK (3 << 6)
118#define VTCR_EL2_SL0_LVL1 (1 << 6)
119#define VTCR_EL2_T0SZ_MASK 0x3f
120#define VTCR_EL2_T0SZ_40B 24
121
122#ifdef CONFIG_ARM64_64K_PAGES
123/*
124 * Stage2 translation configuration:
125 * 40bits output (PS = 2)
126 * 40bits input (T0SZ = 24)
127 * 64kB pages (TG0 = 1)
128 * 2 level page tables (SL = 1)
129 */
130#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_64K | \
131 VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
132 VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
133 VTCR_EL2_T0SZ_40B)
134#define VTTBR_X (38 - VTCR_EL2_T0SZ_40B)
135#else
136/*
137 * Stage2 translation configuration:
138 * 40bits output (PS = 2)
139 * 40bits input (T0SZ = 24)
140 * 4kB pages (TG0 = 0)
141 * 3 level page tables (SL = 1)
142 */
143#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_4K | \
144 VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
145 VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
146 VTCR_EL2_T0SZ_40B)
147#define VTTBR_X (37 - VTCR_EL2_T0SZ_40B)
148#endif
149
150#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
151#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
152#define VTTBR_VMID_SHIFT (48LLU)
153#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
154
155/* Hyp System Trap Register */
156#define HSTR_EL2_TTEE (1 << 16)
157#define HSTR_EL2_T(x) (1 << x)
158
159/* Hyp Coprocessor Trap Register */
160#define CPTR_EL2_TCPAC (1 << 31)
161#define CPTR_EL2_TTA (1 << 20)
162#define CPTR_EL2_TFP (1 << 10)
163
164/* Hyp Debug Configuration Register bits */
165#define MDCR_EL2_TDRA (1 << 11)
166#define MDCR_EL2_TDOSA (1 << 10)
167#define MDCR_EL2_TDA (1 << 9)
168#define MDCR_EL2_TDE (1 << 8)
169#define MDCR_EL2_HPME (1 << 7)
170#define MDCR_EL2_TPM (1 << 6)
171#define MDCR_EL2_TPMCR (1 << 5)
172#define MDCR_EL2_HPMN_MASK (0x1F)
173
174/* Exception Syndrome Register (ESR) bits */
175#define ESR_EL2_EC_SHIFT (26)
176#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT)
177#define ESR_EL2_IL (1U << 25)
178#define ESR_EL2_ISS (ESR_EL2_IL - 1)
179#define ESR_EL2_ISV_SHIFT (24)
180#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT)
181#define ESR_EL2_SAS_SHIFT (22)
182#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT)
183#define ESR_EL2_SSE (1 << 21)
184#define ESR_EL2_SRT_SHIFT (16)
185#define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
186#define ESR_EL2_SF (1 << 15)
187#define ESR_EL2_AR (1 << 14)
188#define ESR_EL2_EA (1 << 9)
189#define ESR_EL2_CM (1 << 8)
190#define ESR_EL2_S1PTW (1 << 7)
191#define ESR_EL2_WNR (1 << 6)
192#define ESR_EL2_FSC (0x3f)
193#define ESR_EL2_FSC_TYPE (0x3c)
194
195#define ESR_EL2_CV_SHIFT (24)
196#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT)
197#define ESR_EL2_COND_SHIFT (20)
198#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT)
199
200
201#define FSC_FAULT (0x04)
202#define FSC_PERM (0x0c)
203
204/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
205#define HPFAR_MASK (~0xFUL)
206
207#define ESR_EL2_EC_UNKNOWN (0x00)
208#define ESR_EL2_EC_WFI (0x01)
209#define ESR_EL2_EC_CP15_32 (0x03)
210#define ESR_EL2_EC_CP15_64 (0x04)
211#define ESR_EL2_EC_CP14_MR (0x05)
212#define ESR_EL2_EC_CP14_LS (0x06)
213#define ESR_EL2_EC_FP_ASIMD (0x07)
214#define ESR_EL2_EC_CP10_ID (0x08)
215#define ESR_EL2_EC_CP14_64 (0x0C)
216#define ESR_EL2_EC_ILL_ISS (0x0E)
217#define ESR_EL2_EC_SVC32 (0x11)
218#define ESR_EL2_EC_HVC32 (0x12)
219#define ESR_EL2_EC_SMC32 (0x13)
220#define ESR_EL2_EC_SVC64 (0x15)
221#define ESR_EL2_EC_HVC64 (0x16)
222#define ESR_EL2_EC_SMC64 (0x17)
223#define ESR_EL2_EC_SYS64 (0x18)
224#define ESR_EL2_EC_IABT (0x20)
225#define ESR_EL2_EC_IABT_HYP (0x21)
226#define ESR_EL2_EC_PC_ALIGN (0x22)
227#define ESR_EL2_EC_DABT (0x24)
228#define ESR_EL2_EC_DABT_HYP (0x25)
229#define ESR_EL2_EC_SP_ALIGN (0x26)
230#define ESR_EL2_EC_FP_EXC32 (0x28)
231#define ESR_EL2_EC_FP_EXC64 (0x2C)
232#define ESR_EL2_EC_SERRROR (0x2F)
233#define ESR_EL2_EC_BREAKPT (0x30)
234#define ESR_EL2_EC_BREAKPT_HYP (0x31)
235#define ESR_EL2_EC_SOFTSTP (0x32)
236#define ESR_EL2_EC_SOFTSTP_HYP (0x33)
237#define ESR_EL2_EC_WATCHPT (0x34)
238#define ESR_EL2_EC_WATCHPT_HYP (0x35)
239#define ESR_EL2_EC_BKPT32 (0x38)
240#define ESR_EL2_EC_VECTOR32 (0x3A)
241#define ESR_EL2_EC_BRK64 (0x3C)
242
243#define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10
244
245#endif /* __ARM64_KVM_ARM_H__ */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
new file mode 100644
index 000000000000..c92de4163eba
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -0,0 +1,104 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ARM_KVM_ASM_H__
19#define __ARM_KVM_ASM_H__
20
21/*
22 * 0 is reserved as an invalid value.
23 * Order *must* be kept in sync with the hyp switch code.
24 */
25#define MPIDR_EL1 1 /* MultiProcessor Affinity Register */
26#define CSSELR_EL1 2 /* Cache Size Selection Register */
27#define SCTLR_EL1 3 /* System Control Register */
28#define ACTLR_EL1 4 /* Auxilliary Control Register */
29#define CPACR_EL1 5 /* Coprocessor Access Control */
30#define TTBR0_EL1 6 /* Translation Table Base Register 0 */
31#define TTBR1_EL1 7 /* Translation Table Base Register 1 */
32#define TCR_EL1 8 /* Translation Control Register */
33#define ESR_EL1 9 /* Exception Syndrome Register */
34#define AFSR0_EL1 10 /* Auxilary Fault Status Register 0 */
35#define AFSR1_EL1 11 /* Auxilary Fault Status Register 1 */
36#define FAR_EL1 12 /* Fault Address Register */
37#define MAIR_EL1 13 /* Memory Attribute Indirection Register */
38#define VBAR_EL1 14 /* Vector Base Address Register */
39#define CONTEXTIDR_EL1 15 /* Context ID Register */
40#define TPIDR_EL0 16 /* Thread ID, User R/W */
41#define TPIDRRO_EL0 17 /* Thread ID, User R/O */
42#define TPIDR_EL1 18 /* Thread ID, Privileged */
43#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
44#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
45/* 32bit specific registers. Keep them at the end of the range */
46#define DACR32_EL2 21 /* Domain Access Control Register */
47#define IFSR32_EL2 22 /* Instruction Fault Status Register */
48#define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */
49#define DBGVCR32_EL2 24 /* Debug Vector Catch Register */
50#define TEECR32_EL1 25 /* ThumbEE Configuration Register */
51#define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */
52#define NR_SYS_REGS 27
53
54/* 32bit mapping */
55#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
56#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */
57#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */
58#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */
59#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */
60#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
61#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */
62#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
63#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */
64#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */
65#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */
66#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */
67#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
68#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
69#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
70#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
71#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
72#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
73#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
74#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
75#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */
76#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */
77#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
78#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */
79#define c10_AMAIR (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
80#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
81#define NR_CP15_REGS (NR_SYS_REGS * 2)
82
83#define ARM_EXCEPTION_IRQ 0
84#define ARM_EXCEPTION_TRAP 1
85
86#ifndef __ASSEMBLY__
87struct kvm;
88struct kvm_vcpu;
89
90extern char __kvm_hyp_init[];
91extern char __kvm_hyp_init_end[];
92
93extern char __kvm_hyp_vector[];
94
95extern char __kvm_hyp_code_start[];
96extern char __kvm_hyp_code_end[];
97
98extern void __kvm_flush_vm_context(void);
99extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
100
101extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
102#endif
103
104#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_coproc.h b/arch/arm64/include/asm/kvm_coproc.h
new file mode 100644
index 000000000000..9a59301cd014
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_coproc.h
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/include/asm/kvm_coproc.h
6 * Copyright (C) 2012 Rusty Russell IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#ifndef __ARM64_KVM_COPROC_H__
22#define __ARM64_KVM_COPROC_H__
23
24#include <linux/kvm_host.h>
25
26void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
27
28struct kvm_sys_reg_table {
29 const struct sys_reg_desc *table;
30 size_t num;
31};
32
33struct kvm_sys_reg_target_table {
34 struct kvm_sys_reg_table table64;
35 struct kvm_sys_reg_table table32;
36};
37
38void kvm_register_target_sys_reg_table(unsigned int target,
39 struct kvm_sys_reg_target_table *table);
40
41int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
42int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
43int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
44int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
45int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
46
47#define kvm_coproc_table_init kvm_sys_reg_table_init
48void kvm_sys_reg_table_init(void);
49
50struct kvm_one_reg;
51int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
52int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
53int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
54unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
55
56#endif /* __ARM64_KVM_COPROC_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
new file mode 100644
index 000000000000..eec073875218
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -0,0 +1,180 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/include/kvm_emulate.h
6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#ifndef __ARM64_KVM_EMULATE_H__
23#define __ARM64_KVM_EMULATE_H__
24
25#include <linux/kvm_host.h>
26#include <asm/kvm_asm.h>
27#include <asm/kvm_arm.h>
28#include <asm/kvm_mmio.h>
29#include <asm/ptrace.h>
30
31unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
32unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
33
34bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
35void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
36
37void kvm_inject_undefined(struct kvm_vcpu *vcpu);
38void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
39void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
40
41static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
42{
43 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
44}
45
46static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
47{
48 return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
49}
50
51static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
52{
53 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
54}
55
56static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
57{
58 return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
59}
60
61static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
62{
63 if (vcpu_mode_is_32bit(vcpu))
64 return kvm_condition_valid32(vcpu);
65
66 return true;
67}
68
69static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
70{
71 if (vcpu_mode_is_32bit(vcpu))
72 kvm_skip_instr32(vcpu, is_wide_instr);
73 else
74 *vcpu_pc(vcpu) += 4;
75}
76
77static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
78{
79 *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
80}
81
82static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
83{
84 if (vcpu_mode_is_32bit(vcpu))
85 return vcpu_reg32(vcpu, reg_num);
86
87 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
88}
89
90/* Get vcpu SPSR for current mode */
91static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
92{
93 if (vcpu_mode_is_32bit(vcpu))
94 return vcpu_spsr32(vcpu);
95
96 return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
97}
98
99static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
100{
101 u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
102
103 if (vcpu_mode_is_32bit(vcpu))
104 return mode > COMPAT_PSR_MODE_USR;
105
106 return mode != PSR_MODE_EL0t;
107}
108
109static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
110{
111 return vcpu->arch.fault.esr_el2;
112}
113
114static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
115{
116 return vcpu->arch.fault.far_el2;
117}
118
119static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
120{
121 return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
122}
123
124static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
125{
126 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV);
127}
128
129static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
130{
131 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR);
132}
133
134static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
135{
136 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE);
137}
138
139static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
140{
141 return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT;
142}
143
144static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
145{
146 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA);
147}
148
149static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
150{
151 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW);
152}
153
154static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
155{
156 return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT);
157}
158
159/* This one is not specific to Data Abort */
160static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
161{
162 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL);
163}
164
165static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
166{
167 return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT;
168}
169
170static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
171{
172 return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT;
173}
174
175static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
176{
177 return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
178}
179
180#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
new file mode 100644
index 000000000000..644d73956864
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -0,0 +1,202 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/include/asm/kvm_host.h:
6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#ifndef __ARM64_KVM_HOST_H__
23#define __ARM64_KVM_HOST_H__
24
25#include <asm/kvm.h>
26#include <asm/kvm_asm.h>
27#include <asm/kvm_mmio.h>
28
29#define KVM_MAX_VCPUS 4
30#define KVM_USER_MEM_SLOTS 32
31#define KVM_PRIVATE_MEM_SLOTS 4
32#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
33
34#include <kvm/arm_vgic.h>
35#include <kvm/arm_arch_timer.h>
36
37#define KVM_VCPU_MAX_FEATURES 2
38
39/* We don't currently support large pages. */
40#define KVM_HPAGE_GFN_SHIFT(x) 0
41#define KVM_NR_PAGE_SIZES 1
42#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
43
44struct kvm_vcpu;
45int kvm_target_cpu(void);
46int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
47int kvm_arch_dev_ioctl_check_extension(long ext);
48
49struct kvm_arch {
50 /* The VMID generation used for the virt. memory system */
51 u64 vmid_gen;
52 u32 vmid;
53
54 /* 1-level 2nd stage table and lock */
55 spinlock_t pgd_lock;
56 pgd_t *pgd;
57
58 /* VTTBR value associated with above pgd and vmid */
59 u64 vttbr;
60
61 /* Interrupt controller */
62 struct vgic_dist vgic;
63
64 /* Timer */
65 struct arch_timer_kvm timer;
66};
67
68#define KVM_NR_MEM_OBJS 40
69
70/*
71 * We don't want allocation failures within the mmu code, so we preallocate
72 * enough memory for a single page fault in a cache.
73 */
74struct kvm_mmu_memory_cache {
75 int nobjs;
76 void *objects[KVM_NR_MEM_OBJS];
77};
78
79struct kvm_vcpu_fault_info {
80 u32 esr_el2; /* Hyp Syndrom Register */
81 u64 far_el2; /* Hyp Fault Address Register */
82 u64 hpfar_el2; /* Hyp IPA Fault Address Register */
83};
84
85struct kvm_cpu_context {
86 struct kvm_regs gp_regs;
87 union {
88 u64 sys_regs[NR_SYS_REGS];
89 u32 cp15[NR_CP15_REGS];
90 };
91};
92
93typedef struct kvm_cpu_context kvm_cpu_context_t;
94
95struct kvm_vcpu_arch {
96 struct kvm_cpu_context ctxt;
97
98 /* HYP configuration */
99 u64 hcr_el2;
100
101 /* Exception Information */
102 struct kvm_vcpu_fault_info fault;
103
104 /* Pointer to host CPU context */
105 kvm_cpu_context_t *host_cpu_context;
106
107 /* VGIC state */
108 struct vgic_cpu vgic_cpu;
109 struct arch_timer_cpu timer_cpu;
110
111 /*
112 * Anything that is not used directly from assembly code goes
113 * here.
114 */
115 /* dcache set/way operation pending */
116 int last_pcpu;
117 cpumask_t require_dcache_flush;
118
119 /* Don't run the guest */
120 bool pause;
121
122 /* IO related fields */
123 struct kvm_decode mmio_decode;
124
125 /* Interrupt related fields */
126 u64 irq_lines; /* IRQ and FIQ levels */
127
128 /* Cache some mmu pages needed inside spinlock regions */
129 struct kvm_mmu_memory_cache mmu_page_cache;
130
131 /* Target CPU and feature flags */
132 u32 target;
133 DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
134
135 /* Detect first run of a vcpu */
136 bool has_run_once;
137};
138
139#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
140#define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)])
141#define vcpu_cp15(v,r) ((v)->arch.ctxt.cp15[(r)])
142
143struct kvm_vm_stat {
144 u32 remote_tlb_flush;
145};
146
147struct kvm_vcpu_stat {
148 u32 halt_wakeup;
149};
150
151struct kvm_vcpu_init;
152int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
153 const struct kvm_vcpu_init *init);
154unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
155int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
156struct kvm_one_reg;
157int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
158int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
159
160#define KVM_ARCH_WANT_MMU_NOTIFIER
161struct kvm;
162int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
163int kvm_unmap_hva_range(struct kvm *kvm,
164 unsigned long start, unsigned long end);
165void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
166
167/* We do not have shadow page tables, hence the empty hooks */
168static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
169{
170 return 0;
171}
172
173static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
174{
175 return 0;
176}
177
178struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
179struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
180
181u64 kvm_call_hyp(void *hypfn, ...);
182
183int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
184 int exception_index);
185
186int kvm_perf_init(void);
187int kvm_perf_teardown(void);
188
189static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
190 phys_addr_t pgd_ptr,
191 unsigned long hyp_stack_ptr,
192 unsigned long vector_ptr)
193{
194 /*
195 * Call initialization code, and switch to the full blown
196 * HYP code.
197 */
198 kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
199 hyp_stack_ptr, vector_ptr);
200}
201
202#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
new file mode 100644
index 000000000000..fc2f689c0694
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -0,0 +1,59 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ARM64_KVM_MMIO_H__
19#define __ARM64_KVM_MMIO_H__
20
21#include <linux/kvm_host.h>
22#include <asm/kvm_asm.h>
23#include <asm/kvm_arm.h>
24
25/*
26 * This is annoying. The mmio code requires this, even if we don't
27 * need any decoding. To be fixed.
28 */
29struct kvm_decode {
30 unsigned long rt;
31 bool sign_extend;
32};
33
34/*
35 * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
36 * which is an anonymous type. Use our own type instead.
37 */
38struct kvm_exit_mmio {
39 phys_addr_t phys_addr;
40 u8 data[8];
41 u32 len;
42 bool is_write;
43};
44
45static inline void kvm_prepare_mmio(struct kvm_run *run,
46 struct kvm_exit_mmio *mmio)
47{
48 run->mmio.phys_addr = mmio->phys_addr;
49 run->mmio.len = mmio->len;
50 run->mmio.is_write = mmio->is_write;
51 memcpy(run->mmio.data, mmio->data, mmio->len);
52 run->exit_reason = KVM_EXIT_MMIO;
53}
54
55int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
56int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
57 phys_addr_t fault_ipa);
58
59#endif /* __ARM64_KVM_MMIO_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
new file mode 100644
index 000000000000..efe609c6a3c9
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -0,0 +1,135 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ARM64_KVM_MMU_H__
19#define __ARM64_KVM_MMU_H__
20
21#include <asm/page.h>
22#include <asm/memory.h>
23
24/*
25 * As we only have the TTBR0_EL2 register, we cannot express
26 * "negative" addresses. This makes it impossible to directly share
27 * mappings with the kernel.
28 *
29 * Instead, give the HYP mode its own VA region at a fixed offset from
30 * the kernel by just masking the top bits (which are all ones for a
31 * kernel address).
32 */
33#define HYP_PAGE_OFFSET_SHIFT VA_BITS
34#define HYP_PAGE_OFFSET_MASK ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
35#define HYP_PAGE_OFFSET (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
36
37/*
38 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
39 * shared across all the page-tables. Conveniently, we use the last
40 * possible page, where no kernel mapping will ever exist.
41 */
42#define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK)
43
44#ifdef __ASSEMBLY__
45
46/*
47 * Convert a kernel VA into a HYP VA.
48 * reg: VA to be converted.
49 */
50.macro kern_hyp_va reg
51 and \reg, \reg, #HYP_PAGE_OFFSET_MASK
52.endm
53
54#else
55
56#include <asm/cachetype.h>
57#include <asm/cacheflush.h>
58
59#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
60
61/*
62 * Align KVM with the kernel's view of physical memory. Should be
63 * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration.
64 */
65#define KVM_PHYS_SHIFT PHYS_MASK_SHIFT
66#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
67#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
68
69/* Make sure we get the right size, and thus the right alignment */
70#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
71#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
72
73int create_hyp_mappings(void *from, void *to);
74int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
75void free_boot_hyp_pgd(void);
76void free_hyp_pgds(void);
77
78int kvm_alloc_stage2_pgd(struct kvm *kvm);
79void kvm_free_stage2_pgd(struct kvm *kvm);
80int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
81 phys_addr_t pa, unsigned long size);
82
83int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
84
85void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
86
87phys_addr_t kvm_mmu_get_httbr(void);
88phys_addr_t kvm_mmu_get_boot_httbr(void);
89phys_addr_t kvm_get_idmap_vector(void);
90int kvm_mmu_init(void);
91void kvm_clear_hyp_idmap(void);
92
93#define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
94
95static inline bool kvm_is_write_fault(unsigned long esr)
96{
97 unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT;
98
99 if (esr_ec == ESR_EL2_EC_IABT)
100 return false;
101
102 if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR))
103 return false;
104
105 return true;
106}
107
108static inline void kvm_clean_dcache_area(void *addr, size_t size) {}
109static inline void kvm_clean_pgd(pgd_t *pgd) {}
110static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
111static inline void kvm_clean_pte(pte_t *pte) {}
112static inline void kvm_clean_pte_entry(pte_t *pte) {}
113
114static inline void kvm_set_s2pte_writable(pte_t *pte)
115{
116 pte_val(*pte) |= PTE_S2_RDWR;
117}
118
119struct kvm;
120
121static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
122{
123 if (!icache_is_aliasing()) { /* PIPT */
124 unsigned long hva = gfn_to_hva(kvm, gfn);
125 flush_icache_range(hva, hva + PAGE_SIZE);
126 } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
127 /* any kind of VIPT cache */
128 __flush_icache_all();
129 }
130}
131
132#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
133
134#endif /* __ASSEMBLY__ */
135#endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h
new file mode 100644
index 000000000000..e301a4816355
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_psci.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ARM64_KVM_PSCI_H__
19#define __ARM64_KVM_PSCI_H__
20
21bool kvm_psci_call(struct kvm_vcpu *vcpu);
22
23#endif /* __ARM64_KVM_PSCI_H__ */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 381f556b664e..20925bcf4e2a 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -90,6 +90,12 @@
 #define MT_NORMAL_NC		3
 #define MT_NORMAL		4
 
+/*
+ * Memory types for Stage-2 translation
+ */
+#define MT_S2_NORMAL		0xf
+#define MT_S2_DEVICE_nGnRE	0x1
+
 #ifndef __ASSEMBLY__
 
 extern phys_addr_t memstart_addr;
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 75fd13d289b9..66367c6c6527 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -35,6 +35,7 @@
 /*
  * Section
  */
+#define PMD_SECT_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
 #define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
 #define PMD_SECT_AF		(_AT(pmdval_t, 1) << 10)
 #define PMD_SECT_NG		(_AT(pmdval_t, 1) << 11)
@@ -68,6 +69,24 @@
 #define PTE_ATTRINDX_MASK	(_AT(pteval_t, 7) << 2)
 
 /*
+ * 2nd stage PTE definitions
+ */
+#define PTE_S2_RDONLY		(_AT(pteval_t, 1) << 6)   /* HAP[2:1] */
+#define PTE_S2_RDWR		(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
+
+/*
+ * Memory Attribute override for Stage-2 (MemAttr[3:0])
+ */
+#define PTE_S2_MEMATTR(t)	(_AT(pteval_t, (t)) << 2)
+#define PTE_S2_MEMATTR_MASK	(_AT(pteval_t, 0xf) << 2)
+
+/*
+ * EL2/HYP PTE/PMD definitions
+ */
+#define PMD_HYP			PMD_SECT_USER
+#define PTE_HYP			PTE_USER
+
+/*
  * 40-bit physical address supported.
  */
 #define PHYS_MASK_SHIFT		(40)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b93bc2326f56..5588e8ad9762 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -76,6 +76,12 @@ extern pgprot_t pgprot_default;
 #define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
 #define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
 
+#define PAGE_HYP		_MOD_PROT(pgprot_default, PTE_HYP)
+#define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+
+#define PAGE_S2			__pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
+
 #define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE)
 #define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
 #define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
@@ -197,6 +203,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 
 #define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))
 
+#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
+				 PMD_TYPE_TABLE)
+#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
+				 PMD_TYPE_SECT)
+
+
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
 	*pmdp = pmd;
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
new file mode 100644
index 000000000000..5031f4263937
--- /dev/null
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -0,0 +1,168 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/include/uapi/asm/kvm.h:
6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#ifndef __ARM_KVM_H__
23#define __ARM_KVM_H__
24
25#define KVM_SPSR_EL1 0
26#define KVM_SPSR_SVC KVM_SPSR_EL1
27#define KVM_SPSR_ABT 1
28#define KVM_SPSR_UND 2
29#define KVM_SPSR_IRQ 3
30#define KVM_SPSR_FIQ 4
31#define KVM_NR_SPSR 5
32
33#ifndef __ASSEMBLY__
34#include <asm/types.h>
35#include <asm/ptrace.h>
36
37#define __KVM_HAVE_GUEST_DEBUG
38#define __KVM_HAVE_IRQ_LINE
39
40#define KVM_REG_SIZE(id) \
41 (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
42
43struct kvm_regs {
44 struct user_pt_regs regs; /* sp = sp_el0 */
45
46 __u64 sp_el1;
47 __u64 elr_el1;
48
49 __u64 spsr[KVM_NR_SPSR];
50
51 struct user_fpsimd_state fp_regs;
52};
53
54/* Supported Processor Types */
55#define KVM_ARM_TARGET_AEM_V8 0
56#define KVM_ARM_TARGET_FOUNDATION_V8 1
57#define KVM_ARM_TARGET_CORTEX_A57 2
58
59#define KVM_ARM_NUM_TARGETS 3
60
61/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
62#define KVM_ARM_DEVICE_TYPE_SHIFT 0
63#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
64#define KVM_ARM_DEVICE_ID_SHIFT 16
65#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
66
67/* Supported device IDs */
68#define KVM_ARM_DEVICE_VGIC_V2 0
69
70/* Supported VGIC address types */
71#define KVM_VGIC_V2_ADDR_TYPE_DIST 0
72#define KVM_VGIC_V2_ADDR_TYPE_CPU 1
73
74#define KVM_VGIC_V2_DIST_SIZE 0x1000
75#define KVM_VGIC_V2_CPU_SIZE 0x2000
76
77#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
78#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
79
80struct kvm_vcpu_init {
81 __u32 target;
82 __u32 features[7];
83};
84
85struct kvm_sregs {
86};
87
88struct kvm_fpu {
89};
90
91struct kvm_guest_debug_arch {
92};
93
94struct kvm_debug_exit_arch {
95};
96
97struct kvm_sync_regs {
98};
99
100struct kvm_arch_memory_slot {
101};
102
103/* If you need to interpret the index values, here is the key: */
104#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
105#define KVM_REG_ARM_COPROC_SHIFT 16
106
107/* Normal registers are mapped as coprocessor 16. */
108#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
109#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / sizeof(__u32))
110
111/* Some registers need more space to represent values. */
112#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT)
113#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00
114#define KVM_REG_ARM_DEMUX_ID_SHIFT 8
115#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
116#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF
117#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0
118
119/* AArch64 system registers */
120#define KVM_REG_ARM64_SYSREG (0x0013 << KVM_REG_ARM_COPROC_SHIFT)
121#define KVM_REG_ARM64_SYSREG_OP0_MASK 0x000000000000c000
122#define KVM_REG_ARM64_SYSREG_OP0_SHIFT 14
123#define KVM_REG_ARM64_SYSREG_OP1_MASK 0x0000000000003800
124#define KVM_REG_ARM64_SYSREG_OP1_SHIFT 11
125#define KVM_REG_ARM64_SYSREG_CRN_MASK 0x0000000000000780
126#define KVM_REG_ARM64_SYSREG_CRN_SHIFT 7
127#define KVM_REG_ARM64_SYSREG_CRM_MASK 0x0000000000000078
128#define KVM_REG_ARM64_SYSREG_CRM_SHIFT 3
129#define KVM_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007
130#define KVM_REG_ARM64_SYSREG_OP2_SHIFT 0
131
132/* KVM_IRQ_LINE irq field index values */
133#define KVM_ARM_IRQ_TYPE_SHIFT 24
134#define KVM_ARM_IRQ_TYPE_MASK 0xff
135#define KVM_ARM_IRQ_VCPU_SHIFT 16
136#define KVM_ARM_IRQ_VCPU_MASK 0xff
137#define KVM_ARM_IRQ_NUM_SHIFT 0
138#define KVM_ARM_IRQ_NUM_MASK 0xffff
139
140/* irq_type field */
141#define KVM_ARM_IRQ_TYPE_CPU 0
142#define KVM_ARM_IRQ_TYPE_SPI 1
143#define KVM_ARM_IRQ_TYPE_PPI 2
144
145/* out-of-kernel GIC cpu interrupt injection irq_number field */
146#define KVM_ARM_IRQ_CPU_IRQ 0
147#define KVM_ARM_IRQ_CPU_FIQ 1
148
149/* Highest supported SPI, from VGIC_NR_IRQS */
150#define KVM_ARM_IRQ_GIC_MAX 127
151
152/* PSCI interface */
153#define KVM_PSCI_FN_BASE 0x95c1ba5e
154#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
155
156#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0)
157#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1)
158#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2)
159#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3)
160
161#define KVM_PSCI_RET_SUCCESS 0
162#define KVM_PSCI_RET_NI ((unsigned long)-1)
163#define KVM_PSCI_RET_INVAL ((unsigned long)-2)
164#define KVM_PSCI_RET_DENIED ((unsigned long)-3)
165
166#endif
167
168#endif /* __ARM_KVM_H__ */
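For readers new to the ONE_REG interface, a worked example helps: the id passed to KVM_GET_ONE_REG/KVM_SET_ONE_REG is just the fields above OR'ed together. The sketch below (host-side, illustrative only) builds the id for MPIDR_EL1 from its Op0/Op1/CRn/CRm/Op2 encoding and recovers the access size with the same shift/mask arithmetic as KVM_REG_SIZE(); the EX_* constants are local mirrors of the definitions in this header and in the generic uapi/linux/kvm.h, and encode_sysreg_id() is a made-up helper, not a kernel API.

/* Sketch: build and decode an AArch64 sysreg id for the ONE_REG interface.
 * Constants mirror asm/kvm.h and linux/kvm.h; encode_sysreg_id() is a
 * hypothetical helper used only for this illustration. */
#include <stdio.h>
#include <stdint.h>

#define EX_KVM_REG_ARM64         0x6000000000000000ULL
#define EX_KVM_REG_SIZE_U64      0x0030000000000000ULL
#define EX_KVM_REG_SIZE_MASK     0x00f0000000000000ULL
#define EX_KVM_REG_SIZE_SHIFT    52
#define EX_KVM_REG_ARM64_SYSREG  (0x0013ULL << 16)

static uint64_t encode_sysreg_id(int op0, int op1, int crn, int crm, int op2)
{
        return EX_KVM_REG_ARM64 | EX_KVM_REG_SIZE_U64 | EX_KVM_REG_ARM64_SYSREG |
               ((uint64_t)op0 << 14) | ((uint64_t)op1 << 11) |
               ((uint64_t)crn << 7)  | ((uint64_t)crm << 3)  | (uint64_t)op2;
}

int main(void)
{
        /* MPIDR_EL1 is encoded as Op0=3, Op1=0, CRn=0, CRm=0, Op2=5 */
        uint64_t id = encode_sysreg_id(3, 0, 0, 0, 5);
        uint64_t size = 1ULL << ((id & EX_KVM_REG_SIZE_MASK) >> EX_KVM_REG_SIZE_SHIFT);

        printf("id = %#llx, access size = %llu bytes\n",
               (unsigned long long)id, (unsigned long long)size);
        return 0;
}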
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index a2a4d810bea3..49c162c03b69 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -104,5 +104,38 @@ int main(void)
104 BLANK(); 104 BLANK();
105 DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); 105 DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
106 DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); 106 DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
107 BLANK();
108#ifdef CONFIG_KVM_ARM_HOST
109 DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
110 DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
111 DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
112 DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
113 DEFINE(CPU_SP_EL1, offsetof(struct kvm_regs, sp_el1));
114 DEFINE(CPU_ELR_EL1, offsetof(struct kvm_regs, elr_el1));
115 DEFINE(CPU_SPSR, offsetof(struct kvm_regs, spsr));
116 DEFINE(CPU_SYSREGS, offsetof(struct kvm_cpu_context, sys_regs));
117 DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2));
118 DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2));
119 DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
120 DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
121 DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
122 DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
123 DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
124 DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
125 DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
126 DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
127 DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
128 DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
129 DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
130 DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr));
131 DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr));
132 DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr));
133 DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr));
134 DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
135 DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
136 DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
137 DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
138 DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
139#endif
107 return 0; 140 return 0;
108} 141}
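A note on the asm-offsets machinery used here, since hyp.S below relies on it: DEFINE() does not produce runtime data; it emits a "->NAME value" marker into the compiler's assembly output, which a Kbuild script turns into plain #defines in include/generated/asm-offsets.h. The shape of the macro (paraphrased from include/linux/kbuild.h; check the tree for the exact form) is roughly:

/* Paraphrased asm-offsets pattern: the compiler folds offsetof() into an
 * immediate, and the "->" marker line is later scraped into a header of
 * #defines that assembly files can include. */
#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

/* After the generation step, hyp.S can then write e.g.
 *         ldr     x2, [x0, #VCPU_HOST_CONTEXT]
 * instead of hard-coding the offset of arch.host_cpu_context. */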
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 5e06a1786e26..f5e55747242f 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -17,6 +17,19 @@ ENTRY(stext)
17 17
18jiffies = jiffies_64; 18jiffies = jiffies_64;
19 19
20#define HYPERVISOR_TEXT \
21 /* \
22 * Force the alignment to be compatible with \
23 * the vectors requirements \
24 */ \
25 . = ALIGN(2048); \
26 VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
27 *(.hyp.idmap.text) \
28 VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \
29 VMLINUX_SYMBOL(__hyp_text_start) = .; \
30 *(.hyp.text) \
31 VMLINUX_SYMBOL(__hyp_text_end) = .;
32
20SECTIONS 33SECTIONS
21{ 34{
22 /* 35 /*
@@ -49,6 +62,7 @@ SECTIONS
49 TEXT_TEXT 62 TEXT_TEXT
50 SCHED_TEXT 63 SCHED_TEXT
51 LOCK_TEXT 64 LOCK_TEXT
65 HYPERVISOR_TEXT
52 *(.fixup) 66 *(.fixup)
53 *(.gnu.warning) 67 *(.gnu.warning)
54 . = ALIGN(16); 68 . = ALIGN(16);
@@ -116,3 +130,9 @@ SECTIONS
116 STABS_DEBUG 130 STABS_DEBUG
117 .comment 0 : { *(.comment) } 131 .comment 0 : { *(.comment) }
118} 132}
133
134/*
135 * The HYP init code can't be more than a page long.
136 */
137ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end),
138 "HYP init code too big")
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
new file mode 100644
index 000000000000..72a9fd583ad3
--- /dev/null
+++ b/arch/arm64/kvm/Makefile
@@ -0,0 +1,23 @@
1#
2# Makefile for Kernel-based Virtual Machine module
3#
4
5ccflags-y += -Ivirt/kvm -Iarch/arm64/kvm
6CFLAGS_arm.o := -I.
7CFLAGS_mmu.o := -I.
8
9KVM=../../../virt/kvm
10ARM=../../../arch/arm/kvm
11
12obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
13
14kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
15kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
16kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
17
18kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
19kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
20kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
21
22kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
23kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/emulate.c b/arch/arm64/kvm/emulate.c
new file mode 100644
index 000000000000..124418d17049
--- /dev/null
+++ b/arch/arm64/kvm/emulate.c
@@ -0,0 +1,158 @@
1/*
2 * (not much of an) Emulation layer for 32bit guests.
3 *
4 * Copyright (C) 2012,2013 - ARM Ltd
5 * Author: Marc Zyngier <marc.zyngier@arm.com>
6 *
7 * based on arch/arm/kvm/emulate.c
8 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
9 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
10 *
11 * This program is free software: you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program. If not, see <http://www.gnu.org/licenses/>.
22 */
23
24#include <linux/kvm_host.h>
25#include <asm/kvm_emulate.h>
26
27/*
28 * stolen from arch/arm/kernel/opcodes.c
29 *
30 * condition code lookup table
31 * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
32 *
33 * bit position in short is condition code: NZCV
34 */
35static const unsigned short cc_map[16] = {
36 0xF0F0, /* EQ == Z set */
37 0x0F0F, /* NE */
38 0xCCCC, /* CS == C set */
39 0x3333, /* CC */
40 0xFF00, /* MI == N set */
41 0x00FF, /* PL */
42 0xAAAA, /* VS == V set */
43 0x5555, /* VC */
44 0x0C0C, /* HI == C set && Z clear */
45 0xF3F3, /* LS == C clear || Z set */
46 0xAA55, /* GE == (N==V) */
47 0x55AA, /* LT == (N!=V) */
48 0x0A05, /* GT == (!Z && (N==V)) */
49 0xF5FA, /* LE == (Z || (N!=V)) */
50 0xFFFF, /* AL always */
51 0 /* NV */
52};
53
54static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
55{
56 u32 esr = kvm_vcpu_get_hsr(vcpu);
57
58 if (esr & ESR_EL2_CV)
59 return (esr & ESR_EL2_COND) >> ESR_EL2_COND_SHIFT;
60
61 return -1;
62}
63
64/*
65 * Check if a trapped instruction should have been executed or not.
66 */
67bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
68{
69 unsigned long cpsr;
70 u32 cpsr_cond;
71 int cond;
72
73 /* Top two bits non-zero? Unconditional. */
74 if (kvm_vcpu_get_hsr(vcpu) >> 30)
75 return true;
76
77 /* Is condition field valid? */
78 cond = kvm_vcpu_get_condition(vcpu);
79 if (cond == 0xE)
80 return true;
81
82 cpsr = *vcpu_cpsr(vcpu);
83
84 if (cond < 0) {
85 /* This can happen in Thumb mode: examine IT state. */
86 unsigned long it;
87
88 it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
89
90 /* it == 0 => unconditional. */
91 if (it == 0)
92 return true;
93
94 /* The cond for this insn works out as the top 4 bits. */
95 cond = (it >> 4);
96 }
97
98 cpsr_cond = cpsr >> 28;
99
100 if (!((cc_map[cond] >> cpsr_cond) & 1))
101 return false;
102
103 return true;
104}
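To make the table above concrete, here is a tiny self-contained sketch of the same check: the NZCV flags (CPSR[31:28]) pick a bit out of the 16-bit entry selected by the condition code, so one shift and AND decides whether the trapped instruction would have executed. The table is copied from above; the values in main() are made up for illustration.

#include <stdio.h>

/* Same lookup as cc_map above: index = condition code, bit = NZCV value. */
static const unsigned short cc_map[16] = {
        0xF0F0, 0x0F0F, 0xCCCC, 0x3333, 0xFF00, 0x00FF, 0xAAAA, 0x5555,
        0x0C0C, 0xF3F3, 0xAA55, 0x55AA, 0x0A05, 0xF5FA, 0xFFFF, 0x0000,
};

static int condition_passes(unsigned int cpsr, unsigned int cond)
{
        unsigned int nzcv = cpsr >> 28;        /* N=8, Z=4, C=2, V=1 */

        return (cc_map[cond] >> nzcv) & 1;
}

int main(void)
{
        unsigned int cpsr_z_set = 0x40000000;  /* only the Z flag set */

        printf("EQ with Z set: %d\n", condition_passes(cpsr_z_set, 0x0)); /* 1 */
        printf("NE with Z set: %d\n", condition_passes(cpsr_z_set, 0x1)); /* 0 */
        return 0;
}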
105
106/**
 107 * kvm_adjust_itstate - adjust ITSTATE when emulating instructions in an IT-block
108 * @vcpu: The VCPU pointer
109 *
110 * When exceptions occur while instructions are executed in Thumb IF-THEN
111 * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
112 * to do this little bit of work manually. The fields map like this:
113 *
114 * IT[7:0] -> CPSR[26:25],CPSR[15:10]
115 */
116static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
117{
118 unsigned long itbits, cond;
119 unsigned long cpsr = *vcpu_cpsr(vcpu);
120 bool is_arm = !(cpsr & COMPAT_PSR_T_BIT);
121
122 BUG_ON(is_arm && (cpsr & COMPAT_PSR_IT_MASK));
123
124 if (!(cpsr & COMPAT_PSR_IT_MASK))
125 return;
126
127 cond = (cpsr & 0xe000) >> 13;
128 itbits = (cpsr & 0x1c00) >> (10 - 2);
129 itbits |= (cpsr & (0x3 << 25)) >> 25;
130
131 /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
132 if ((itbits & 0x7) == 0)
133 itbits = cond = 0;
134 else
135 itbits = (itbits << 1) & 0x1f;
136
137 cpsr &= ~COMPAT_PSR_IT_MASK;
138 cpsr |= cond << 13;
139 cpsr |= (itbits & 0x1c) << (10 - 2);
140 cpsr |= (itbits & 0x3) << 25;
141 *vcpu_cpsr(vcpu) = cpsr;
142}
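A worked example of the advance step above, pulled out into a stand-alone function that operates on the reassembled 8-bit ITSTATE (base condition in bits [7:5], remaining condition/mask bits in [4:0]); the starting value in main() is arbitrary and only there to show the state draining to zero.

#include <stdio.h>

/* Stand-alone version of the ITAdvance logic performed by kvm_adjust_itstate(). */
static unsigned int it_advance(unsigned int it)
{
        unsigned int cond = it >> 5;
        unsigned int itbits = it & 0x1f;

        if ((itbits & 0x7) == 0)
                return 0;                       /* IT block finished */

        return (cond << 5) | ((itbits << 1) & 0x1f);
}

int main(void)
{
        unsigned int it = 0x0b;                 /* made-up ITSTATE value */

        while (it) {
                printf("ITSTATE = %#04x\n", it);
                it = it_advance(it);
        }
        return 0;
}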
143
144/**
 145 * kvm_skip_instr32 - skip a trapped instruction and proceed to the next
146 * @vcpu: The vcpu pointer
147 */
148void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
149{
150 bool is_thumb;
151
152 is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT);
153 if (is_thumb && !is_wide_instr)
154 *vcpu_pc(vcpu) += 2;
155 else
156 *vcpu_pc(vcpu) += 4;
157 kvm_adjust_itstate(vcpu);
158}
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
new file mode 100644
index 000000000000..2c3ff67a8ecb
--- /dev/null
+++ b/arch/arm64/kvm/guest.c
@@ -0,0 +1,265 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/kvm/guest.c:
6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include <linux/errno.h>
23#include <linux/err.h>
24#include <linux/kvm_host.h>
25#include <linux/module.h>
26#include <linux/vmalloc.h>
27#include <linux/fs.h>
28#include <asm/cputype.h>
29#include <asm/uaccess.h>
30#include <asm/kvm.h>
31#include <asm/kvm_asm.h>
32#include <asm/kvm_emulate.h>
33#include <asm/kvm_coproc.h>
34
35struct kvm_stats_debugfs_item debugfs_entries[] = {
36 { NULL }
37};
38
39int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
40{
41 vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
42 return 0;
43}
44
45static u64 core_reg_offset_from_id(u64 id)
46{
47 return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
48}
49
50static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
51{
52 /*
53 * Because the kvm_regs structure is a mix of 32, 64 and
 54 * 128bit fields, we index it as if it were a 32bit
 55 * array. Hence below, nr_regs is the number of entries, and
 56 * off is the index into that "array".
57 */
58 __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
59 struct kvm_regs *regs = vcpu_gp_regs(vcpu);
60 int nr_regs = sizeof(*regs) / sizeof(__u32);
61 u32 off;
62
63 /* Our ID is an index into the kvm_regs struct. */
64 off = core_reg_offset_from_id(reg->id);
65 if (off >= nr_regs ||
66 (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
67 return -ENOENT;
68
69 if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
70 return -EFAULT;
71
72 return 0;
73}
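To see what this "index into kvm_regs" looks like from the other side of the ABI, the fragment below computes a couple of core-register ids the way userspace would hand them to KVM_GET_ONE_REG; it is a sketch that assumes an arm64 host with these uapi headers installed, and the resulting numbers simply follow the struct kvm_regs layout defined earlier in this series.

/* Sketch: build core-register ids as consumed by get/set_core_reg(). */
#include <stdio.h>
#include <linux/kvm.h>          /* KVM_REG_ARM64, KVM_REG_SIZE_U64 */
#include <asm/kvm.h>            /* struct kvm_regs, KVM_REG_ARM_CORE_REG() */

int main(void)
{
        unsigned long long x0_id = KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
                        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]);
        unsigned long long pc_id = KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
                        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pc);

        printf("x0 id = %#llx\npc id = %#llx\n", x0_id, pc_id);
        return 0;
}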
74
75static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
76{
77 __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
78 struct kvm_regs *regs = vcpu_gp_regs(vcpu);
79 int nr_regs = sizeof(*regs) / sizeof(__u32);
80 __uint128_t tmp;
81 void *valp = &tmp;
82 u64 off;
83 int err = 0;
84
85 /* Our ID is an index into the kvm_regs struct. */
86 off = core_reg_offset_from_id(reg->id);
87 if (off >= nr_regs ||
88 (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
89 return -ENOENT;
90
91 if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
92 return -EINVAL;
93
94 if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
95 err = -EFAULT;
96 goto out;
97 }
98
99 if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
100 u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
101 switch (mode) {
102 case COMPAT_PSR_MODE_USR:
103 case COMPAT_PSR_MODE_FIQ:
104 case COMPAT_PSR_MODE_IRQ:
105 case COMPAT_PSR_MODE_SVC:
106 case COMPAT_PSR_MODE_ABT:
107 case COMPAT_PSR_MODE_UND:
108 case PSR_MODE_EL0t:
109 case PSR_MODE_EL1t:
110 case PSR_MODE_EL1h:
111 break;
112 default:
113 err = -EINVAL;
114 goto out;
115 }
116 }
117
118 memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
119out:
120 return err;
121}
122
123int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
124{
125 return -EINVAL;
126}
127
128int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
129{
130 return -EINVAL;
131}
132
133static unsigned long num_core_regs(void)
134{
135 return sizeof(struct kvm_regs) / sizeof(__u32);
136}
137
138/**
139 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
140 *
141 * This is for all registers.
142 */
143unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
144{
145 return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu);
146}
147
148/**
149 * kvm_arm_copy_reg_indices - get indices of all registers.
150 *
 151 * We do core registers right here, then we append system regs.
152 */
153int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
154{
155 unsigned int i;
156 const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
157
158 for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
159 if (put_user(core_reg | i, uindices))
160 return -EFAULT;
161 uindices++;
162 }
163
164 return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
165}
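From userspace this enumeration is consumed through KVM_GET_REG_LIST: call it once with n = 0 to learn the count (the ioctl fails with E2BIG but fills in n), allocate, call again, then read individual registers with KVM_GET_ONE_REG. A sketch with error handling elided, where vcpu_fd is assumed to be an already-created vcpu descriptor:

/* Sketch of the KVM_GET_REG_LIST / KVM_GET_ONE_REG pattern. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void dump_regs(int vcpu_fd)
{
        struct kvm_reg_list probe = { .n = 0 };
        struct kvm_reg_list *list;
        unsigned long long i;

        ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);       /* fails, fills .n */

        list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
        list->n = probe.n;
        ioctl(vcpu_fd, KVM_GET_REG_LIST, list);

        for (i = 0; i < list->n; i++) {
                __u64 val = 0;
                struct kvm_one_reg reg = {
                        .id   = list->reg[i],
                        .addr = (__u64)(unsigned long)&val,
                };

                /* Skip the 128-bit FP/SIMD registers in this simple dump. */
                if (KVM_REG_SIZE(reg.id) <= sizeof(val) &&
                    !ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
                        printf("%#llx = %#llx\n",
                               (unsigned long long)reg.id,
                               (unsigned long long)val);
        }
        free(list);
}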
166
167int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
168{
169 /* We currently use nothing arch-specific in upper 32 bits */
170 if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
171 return -EINVAL;
172
173 /* Register group 16 means we want a core register. */
174 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
175 return get_core_reg(vcpu, reg);
176
177 return kvm_arm_sys_reg_get_reg(vcpu, reg);
178}
179
180int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
181{
182 /* We currently use nothing arch-specific in upper 32 bits */
183 if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
184 return -EINVAL;
185
186 /* Register group 16 means we set a core register. */
187 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
188 return set_core_reg(vcpu, reg);
189
190 return kvm_arm_sys_reg_set_reg(vcpu, reg);
191}
192
193int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
194 struct kvm_sregs *sregs)
195{
196 return -EINVAL;
197}
198
199int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
200 struct kvm_sregs *sregs)
201{
202 return -EINVAL;
203}
204
205int __attribute_const__ kvm_target_cpu(void)
206{
207 unsigned long implementor = read_cpuid_implementor();
208 unsigned long part_number = read_cpuid_part_number();
209
210 if (implementor != ARM_CPU_IMP_ARM)
211 return -EINVAL;
212
213 switch (part_number) {
214 case ARM_CPU_PART_AEM_V8:
215 return KVM_ARM_TARGET_AEM_V8;
216 case ARM_CPU_PART_FOUNDATION:
217 return KVM_ARM_TARGET_FOUNDATION_V8;
218 case ARM_CPU_PART_CORTEX_A57:
219 /* Currently handled by the generic backend */
220 return KVM_ARM_TARGET_CORTEX_A57;
221 default:
222 return -EINVAL;
223 }
224}
225
226int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
227 const struct kvm_vcpu_init *init)
228{
229 unsigned int i;
230 int phys_target = kvm_target_cpu();
231
232 if (init->target != phys_target)
233 return -EINVAL;
234
235 vcpu->arch.target = phys_target;
236 bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
237
238 /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
239 for (i = 0; i < sizeof(init->features) * 8; i++) {
240 if (init->features[i / 32] & (1 << (i % 32))) {
241 if (i >= KVM_VCPU_MAX_FEATURES)
242 return -ENOENT;
243 set_bit(i, vcpu->arch.features);
244 }
245 }
246
247 /* Now we know what it is, we can reset it. */
248 return kvm_reset_vcpu(vcpu);
249}
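On the userspace side the feature bits end up in the features[] array of struct kvm_vcpu_init, passed through KVM_ARM_VCPU_INIT. A sketch (error handling omitted; the target must match what kvm_target_cpu() reports for the host, Cortex-A57 is used here purely as an example, and vcpu_fd is assumed to exist):

/* Sketch: initialise a vcpu so that its EL1 runs in AArch32 state. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int init_32bit_vcpu(int vcpu_fd)
{
        struct kvm_vcpu_init init;

        memset(&init, 0, sizeof(init));
        init.target = KVM_ARM_TARGET_CORTEX_A57;        /* example target */
        init.features[0] |= 1U << KVM_ARM_VCPU_EL1_32BIT;

        return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}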
250
251int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
252{
253 return -EINVAL;
254}
255
256int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
257{
258 return -EINVAL;
259}
260
261int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
262 struct kvm_translation *tr)
263{
264 return -EINVAL;
265}
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
new file mode 100644
index 000000000000..9beaca033437
--- /dev/null
+++ b/arch/arm64/kvm/handle_exit.c
@@ -0,0 +1,124 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/kvm/handle_exit.c:
6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <asm/kvm_emulate.h>
25#include <asm/kvm_coproc.h>
26#include <asm/kvm_mmu.h>
27#include <asm/kvm_psci.h>
28
29typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
30
31static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
32{
33 if (kvm_psci_call(vcpu))
34 return 1;
35
36 kvm_inject_undefined(vcpu);
37 return 1;
38}
39
40static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
41{
42 if (kvm_psci_call(vcpu))
43 return 1;
44
45 kvm_inject_undefined(vcpu);
46 return 1;
47}
48
49/**
50 * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
51 * @vcpu: the vcpu pointer
52 *
 53 * Simply call kvm_vcpu_block(), which halts the vcpu's world switches
 54 * and schedules other host processes until there is an incoming IRQ or
 55 * FIQ for the VM.
56 */
57static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
58{
59 kvm_vcpu_block(vcpu);
60 return 1;
61}
62
63static exit_handle_fn arm_exit_handlers[] = {
64 [ESR_EL2_EC_WFI] = kvm_handle_wfi,
65 [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32,
66 [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64,
67 [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_access,
68 [ESR_EL2_EC_CP14_LS] = kvm_handle_cp14_load_store,
69 [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_access,
70 [ESR_EL2_EC_HVC32] = handle_hvc,
71 [ESR_EL2_EC_SMC32] = handle_smc,
72 [ESR_EL2_EC_HVC64] = handle_hvc,
73 [ESR_EL2_EC_SMC64] = handle_smc,
74 [ESR_EL2_EC_SYS64] = kvm_handle_sys_reg,
75 [ESR_EL2_EC_IABT] = kvm_handle_guest_abort,
76 [ESR_EL2_EC_DABT] = kvm_handle_guest_abort,
77};
78
79static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
80{
81 u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
82
83 if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
84 !arm_exit_handlers[hsr_ec]) {
85 kvm_err("Unkown exception class: hsr: %#08x\n",
86 (unsigned int)kvm_vcpu_get_hsr(vcpu));
87 BUG();
88 }
89
90 return arm_exit_handlers[hsr_ec];
91}
92
93/*
94 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
95 * proper exit to userspace.
96 */
97int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
98 int exception_index)
99{
100 exit_handle_fn exit_handler;
101
102 switch (exception_index) {
103 case ARM_EXCEPTION_IRQ:
104 return 1;
105 case ARM_EXCEPTION_TRAP:
106 /*
107 * See ARM ARM B1.14.1: "Hyp traps on instructions
108 * that fail their condition code check"
109 */
110 if (!kvm_condition_valid(vcpu)) {
111 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
112 return 1;
113 }
114
115 exit_handler = kvm_get_exit_handler(vcpu);
116
117 return exit_handler(vcpu, run);
118 default:
119 kvm_pr_unimpl("Unsupported exception type: %d",
120 exception_index);
121 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
122 return 0;
123 }
124}
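That return convention is what drives the usual VMM run loop: a return of 1 re-enters the guest on the next KVM_RUN, while 0 means run->exit_reason has been filled in and the ioctl comes back to userspace. A minimal consumer might look like the sketch below (vcpu_fd and the mmap'ed struct kvm_run are assumed to be set up already, error handling is elided):

/* Sketch of the userspace side of the exit handling above. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void run_guest(int vcpu_fd, struct kvm_run *run)
{
        for (;;) {
                if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                        return;

                switch (run->exit_reason) {
                case KVM_EXIT_MMIO:
                        /* emulate the access described by run->mmio */
                        break;
                case KVM_EXIT_INTERNAL_ERROR:
                        fprintf(stderr, "internal error\n");
                        return;
                default:
                        fprintf(stderr, "unhandled exit %d\n", run->exit_reason);
                        return;
                }
        }
}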
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
new file mode 100644
index 000000000000..ba84e6705e20
--- /dev/null
+++ b/arch/arm64/kvm/hyp-init.S
@@ -0,0 +1,107 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/linkage.h>
19
20#include <asm/assembler.h>
21#include <asm/kvm_arm.h>
22#include <asm/kvm_mmu.h>
23
24 .text
25 .pushsection .hyp.idmap.text, "ax"
26
27 .align 11
28
29ENTRY(__kvm_hyp_init)
30 ventry __invalid // Synchronous EL2t
31 ventry __invalid // IRQ EL2t
32 ventry __invalid // FIQ EL2t
33 ventry __invalid // Error EL2t
34
35 ventry __invalid // Synchronous EL2h
36 ventry __invalid // IRQ EL2h
37 ventry __invalid // FIQ EL2h
38 ventry __invalid // Error EL2h
39
40 ventry __do_hyp_init // Synchronous 64-bit EL1
41 ventry __invalid // IRQ 64-bit EL1
42 ventry __invalid // FIQ 64-bit EL1
43 ventry __invalid // Error 64-bit EL1
44
45 ventry __invalid // Synchronous 32-bit EL1
46 ventry __invalid // IRQ 32-bit EL1
47 ventry __invalid // FIQ 32-bit EL1
48 ventry __invalid // Error 32-bit EL1
49
50__invalid:
51 b .
52
53 /*
54 * x0: HYP boot pgd
55 * x1: HYP pgd
56 * x2: HYP stack
57 * x3: HYP vectors
58 */
59__do_hyp_init:
60
61 msr ttbr0_el2, x0
62
63 mrs x4, tcr_el1
64 ldr x5, =TCR_EL2_MASK
65 and x4, x4, x5
66 ldr x5, =TCR_EL2_FLAGS
67 orr x4, x4, x5
68 msr tcr_el2, x4
69
70 ldr x4, =VTCR_EL2_FLAGS
71 msr vtcr_el2, x4
72
73 mrs x4, mair_el1
74 msr mair_el2, x4
75 isb
76
77 mov x4, #SCTLR_EL2_FLAGS
78 msr sctlr_el2, x4
79 isb
80
81 /* MMU is now enabled. Get ready for the trampoline dance */
82 ldr x4, =TRAMPOLINE_VA
83 adr x5, target
84 bfi x4, x5, #0, #PAGE_SHIFT
85 br x4
86
87target: /* We're now in the trampoline code, switch page tables */
88 msr ttbr0_el2, x1
89 isb
90
91 /* Invalidate the old TLBs */
92 tlbi alle2
93 dsb sy
94
95 /* Set the stack and new vectors */
96 kern_hyp_va x2
97 mov sp, x2
98 kern_hyp_va x3
99 msr vbar_el2, x3
100
101 /* Hello, World! */
102 eret
103ENDPROC(__kvm_hyp_init)
104
105 .ltorg
106
107 .popsection
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
new file mode 100644
index 000000000000..ff985e3d8b72
--- /dev/null
+++ b/arch/arm64/kvm/hyp.S
@@ -0,0 +1,831 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/linkage.h>
19#include <linux/irqchip/arm-gic.h>
20
21#include <asm/assembler.h>
22#include <asm/memory.h>
23#include <asm/asm-offsets.h>
24#include <asm/fpsimdmacros.h>
25#include <asm/kvm.h>
26#include <asm/kvm_asm.h>
27#include <asm/kvm_arm.h>
28#include <asm/kvm_mmu.h>
29
30#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
31#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
32#define CPU_SPSR_OFFSET(x) CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
33#define CPU_SYSREG_OFFSET(x) (CPU_SYSREGS + 8*x)
34
35 .text
36 .pushsection .hyp.text, "ax"
37 .align PAGE_SHIFT
38
39__kvm_hyp_code_start:
40 .globl __kvm_hyp_code_start
41
42.macro save_common_regs
43 // x2: base address for cpu context
44 // x3: tmp register
45
46 add x3, x2, #CPU_XREG_OFFSET(19)
47 stp x19, x20, [x3]
48 stp x21, x22, [x3, #16]
49 stp x23, x24, [x3, #32]
50 stp x25, x26, [x3, #48]
51 stp x27, x28, [x3, #64]
52 stp x29, lr, [x3, #80]
53
54 mrs x19, sp_el0
55 mrs x20, elr_el2 // EL1 PC
56 mrs x21, spsr_el2 // EL1 pstate
57
58 stp x19, x20, [x3, #96]
59 str x21, [x3, #112]
60
61 mrs x22, sp_el1
62 mrs x23, elr_el1
63 mrs x24, spsr_el1
64
65 str x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
66 str x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
67 str x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
68.endm
69
70.macro restore_common_regs
71 // x2: base address for cpu context
72 // x3: tmp register
73
74 ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
75 ldr x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
76 ldr x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
77
78 msr sp_el1, x22
79 msr elr_el1, x23
80 msr spsr_el1, x24
81
82 add x3, x2, #CPU_XREG_OFFSET(31) // SP_EL0
83 ldp x19, x20, [x3]
84 ldr x21, [x3, #16]
85
86 msr sp_el0, x19
87 msr elr_el2, x20 // EL1 PC
88 msr spsr_el2, x21 // EL1 pstate
89
90 add x3, x2, #CPU_XREG_OFFSET(19)
91 ldp x19, x20, [x3]
92 ldp x21, x22, [x3, #16]
93 ldp x23, x24, [x3, #32]
94 ldp x25, x26, [x3, #48]
95 ldp x27, x28, [x3, #64]
96 ldp x29, lr, [x3, #80]
97.endm
98
99.macro save_host_regs
100 save_common_regs
101.endm
102
103.macro restore_host_regs
104 restore_common_regs
105.endm
106
107.macro save_fpsimd
108 // x2: cpu context address
109 // x3, x4: tmp regs
110 add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
111 fpsimd_save x3, 4
112.endm
113
114.macro restore_fpsimd
115 // x2: cpu context address
116 // x3, x4: tmp regs
117 add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
118 fpsimd_restore x3, 4
119.endm
120
121.macro save_guest_regs
122 // x0 is the vcpu address
123 // x1 is the return code, do not corrupt!
124 // x2 is the cpu context
125 // x3 is a tmp register
126 // Guest's x0-x3 are on the stack
127
128 // Compute base to save registers
129 add x3, x2, #CPU_XREG_OFFSET(4)
130 stp x4, x5, [x3]
131 stp x6, x7, [x3, #16]
132 stp x8, x9, [x3, #32]
133 stp x10, x11, [x3, #48]
134 stp x12, x13, [x3, #64]
135 stp x14, x15, [x3, #80]
136 stp x16, x17, [x3, #96]
137 str x18, [x3, #112]
138
139 pop x6, x7 // x2, x3
140 pop x4, x5 // x0, x1
141
142 add x3, x2, #CPU_XREG_OFFSET(0)
143 stp x4, x5, [x3]
144 stp x6, x7, [x3, #16]
145
146 save_common_regs
147.endm
148
149.macro restore_guest_regs
150 // x0 is the vcpu address.
151 // x2 is the cpu context
152 // x3 is a tmp register
153
154 // Prepare x0-x3 for later restore
155 add x3, x2, #CPU_XREG_OFFSET(0)
156 ldp x4, x5, [x3]
157 ldp x6, x7, [x3, #16]
158 push x4, x5 // Push x0-x3 on the stack
159 push x6, x7
160
161 // x4-x18
162 ldp x4, x5, [x3, #32]
163 ldp x6, x7, [x3, #48]
164 ldp x8, x9, [x3, #64]
165 ldp x10, x11, [x3, #80]
166 ldp x12, x13, [x3, #96]
167 ldp x14, x15, [x3, #112]
168 ldp x16, x17, [x3, #128]
169 ldr x18, [x3, #144]
170
171 // x19-x29, lr, sp*, elr*, spsr*
172 restore_common_regs
173
174 // Last bits of the 64bit state
175 pop x2, x3
176 pop x0, x1
177
178 // Do not touch any register after this!
179.endm
180
181/*
182 * Macros to perform system register save/restore.
183 *
184 * Ordering here is absolutely critical, and must be kept consistent
185 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
186 * and in kvm_asm.h.
187 *
188 * In other words, don't touch any of these unless you know what
189 * you are doing.
190 */
191.macro save_sysregs
192 // x2: base address for cpu context
193 // x3: tmp register
194
195 add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
196
197 mrs x4, vmpidr_el2
198 mrs x5, csselr_el1
199 mrs x6, sctlr_el1
200 mrs x7, actlr_el1
201 mrs x8, cpacr_el1
202 mrs x9, ttbr0_el1
203 mrs x10, ttbr1_el1
204 mrs x11, tcr_el1
205 mrs x12, esr_el1
206 mrs x13, afsr0_el1
207 mrs x14, afsr1_el1
208 mrs x15, far_el1
209 mrs x16, mair_el1
210 mrs x17, vbar_el1
211 mrs x18, contextidr_el1
212 mrs x19, tpidr_el0
213 mrs x20, tpidrro_el0
214 mrs x21, tpidr_el1
215 mrs x22, amair_el1
216 mrs x23, cntkctl_el1
217
218 stp x4, x5, [x3]
219 stp x6, x7, [x3, #16]
220 stp x8, x9, [x3, #32]
221 stp x10, x11, [x3, #48]
222 stp x12, x13, [x3, #64]
223 stp x14, x15, [x3, #80]
224 stp x16, x17, [x3, #96]
225 stp x18, x19, [x3, #112]
226 stp x20, x21, [x3, #128]
227 stp x22, x23, [x3, #144]
228.endm
229
230.macro restore_sysregs
231 // x2: base address for cpu context
232 // x3: tmp register
233
234 add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
235
236 ldp x4, x5, [x3]
237 ldp x6, x7, [x3, #16]
238 ldp x8, x9, [x3, #32]
239 ldp x10, x11, [x3, #48]
240 ldp x12, x13, [x3, #64]
241 ldp x14, x15, [x3, #80]
242 ldp x16, x17, [x3, #96]
243 ldp x18, x19, [x3, #112]
244 ldp x20, x21, [x3, #128]
245 ldp x22, x23, [x3, #144]
246
247 msr vmpidr_el2, x4
248 msr csselr_el1, x5
249 msr sctlr_el1, x6
250 msr actlr_el1, x7
251 msr cpacr_el1, x8
252 msr ttbr0_el1, x9
253 msr ttbr1_el1, x10
254 msr tcr_el1, x11
255 msr esr_el1, x12
256 msr afsr0_el1, x13
257 msr afsr1_el1, x14
258 msr far_el1, x15
259 msr mair_el1, x16
260 msr vbar_el1, x17
261 msr contextidr_el1, x18
262 msr tpidr_el0, x19
263 msr tpidrro_el0, x20
264 msr tpidr_el1, x21
265 msr amair_el1, x22
266 msr cntkctl_el1, x23
267.endm
268
269.macro skip_32bit_state tmp, target
270 // Skip 32bit state if not needed
271 mrs \tmp, hcr_el2
272 tbnz \tmp, #HCR_RW_SHIFT, \target
273.endm
274
275.macro skip_tee_state tmp, target
276 // Skip ThumbEE state if not needed
277 mrs \tmp, id_pfr0_el1
278 tbz \tmp, #12, \target
279.endm
280
281.macro save_guest_32bit_state
282 skip_32bit_state x3, 1f
283
284 add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
285 mrs x4, spsr_abt
286 mrs x5, spsr_und
287 mrs x6, spsr_irq
288 mrs x7, spsr_fiq
289 stp x4, x5, [x3]
290 stp x6, x7, [x3, #16]
291
292 add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
293 mrs x4, dacr32_el2
294 mrs x5, ifsr32_el2
295 mrs x6, fpexc32_el2
296 mrs x7, dbgvcr32_el2
297 stp x4, x5, [x3]
298 stp x6, x7, [x3, #16]
299
300 skip_tee_state x8, 1f
301
302 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
303 mrs x4, teecr32_el1
304 mrs x5, teehbr32_el1
305 stp x4, x5, [x3]
3061:
307.endm
308
309.macro restore_guest_32bit_state
310 skip_32bit_state x3, 1f
311
312 add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
313 ldp x4, x5, [x3]
314 ldp x6, x7, [x3, #16]
315 msr spsr_abt, x4
316 msr spsr_und, x5
317 msr spsr_irq, x6
318 msr spsr_fiq, x7
319
320 add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
321 ldp x4, x5, [x3]
322 ldp x6, x7, [x3, #16]
323 msr dacr32_el2, x4
324 msr ifsr32_el2, x5
325 msr fpexc32_el2, x6
326 msr dbgvcr32_el2, x7
327
328 skip_tee_state x8, 1f
329
330 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
331 ldp x4, x5, [x3]
332 msr teecr32_el1, x4
333 msr teehbr32_el1, x5
3341:
335.endm
336
337.macro activate_traps
338 ldr x2, [x0, #VCPU_IRQ_LINES]
339 ldr x1, [x0, #VCPU_HCR_EL2]
340 orr x2, x2, x1
341 msr hcr_el2, x2
342
343 ldr x2, =(CPTR_EL2_TTA)
344 msr cptr_el2, x2
345
346 ldr x2, =(1 << 15) // Trap CP15 Cr=15
347 msr hstr_el2, x2
348
349 mrs x2, mdcr_el2
350 and x2, x2, #MDCR_EL2_HPMN_MASK
351 orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
352 msr mdcr_el2, x2
353.endm
354
355.macro deactivate_traps
356 mov x2, #HCR_RW
357 msr hcr_el2, x2
358 msr cptr_el2, xzr
359 msr hstr_el2, xzr
360
361 mrs x2, mdcr_el2
362 and x2, x2, #MDCR_EL2_HPMN_MASK
363 msr mdcr_el2, x2
364.endm
365
366.macro activate_vm
367 ldr x1, [x0, #VCPU_KVM]
368 kern_hyp_va x1
369 ldr x2, [x1, #KVM_VTTBR]
370 msr vttbr_el2, x2
371.endm
372
373.macro deactivate_vm
374 msr vttbr_el2, xzr
375.endm
376
377/*
378 * Save the VGIC CPU state into memory
379 * x0: Register pointing to VCPU struct
380 * Do not corrupt x1!!!
381 */
382.macro save_vgic_state
383 /* Get VGIC VCTRL base into x2 */
384 ldr x2, [x0, #VCPU_KVM]
385 kern_hyp_va x2
386 ldr x2, [x2, #KVM_VGIC_VCTRL]
387 kern_hyp_va x2
388 cbz x2, 2f // disabled
389
390 /* Compute the address of struct vgic_cpu */
391 add x3, x0, #VCPU_VGIC_CPU
392
393 /* Save all interesting registers */
394 ldr w4, [x2, #GICH_HCR]
395 ldr w5, [x2, #GICH_VMCR]
396 ldr w6, [x2, #GICH_MISR]
397 ldr w7, [x2, #GICH_EISR0]
398 ldr w8, [x2, #GICH_EISR1]
399 ldr w9, [x2, #GICH_ELRSR0]
400 ldr w10, [x2, #GICH_ELRSR1]
401 ldr w11, [x2, #GICH_APR]
402
403 str w4, [x3, #VGIC_CPU_HCR]
404 str w5, [x3, #VGIC_CPU_VMCR]
405 str w6, [x3, #VGIC_CPU_MISR]
406 str w7, [x3, #VGIC_CPU_EISR]
407 str w8, [x3, #(VGIC_CPU_EISR + 4)]
408 str w9, [x3, #VGIC_CPU_ELRSR]
409 str w10, [x3, #(VGIC_CPU_ELRSR + 4)]
410 str w11, [x3, #VGIC_CPU_APR]
411
412 /* Clear GICH_HCR */
413 str wzr, [x2, #GICH_HCR]
414
415 /* Save list registers */
416 add x2, x2, #GICH_LR0
417 ldr w4, [x3, #VGIC_CPU_NR_LR]
418 add x3, x3, #VGIC_CPU_LR
4191: ldr w5, [x2], #4
420 str w5, [x3], #4
421 sub w4, w4, #1
422 cbnz w4, 1b
4232:
424.endm
425
426/*
427 * Restore the VGIC CPU state from memory
428 * x0: Register pointing to VCPU struct
429 */
430.macro restore_vgic_state
431 /* Get VGIC VCTRL base into x2 */
432 ldr x2, [x0, #VCPU_KVM]
433 kern_hyp_va x2
434 ldr x2, [x2, #KVM_VGIC_VCTRL]
435 kern_hyp_va x2
436 cbz x2, 2f // disabled
437
438 /* Compute the address of struct vgic_cpu */
439 add x3, x0, #VCPU_VGIC_CPU
440
441 /* We only restore a minimal set of registers */
442 ldr w4, [x3, #VGIC_CPU_HCR]
443 ldr w5, [x3, #VGIC_CPU_VMCR]
444 ldr w6, [x3, #VGIC_CPU_APR]
445
446 str w4, [x2, #GICH_HCR]
447 str w5, [x2, #GICH_VMCR]
448 str w6, [x2, #GICH_APR]
449
450 /* Restore list registers */
451 add x2, x2, #GICH_LR0
452 ldr w4, [x3, #VGIC_CPU_NR_LR]
453 add x3, x3, #VGIC_CPU_LR
4541: ldr w5, [x3], #4
455 str w5, [x2], #4
456 sub w4, w4, #1
457 cbnz w4, 1b
4582:
459.endm
460
461.macro save_timer_state
462 // x0: vcpu pointer
463 ldr x2, [x0, #VCPU_KVM]
464 kern_hyp_va x2
465 ldr w3, [x2, #KVM_TIMER_ENABLED]
466 cbz w3, 1f
467
468 mrs x3, cntv_ctl_el0
469 and x3, x3, #3
470 str w3, [x0, #VCPU_TIMER_CNTV_CTL]
471 bic x3, x3, #1 // Clear Enable
472 msr cntv_ctl_el0, x3
473
474 isb
475
476 mrs x3, cntv_cval_el0
477 str x3, [x0, #VCPU_TIMER_CNTV_CVAL]
478
4791:
480 // Allow physical timer/counter access for the host
481 mrs x2, cnthctl_el2
482 orr x2, x2, #3
483 msr cnthctl_el2, x2
484
485 // Clear cntvoff for the host
486 msr cntvoff_el2, xzr
487.endm
488
489.macro restore_timer_state
490 // x0: vcpu pointer
491 // Disallow physical timer access for the guest
492 // Physical counter access is allowed
493 mrs x2, cnthctl_el2
494 orr x2, x2, #1
495 bic x2, x2, #2
496 msr cnthctl_el2, x2
497
498 ldr x2, [x0, #VCPU_KVM]
499 kern_hyp_va x2
500 ldr w3, [x2, #KVM_TIMER_ENABLED]
501 cbz w3, 1f
502
503 ldr x3, [x2, #KVM_TIMER_CNTVOFF]
504 msr cntvoff_el2, x3
505 ldr x2, [x0, #VCPU_TIMER_CNTV_CVAL]
506 msr cntv_cval_el0, x2
507 isb
508
509 ldr w2, [x0, #VCPU_TIMER_CNTV_CTL]
510 and x2, x2, #3
511 msr cntv_ctl_el0, x2
5121:
513.endm
514
515__save_sysregs:
516 save_sysregs
517 ret
518
519__restore_sysregs:
520 restore_sysregs
521 ret
522
523__save_fpsimd:
524 save_fpsimd
525 ret
526
527__restore_fpsimd:
528 restore_fpsimd
529 ret
530
531/*
532 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
533 *
534 * This is the world switch. The first half of the function
535 * deals with entering the guest, and anything from __kvm_vcpu_return
536 * to the end of the function deals with reentering the host.
537 * On the enter path, only x0 (vcpu pointer) must be preserved until
538 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
539 * code) must both be preserved until the epilogue.
540 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
541 */
542ENTRY(__kvm_vcpu_run)
543 kern_hyp_va x0
544 msr tpidr_el2, x0 // Save the vcpu register
545
546 // Host context
547 ldr x2, [x0, #VCPU_HOST_CONTEXT]
548 kern_hyp_va x2
549
550 save_host_regs
551 bl __save_fpsimd
552 bl __save_sysregs
553
554 activate_traps
555 activate_vm
556
557 restore_vgic_state
558 restore_timer_state
559
560 // Guest context
561 add x2, x0, #VCPU_CONTEXT
562
563 bl __restore_sysregs
564 bl __restore_fpsimd
565 restore_guest_32bit_state
566 restore_guest_regs
567
568 // That's it, no more messing around.
569 eret
570
571__kvm_vcpu_return:
572 // Assume x0 is the vcpu pointer, x1 the return code
573 // Guest's x0-x3 are on the stack
574
575 // Guest context
576 add x2, x0, #VCPU_CONTEXT
577
578 save_guest_regs
579 bl __save_fpsimd
580 bl __save_sysregs
581 save_guest_32bit_state
582
583 save_timer_state
584 save_vgic_state
585
586 deactivate_traps
587 deactivate_vm
588
589 // Host context
590 ldr x2, [x0, #VCPU_HOST_CONTEXT]
591 kern_hyp_va x2
592
593 bl __restore_sysregs
594 bl __restore_fpsimd
595 restore_host_regs
596
597 mov x0, x1
598 ret
599END(__kvm_vcpu_run)
600
601// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
602ENTRY(__kvm_tlb_flush_vmid_ipa)
603 kern_hyp_va x0
604 ldr x2, [x0, #KVM_VTTBR]
605 msr vttbr_el2, x2
606 isb
607
608 /*
609 * We could do so much better if we had the VA as well.
610 * Instead, we invalidate Stage-2 for this IPA, and the
611 * whole of Stage-1. Weep...
612 */
613 tlbi ipas2e1is, x1
614 dsb sy
615 tlbi vmalle1is
616 dsb sy
617 isb
618
619 msr vttbr_el2, xzr
620 ret
621ENDPROC(__kvm_tlb_flush_vmid_ipa)
622
623ENTRY(__kvm_flush_vm_context)
624 tlbi alle1is
625 ic ialluis
626 dsb sy
627 ret
628ENDPROC(__kvm_flush_vm_context)
629
630__kvm_hyp_panic:
631 // Guess the context by looking at VTTBR:
632 // If zero, then we're already a host.
 633 // Otherwise restore a minimal host context before panicking.
634 mrs x0, vttbr_el2
635 cbz x0, 1f
636
637 mrs x0, tpidr_el2
638
639 deactivate_traps
640 deactivate_vm
641
642 ldr x2, [x0, #VCPU_HOST_CONTEXT]
643 kern_hyp_va x2
644
645 bl __restore_sysregs
646
6471: adr x0, __hyp_panic_str
648 adr x1, 2f
649 ldp x2, x3, [x1]
650 sub x0, x0, x2
651 add x0, x0, x3
652 mrs x1, spsr_el2
653 mrs x2, elr_el2
654 mrs x3, esr_el2
655 mrs x4, far_el2
656 mrs x5, hpfar_el2
657 mrs x6, par_el1
658 mrs x7, tpidr_el2
659
660 mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
661 PSR_MODE_EL1h)
662 msr spsr_el2, lr
663 ldr lr, =panic
664 msr elr_el2, lr
665 eret
666
667 .align 3
6682: .quad HYP_PAGE_OFFSET
669 .quad PAGE_OFFSET
670ENDPROC(__kvm_hyp_panic)
671
672__hyp_panic_str:
673 .ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
674
675 .align 2
676
677ENTRY(kvm_call_hyp)
678 hvc #0
679 ret
680ENDPROC(kvm_call_hyp)
681
682.macro invalid_vector label, target
683 .align 2
684\label:
685 b \target
686ENDPROC(\label)
687.endm
688
689 /* None of these should ever happen */
690 invalid_vector el2t_sync_invalid, __kvm_hyp_panic
691 invalid_vector el2t_irq_invalid, __kvm_hyp_panic
692 invalid_vector el2t_fiq_invalid, __kvm_hyp_panic
693 invalid_vector el2t_error_invalid, __kvm_hyp_panic
694 invalid_vector el2h_sync_invalid, __kvm_hyp_panic
695 invalid_vector el2h_irq_invalid, __kvm_hyp_panic
696 invalid_vector el2h_fiq_invalid, __kvm_hyp_panic
697 invalid_vector el2h_error_invalid, __kvm_hyp_panic
698 invalid_vector el1_sync_invalid, __kvm_hyp_panic
699 invalid_vector el1_irq_invalid, __kvm_hyp_panic
700 invalid_vector el1_fiq_invalid, __kvm_hyp_panic
701 invalid_vector el1_error_invalid, __kvm_hyp_panic
702
703el1_sync: // Guest trapped into EL2
704 push x0, x1
705 push x2, x3
706
707 mrs x1, esr_el2
708 lsr x2, x1, #ESR_EL2_EC_SHIFT
709
710 cmp x2, #ESR_EL2_EC_HVC64
711 b.ne el1_trap
712
713 mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest
714 cbnz x3, el1_trap // called HVC
715
716 /* Here, we're pretty sure the host called HVC. */
717 pop x2, x3
718 pop x0, x1
719
720 push lr, xzr
721
722 /*
723 * Compute the function address in EL2, and shuffle the parameters.
724 */
725 kern_hyp_va x0
726 mov lr, x0
727 mov x0, x1
728 mov x1, x2
729 mov x2, x3
730 blr lr
731
732 pop lr, xzr
733 eret
734
735el1_trap:
736 /*
737 * x1: ESR
738 * x2: ESR_EC
739 */
740 cmp x2, #ESR_EL2_EC_DABT
741 mov x0, #ESR_EL2_EC_IABT
742 ccmp x2, x0, #4, ne
743 b.ne 1f // Not an abort we care about
744
745 /* This is an abort. Check for permission fault */
746 and x2, x1, #ESR_EL2_FSC_TYPE
747 cmp x2, #FSC_PERM
748 b.ne 1f // Not a permission fault
749
750 /*
751 * Check for Stage-1 page table walk, which is guaranteed
752 * to give a valid HPFAR_EL2.
753 */
754 tbnz x1, #7, 1f // S1PTW is set
755
756 /*
757 * Permission fault, HPFAR_EL2 is invalid.
758 * Resolve the IPA the hard way using the guest VA.
759 * Stage-1 translation already validated the memory access rights.
760 * As such, we can use the EL1 translation regime, and don't have
761 * to distinguish between EL0 and EL1 access.
762 */
763 mrs x2, far_el2
764 at s1e1r, x2
765 isb
766
767 /* Read result */
768 mrs x3, par_el1
769 tbnz x3, #0, 3f // Bail out if we failed the translation
770 ubfx x3, x3, #12, #36 // Extract IPA
771 lsl x3, x3, #4 // and present it like HPFAR
772 b 2f
773
7741: mrs x3, hpfar_el2
775 mrs x2, far_el2
776
7772: mrs x0, tpidr_el2
778 str x1, [x0, #VCPU_ESR_EL2]
779 str x2, [x0, #VCPU_FAR_EL2]
780 str x3, [x0, #VCPU_HPFAR_EL2]
781
782 mov x1, #ARM_EXCEPTION_TRAP
783 b __kvm_vcpu_return
784
785 /*
786 * Translation failed. Just return to the guest and
787 * let it fault again. Another CPU is probably playing
788 * behind our back.
789 */
7903: pop x2, x3
791 pop x0, x1
792
793 eret
794
795el1_irq:
796 push x0, x1
797 push x2, x3
798 mrs x0, tpidr_el2
799 mov x1, #ARM_EXCEPTION_IRQ
800 b __kvm_vcpu_return
801
802 .ltorg
803
804 .align 11
805
806ENTRY(__kvm_hyp_vector)
807 ventry el2t_sync_invalid // Synchronous EL2t
808 ventry el2t_irq_invalid // IRQ EL2t
809 ventry el2t_fiq_invalid // FIQ EL2t
810 ventry el2t_error_invalid // Error EL2t
811
812 ventry el2h_sync_invalid // Synchronous EL2h
813 ventry el2h_irq_invalid // IRQ EL2h
814 ventry el2h_fiq_invalid // FIQ EL2h
815 ventry el2h_error_invalid // Error EL2h
816
817 ventry el1_sync // Synchronous 64-bit EL1
818 ventry el1_irq // IRQ 64-bit EL1
819 ventry el1_fiq_invalid // FIQ 64-bit EL1
820 ventry el1_error_invalid // Error 64-bit EL1
821
822 ventry el1_sync // Synchronous 32-bit EL1
823 ventry el1_irq // IRQ 32-bit EL1
824 ventry el1_fiq_invalid // FIQ 32-bit EL1
825 ventry el1_error_invalid // Error 32-bit EL1
826ENDPROC(__kvm_hyp_vector)
827
828__kvm_hyp_code_end:
829 .globl __kvm_hyp_code_end
830
831 .popsection
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
new file mode 100644
index 000000000000..81a02a8762b0
--- /dev/null
+++ b/arch/arm64/kvm/inject_fault.c
@@ -0,0 +1,203 @@
1/*
2 * Fault injection for both 32 and 64bit guests.
3 *
4 * Copyright (C) 2012,2013 - ARM Ltd
5 * Author: Marc Zyngier <marc.zyngier@arm.com>
6 *
7 * Based on arch/arm/kvm/emulate.c
8 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
9 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
10 *
11 * This program is free software: you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program. If not, see <http://www.gnu.org/licenses/>.
22 */
23
24#include <linux/kvm_host.h>
25#include <asm/kvm_emulate.h>
26#include <asm/esr.h>
27
28#define PSTATE_FAULT_BITS_64 (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
29 PSR_I_BIT | PSR_D_BIT)
30#define EL1_EXCEPT_SYNC_OFFSET 0x200
31
32static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
33{
34 unsigned long cpsr;
35 unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
36 bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
37 u32 return_offset = (is_thumb) ? 4 : 0;
38 u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
39
40 cpsr = mode | COMPAT_PSR_I_BIT;
41
42 if (sctlr & (1 << 30))
43 cpsr |= COMPAT_PSR_T_BIT;
44 if (sctlr & (1 << 25))
45 cpsr |= COMPAT_PSR_E_BIT;
46
47 *vcpu_cpsr(vcpu) = cpsr;
48
49 /* Note: These now point to the banked copies */
50 *vcpu_spsr(vcpu) = new_spsr_value;
51 *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
52
53 /* Branch to exception vector */
54 if (sctlr & (1 << 13))
55 vect_offset += 0xffff0000;
56 else /* always have security exceptions */
57 vect_offset += vcpu_cp15(vcpu, c12_VBAR);
58
59 *vcpu_pc(vcpu) = vect_offset;
60}
61
62static void inject_undef32(struct kvm_vcpu *vcpu)
63{
64 prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
65}
66
67/*
68 * Modelled after TakeDataAbortException() and TakePrefetchAbortException
69 * pseudocode.
70 */
71static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
72 unsigned long addr)
73{
74 u32 vect_offset;
75 u32 *far, *fsr;
76 bool is_lpae;
77
78 if (is_pabt) {
79 vect_offset = 12;
80 far = &vcpu_cp15(vcpu, c6_IFAR);
81 fsr = &vcpu_cp15(vcpu, c5_IFSR);
82 } else { /* !iabt */
83 vect_offset = 16;
84 far = &vcpu_cp15(vcpu, c6_DFAR);
85 fsr = &vcpu_cp15(vcpu, c5_DFSR);
86 }
87
88 prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
89
90 *far = addr;
91
92 /* Give the guest an IMPLEMENTATION DEFINED exception */
93 is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
94 if (is_lpae)
95 *fsr = 1 << 9 | 0x34;
96 else
97 *fsr = 0x14;
98}
99
100static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
101{
102 unsigned long cpsr = *vcpu_cpsr(vcpu);
103 bool is_aarch32;
104 u32 esr = 0;
105
106 is_aarch32 = vcpu_mode_is_32bit(vcpu);
107
108 *vcpu_spsr(vcpu) = cpsr;
109 *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
110
111 *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
112 *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
113
114 vcpu_sys_reg(vcpu, FAR_EL1) = addr;
115
116 /*
117 * Build an {i,d}abort, depending on the level and the
118 * instruction set. Report an external synchronous abort.
119 */
120 if (kvm_vcpu_trap_il_is32bit(vcpu))
121 esr |= ESR_EL1_IL;
122
123 /*
124 * Here, the guest runs in AArch64 mode when in EL1. If we get
125 * an AArch32 fault, it means we managed to trap an EL0 fault.
126 */
127 if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
128 esr |= (ESR_EL1_EC_IABT_EL0 << ESR_EL1_EC_SHIFT);
129 else
130 esr |= (ESR_EL1_EC_IABT_EL1 << ESR_EL1_EC_SHIFT);
131
132 if (!is_iabt)
133 esr |= ESR_EL1_EC_DABT_EL0;
134
135 vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_EL2_EC_xABT_xFSR_EXTABT;
136}
137
138static void inject_undef64(struct kvm_vcpu *vcpu)
139{
140 unsigned long cpsr = *vcpu_cpsr(vcpu);
141 u32 esr = (ESR_EL1_EC_UNKNOWN << ESR_EL1_EC_SHIFT);
142
143 *vcpu_spsr(vcpu) = cpsr;
144 *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
145
146 *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
147 *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
148
149 /*
150 * Build an unknown exception, depending on the instruction
151 * set.
152 */
153 if (kvm_vcpu_trap_il_is32bit(vcpu))
154 esr |= ESR_EL1_IL;
155
156 vcpu_sys_reg(vcpu, ESR_EL1) = esr;
157}
158
159/**
160 * kvm_inject_dabt - inject a data abort into the guest
 161 * @vcpu: The VCPU to receive the data abort
162 * @addr: The address to report in the DFAR
163 *
164 * It is assumed that this code is called from the VCPU thread and that the
165 * VCPU therefore is not currently executing guest code.
166 */
167void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
168{
169 if (!(vcpu->arch.hcr_el2 & HCR_RW))
170 inject_abt32(vcpu, false, addr);
171
172 inject_abt64(vcpu, false, addr);
173}
174
175/**
176 * kvm_inject_pabt - inject a prefetch abort into the guest
 177 * @vcpu: The VCPU to receive the prefetch abort
 178 * @addr: The address to report in the IFAR
179 *
180 * It is assumed that this code is called from the VCPU thread and that the
181 * VCPU therefore is not currently executing guest code.
182 */
183void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
184{
185 if (!(vcpu->arch.hcr_el2 & HCR_RW))
186 inject_abt32(vcpu, true, addr);
187
188 inject_abt64(vcpu, true, addr);
189}
190
191/**
192 * kvm_inject_undefined - inject an undefined instruction into the guest
193 *
194 * It is assumed that this code is called from the VCPU thread and that the
195 * VCPU therefore is not currently executing guest code.
196 */
197void kvm_inject_undefined(struct kvm_vcpu *vcpu)
198{
199 if (!(vcpu->arch.hcr_el2 & HCR_RW))
200 inject_undef32(vcpu);
201
202 inject_undef64(vcpu);
203}
diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
new file mode 100644
index 000000000000..bbc6ae32e4af
--- /dev/null
+++ b/arch/arm64/kvm/regmap.c
@@ -0,0 +1,168 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/kvm/emulate.c:
6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include <linux/mm.h>
23#include <linux/kvm_host.h>
24#include <asm/kvm_emulate.h>
25#include <asm/ptrace.h>
26
27#define VCPU_NR_MODES 6
28#define REG_OFFSET(_reg) \
29 (offsetof(struct user_pt_regs, _reg) / sizeof(unsigned long))
30
31#define USR_REG_OFFSET(R) REG_OFFSET(compat_usr(R))
32
33static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
34 /* USR Registers */
35 {
36 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
37 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
38 USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
39 USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
40 USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
41 REG_OFFSET(pc)
42 },
43
44 /* FIQ Registers */
45 {
46 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
47 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
48 USR_REG_OFFSET(6), USR_REG_OFFSET(7),
49 REG_OFFSET(compat_r8_fiq), /* r8 */
50 REG_OFFSET(compat_r9_fiq), /* r9 */
51 REG_OFFSET(compat_r10_fiq), /* r10 */
52 REG_OFFSET(compat_r11_fiq), /* r11 */
53 REG_OFFSET(compat_r12_fiq), /* r12 */
54 REG_OFFSET(compat_sp_fiq), /* r13 */
55 REG_OFFSET(compat_lr_fiq), /* r14 */
56 REG_OFFSET(pc)
57 },
58
59 /* IRQ Registers */
60 {
61 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
62 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
63 USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
64 USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
65 USR_REG_OFFSET(12),
66 REG_OFFSET(compat_sp_irq), /* r13 */
67 REG_OFFSET(compat_lr_irq), /* r14 */
68 REG_OFFSET(pc)
69 },
70
71 /* SVC Registers */
72 {
73 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
74 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
75 USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
76 USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
77 USR_REG_OFFSET(12),
78 REG_OFFSET(compat_sp_svc), /* r13 */
79 REG_OFFSET(compat_lr_svc), /* r14 */
80 REG_OFFSET(pc)
81 },
82
83 /* ABT Registers */
84 {
85 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
86 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
87 USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
88 USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
89 USR_REG_OFFSET(12),
90 REG_OFFSET(compat_sp_abt), /* r13 */
91 REG_OFFSET(compat_lr_abt), /* r14 */
92 REG_OFFSET(pc)
93 },
94
95 /* UND Registers */
96 {
97 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
98 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
99 USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
100 USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
101 USR_REG_OFFSET(12),
102 REG_OFFSET(compat_sp_und), /* r13 */
103 REG_OFFSET(compat_lr_und), /* r14 */
104 REG_OFFSET(pc)
105 },
106};
107
108/*
109 * Return a pointer to the register number valid in the current mode of
110 * the virtual CPU.
111 */
112unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
113{
114 unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
115 unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
116
117 switch (mode) {
118 case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC:
119 mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
120 break;
121
122 case COMPAT_PSR_MODE_ABT:
123 mode = 4;
124 break;
125
126 case COMPAT_PSR_MODE_UND:
127 mode = 5;
128 break;
129
130 case COMPAT_PSR_MODE_SYS:
131 mode = 0; /* SYS maps to USR */
132 break;
133
134 default:
135 BUG();
136 }
137
138 return reg_array + vcpu_reg_offsets[mode][reg_num];
139}
140
141/*
142 * Return the SPSR for the current mode of the virtual CPU.
143 */
144unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu)
145{
146 unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
147 switch (mode) {
148 case COMPAT_PSR_MODE_SVC:
149 mode = KVM_SPSR_SVC;
150 break;
151 case COMPAT_PSR_MODE_ABT:
152 mode = KVM_SPSR_ABT;
153 break;
154 case COMPAT_PSR_MODE_UND:
155 mode = KVM_SPSR_UND;
156 break;
157 case COMPAT_PSR_MODE_IRQ:
158 mode = KVM_SPSR_IRQ;
159 break;
160 case COMPAT_PSR_MODE_FIQ:
161 mode = KVM_SPSR_FIQ;
162 break;
163 default:
164 BUG();
165 }
166
167 return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[mode];
168}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
new file mode 100644
index 000000000000..70a7816535cd
--- /dev/null
+++ b/arch/arm64/kvm/reset.c
@@ -0,0 +1,112 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/kvm/reset.c
6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include <linux/errno.h>
23#include <linux/kvm_host.h>
24#include <linux/kvm.h>
25
26#include <kvm/arm_arch_timer.h>
27
28#include <asm/cputype.h>
29#include <asm/ptrace.h>
30#include <asm/kvm_arm.h>
31#include <asm/kvm_coproc.h>
32
33/*
34 * ARMv8 Reset Values
35 */
36static const struct kvm_regs default_regs_reset = {
37 .regs.pstate = (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT |
38 PSR_F_BIT | PSR_D_BIT),
39};
40
41static const struct kvm_regs default_regs_reset32 = {
42 .regs.pstate = (COMPAT_PSR_MODE_SVC | COMPAT_PSR_A_BIT |
43 COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT),
44};
45
46static const struct kvm_irq_level default_vtimer_irq = {
47 .irq = 27,
48 .level = 1,
49};
50
51static bool cpu_has_32bit_el1(void)
52{
53 u64 pfr0;
54
55 pfr0 = read_cpuid(ID_AA64PFR0_EL1);
56 return !!(pfr0 & 0x20);
57}
58
59int kvm_arch_dev_ioctl_check_extension(long ext)
60{
61 int r;
62
63 switch (ext) {
64 case KVM_CAP_ARM_EL1_32BIT:
65 r = cpu_has_32bit_el1();
66 break;
67 default:
68 r = 0;
69 }
70
71 return r;
72}
73
74/**
75 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
76 * @vcpu: The VCPU pointer
77 *
78 * This function finds the right table above and sets the registers on
80 * the virtual CPU struct to their architecturally defined reset
80 * values.
81 */
82int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
83{
84 const struct kvm_irq_level *cpu_vtimer_irq;
85 const struct kvm_regs *cpu_reset;
86
87 switch (vcpu->arch.target) {
88 default:
89 if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
90 if (!cpu_has_32bit_el1())
91 return -EINVAL;
92 cpu_reset = &default_regs_reset32;
93 vcpu->arch.hcr_el2 &= ~HCR_RW;
94 } else {
95 cpu_reset = &default_regs_reset;
96 }
97
98 cpu_vtimer_irq = &default_vtimer_irq;
99 break;
100 }
101
102 /* Reset core registers */
103 memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset));
104
105 /* Reset system registers */
106 kvm_reset_sys_regs(vcpu);
107
108 /* Reset timer */
109 kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
110
111 return 0;
112}
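
A short sketch (not part of this patch) of what the pfr0 & 0x20 test in cpu_has_32bit_el1() corresponds to: the EL1 field of ID_AA64PFR0_EL1 occupies bits [7:4], and the field value 2 means EL1 can be entered in both AArch64 and AArch32, so for the field values defined at the time the check reduces to testing bit 5:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* ID_AA64PFR0_EL1.EL1 is bits [7:4]; value 2 = AArch64 and AArch32 at EL1. */
	static bool el1_supports_aarch32(uint64_t pfr0)
	{
		unsigned int el1 = (pfr0 >> 4) & 0xf;

		/* Equivalent to the kernel's (pfr0 & 0x20) shortcut for the
		 * encodings defined when this code was written. */
		return el1 == 2;
	}

	int main(void)
	{
		/* Example value: EL0 and EL1 fields both report AArch32 support. */
		printf("EL1 AArch32: %d\n", el1_supports_aarch32(0x22ULL));
		return 0;
	}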
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
new file mode 100644
index 000000000000..94923609753b
--- /dev/null
+++ b/arch/arm64/kvm/sys_regs.c
@@ -0,0 +1,1050 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/kvm/coproc.c:
6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7 * Authors: Rusty Russell <rusty@rustcorp.com.au>
8 * Christoffer Dall <c.dall@virtualopensystems.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License, version 2, as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#include <linux/mm.h>
24#include <linux/kvm_host.h>
25#include <linux/uaccess.h>
26#include <asm/kvm_arm.h>
27#include <asm/kvm_host.h>
28#include <asm/kvm_emulate.h>
29#include <asm/kvm_coproc.h>
30#include <asm/cacheflush.h>
31#include <asm/cputype.h>
32#include <trace/events/kvm.h>
33
34#include "sys_regs.h"
35
36/*
37 * All of this file is extremely similar to the ARM coproc.c, but the
38 * types are different. My gut feeling is that it should be pretty
39 * easy to merge, but that would be an ABI breakage -- again. VFP
40 * would also need to be abstracted.
41 *
42 * For AArch32, we only take care of what is being trapped. Anything
43 * that has to do with init and userspace access has to go via the
44 * 64bit interface.
45 */
46
47/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
48static u32 cache_levels;
49
50/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
51#define CSSELR_MAX 12
52
53/* Which cache CCSIDR represents depends on CSSELR value. */
54static u32 get_ccsidr(u32 csselr)
55{
56 u32 ccsidr;
57
58	/* Make sure no one else changes CSSELR during this! */
59 local_irq_disable();
60 /* Put value into CSSELR */
61 asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
62 isb();
63 /* Read result out of CCSIDR */
64 asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
65 local_irq_enable();
66
67 return ccsidr;
68}
69
70static void do_dc_cisw(u32 val)
71{
72 asm volatile("dc cisw, %x0" : : "r" (val));
73 dsb();
74}
75
76static void do_dc_csw(u32 val)
77{
78 asm volatile("dc csw, %x0" : : "r" (val));
79 dsb();
80}
81
82/* See note at ARM ARM B1.14.4 */
83static bool access_dcsw(struct kvm_vcpu *vcpu,
84 const struct sys_reg_params *p,
85 const struct sys_reg_desc *r)
86{
87 unsigned long val;
88 int cpu;
89
90 if (!p->is_write)
91 return read_from_write_only(vcpu, p);
92
93 cpu = get_cpu();
94
95 cpumask_setall(&vcpu->arch.require_dcache_flush);
96 cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
97
98 /* If we were already preempted, take the long way around */
99 if (cpu != vcpu->arch.last_pcpu) {
100 flush_cache_all();
101 goto done;
102 }
103
104 val = *vcpu_reg(vcpu, p->Rt);
105
106 switch (p->CRm) {
107 case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
108 case 14: /* DCCISW */
109 do_dc_cisw(val);
110 break;
111
112 case 10: /* DCCSW */
113 do_dc_csw(val);
114 break;
115 }
116
117done:
118 put_cpu();
119
120 return true;
121}
122
123/*
124 * We could trap ID_DFR0 and tell the guest we don't support performance
125 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
126 * NAKed, so it will read the PMCR anyway.
127 *
128 * Therefore we tell the guest we have 0 counters. Unfortunately, we
129 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
130 * all PM registers, which doesn't crash the guest kernel at least.
131 */
132static bool pm_fake(struct kvm_vcpu *vcpu,
133 const struct sys_reg_params *p,
134 const struct sys_reg_desc *r)
135{
136 if (p->is_write)
137 return ignore_write(vcpu, p);
138 else
139 return read_zero(vcpu, p);
140}
141
142static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
143{
144 u64 amair;
145
146 asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
147 vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
148}
149
150static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
151{
152 /*
153 * Simply map the vcpu_id into the Aff0 field of the MPIDR.
154 */
155 vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
156}
157
158/*
159 * Architected system registers.
160 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
161 */
162static const struct sys_reg_desc sys_reg_descs[] = {
163 /* DC ISW */
164 { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
165 access_dcsw },
166 /* DC CSW */
167 { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
168 access_dcsw },
169 /* DC CISW */
170 { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
171 access_dcsw },
172
173 /* TEECR32_EL1 */
174 { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
175 NULL, reset_val, TEECR32_EL1, 0 },
176 /* TEEHBR32_EL1 */
177 { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
178 NULL, reset_val, TEEHBR32_EL1, 0 },
179 /* DBGVCR32_EL2 */
180 { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
181 NULL, reset_val, DBGVCR32_EL2, 0 },
182
183 /* MPIDR_EL1 */
184 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
185 NULL, reset_mpidr, MPIDR_EL1 },
186 /* SCTLR_EL1 */
187 { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
188 NULL, reset_val, SCTLR_EL1, 0x00C50078 },
189 /* CPACR_EL1 */
190 { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
191 NULL, reset_val, CPACR_EL1, 0 },
192 /* TTBR0_EL1 */
193 { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
194 NULL, reset_unknown, TTBR0_EL1 },
195 /* TTBR1_EL1 */
196 { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
197 NULL, reset_unknown, TTBR1_EL1 },
198 /* TCR_EL1 */
199 { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
200 NULL, reset_val, TCR_EL1, 0 },
201
202 /* AFSR0_EL1 */
203 { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
204 NULL, reset_unknown, AFSR0_EL1 },
205 /* AFSR1_EL1 */
206 { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
207 NULL, reset_unknown, AFSR1_EL1 },
208 /* ESR_EL1 */
209 { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
210 NULL, reset_unknown, ESR_EL1 },
211 /* FAR_EL1 */
212 { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
213 NULL, reset_unknown, FAR_EL1 },
214
215 /* PMINTENSET_EL1 */
216 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
217 pm_fake },
218 /* PMINTENCLR_EL1 */
219 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
220 pm_fake },
221
222 /* MAIR_EL1 */
223 { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
224 NULL, reset_unknown, MAIR_EL1 },
225 /* AMAIR_EL1 */
226 { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
227 NULL, reset_amair_el1, AMAIR_EL1 },
228
229 /* VBAR_EL1 */
230 { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
231 NULL, reset_val, VBAR_EL1, 0 },
232 /* CONTEXTIDR_EL1 */
233 { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
234 NULL, reset_val, CONTEXTIDR_EL1, 0 },
235 /* TPIDR_EL1 */
236 { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
237 NULL, reset_unknown, TPIDR_EL1 },
238
239 /* CNTKCTL_EL1 */
240 { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
241 NULL, reset_val, CNTKCTL_EL1, 0},
242
243 /* CSSELR_EL1 */
244 { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
245 NULL, reset_unknown, CSSELR_EL1 },
246
247 /* PMCR_EL0 */
248 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
249 pm_fake },
250 /* PMCNTENSET_EL0 */
251 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
252 pm_fake },
253 /* PMCNTENCLR_EL0 */
254 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
255 pm_fake },
256 /* PMOVSCLR_EL0 */
257 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
258 pm_fake },
259 /* PMSWINC_EL0 */
260 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
261 pm_fake },
262 /* PMSELR_EL0 */
263 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
264 pm_fake },
265 /* PMCEID0_EL0 */
266 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
267 pm_fake },
268 /* PMCEID1_EL0 */
269 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
270 pm_fake },
271 /* PMCCNTR_EL0 */
272 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
273 pm_fake },
274 /* PMXEVTYPER_EL0 */
275 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
276 pm_fake },
277 /* PMXEVCNTR_EL0 */
278 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
279 pm_fake },
280 /* PMUSERENR_EL0 */
281 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
282 pm_fake },
283 /* PMOVSSET_EL0 */
284 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
285 pm_fake },
286
287 /* TPIDR_EL0 */
288 { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
289 NULL, reset_unknown, TPIDR_EL0 },
290 /* TPIDRRO_EL0 */
291 { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
292 NULL, reset_unknown, TPIDRRO_EL0 },
293
294 /* DACR32_EL2 */
295 { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
296 NULL, reset_unknown, DACR32_EL2 },
297 /* IFSR32_EL2 */
298 { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
299 NULL, reset_unknown, IFSR32_EL2 },
300 /* FPEXC32_EL2 */
301 { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
302 NULL, reset_val, FPEXC32_EL2, 0x70 },
303};
304
305/* Trapped cp15 registers */
306static const struct sys_reg_desc cp15_regs[] = {
307 /*
308 * DC{C,I,CI}SW operations:
309 */
310 { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
311 { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
312 { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
313 { Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
314 { Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
315 { Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
316 { Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake },
317 { Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake },
318 { Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake },
319 { Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake },
320 { Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake },
321 { Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake },
322 { Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake },
323 { Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
324 { Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
325 { Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
326};
327
328/* Target specific emulation tables */
329static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];
330
331void kvm_register_target_sys_reg_table(unsigned int target,
332 struct kvm_sys_reg_target_table *table)
333{
334 target_tables[target] = table;
335}
336
337/* Get specific register table for this target. */
338static const struct sys_reg_desc *get_target_table(unsigned target,
339 bool mode_is_64,
340 size_t *num)
341{
342 struct kvm_sys_reg_target_table *table;
343
344 table = target_tables[target];
345 if (mode_is_64) {
346 *num = table->table64.num;
347 return table->table64.table;
348 } else {
349 *num = table->table32.num;
350 return table->table32.table;
351 }
352}
353
354static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
355 const struct sys_reg_desc table[],
356 unsigned int num)
357{
358 unsigned int i;
359
360 for (i = 0; i < num; i++) {
361 const struct sys_reg_desc *r = &table[i];
362
363 if (params->Op0 != r->Op0)
364 continue;
365 if (params->Op1 != r->Op1)
366 continue;
367 if (params->CRn != r->CRn)
368 continue;
369 if (params->CRm != r->CRm)
370 continue;
371 if (params->Op2 != r->Op2)
372 continue;
373
374 return r;
375 }
376 return NULL;
377}
378
379int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
380{
381 kvm_inject_undefined(vcpu);
382 return 1;
383}
384
385int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
386{
387 kvm_inject_undefined(vcpu);
388 return 1;
389}
390
391static void emulate_cp15(struct kvm_vcpu *vcpu,
392 const struct sys_reg_params *params)
393{
394 size_t num;
395 const struct sys_reg_desc *table, *r;
396
397 table = get_target_table(vcpu->arch.target, false, &num);
398
399 /* Search target-specific then generic table. */
400 r = find_reg(params, table, num);
401 if (!r)
402 r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));
403
404 if (likely(r)) {
405 /*
406 * Not having an accessor means that we have
407 * configured a trap that we don't know how to
408 * handle. This certainly qualifies as a gross bug
409 * that should be fixed right away.
410 */
411 BUG_ON(!r->access);
412
413 if (likely(r->access(vcpu, params, r))) {
414 /* Skip instruction, since it was emulated */
415 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
416 return;
417 }
418 /* If access function fails, it should complain. */
419 }
420
421 kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu));
422 print_sys_reg_instr(params);
423 kvm_inject_undefined(vcpu);
424}
425
426/**
427 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
428 * @vcpu: The VCPU pointer
429 * @run: The kvm_run struct
430 */
431int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
432{
433 struct sys_reg_params params;
434 u32 hsr = kvm_vcpu_get_hsr(vcpu);
435 int Rt2 = (hsr >> 10) & 0xf;
436
437 params.CRm = (hsr >> 1) & 0xf;
438 params.Rt = (hsr >> 5) & 0xf;
439 params.is_write = ((hsr & 1) == 0);
440
441 params.Op0 = 0;
442 params.Op1 = (hsr >> 16) & 0xf;
443 params.Op2 = 0;
444 params.CRn = 0;
445
446 /*
447 * Massive hack here. Store Rt2 in the top 32bits so we only
448 * have one register to deal with. As we use the same trap
449 * backends between AArch32 and AArch64, we get away with it.
450 */
451 if (params.is_write) {
452 u64 val = *vcpu_reg(vcpu, params.Rt);
453 val &= 0xffffffff;
454 val |= *vcpu_reg(vcpu, Rt2) << 32;
455 *vcpu_reg(vcpu, params.Rt) = val;
456 }
457
458 emulate_cp15(vcpu, &params);
459
460 /* Do the opposite hack for the read side */
461 if (!params.is_write) {
462 u64 val = *vcpu_reg(vcpu, params.Rt);
463 val >>= 32;
464 *vcpu_reg(vcpu, Rt2) = val;
465 }
466
467 return 1;
468}
469
470/**
471 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
472 * @vcpu: The VCPU pointer
473 * @run: The kvm_run struct
474 */
475int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
476{
477 struct sys_reg_params params;
478 u32 hsr = kvm_vcpu_get_hsr(vcpu);
479
480 params.CRm = (hsr >> 1) & 0xf;
481 params.Rt = (hsr >> 5) & 0xf;
482 params.is_write = ((hsr & 1) == 0);
483 params.CRn = (hsr >> 10) & 0xf;
484 params.Op0 = 0;
485 params.Op1 = (hsr >> 14) & 0x7;
486 params.Op2 = (hsr >> 17) & 0x7;
487
488 emulate_cp15(vcpu, &params);
489 return 1;
490}
491
492static int emulate_sys_reg(struct kvm_vcpu *vcpu,
493 const struct sys_reg_params *params)
494{
495 size_t num;
496 const struct sys_reg_desc *table, *r;
497
498 table = get_target_table(vcpu->arch.target, true, &num);
499
500 /* Search target-specific then generic table. */
501 r = find_reg(params, table, num);
502 if (!r)
503 r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
504
505 if (likely(r)) {
506 /*
507 * Not having an accessor means that we have
508 * configured a trap that we don't know how to
509 * handle. This certainly qualifies as a gross bug
510 * that should be fixed right away.
511 */
512 BUG_ON(!r->access);
513
514 if (likely(r->access(vcpu, params, r))) {
515 /* Skip instruction, since it was emulated */
516 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
517 return 1;
518 }
519 /* If access function fails, it should complain. */
520 } else {
521 kvm_err("Unsupported guest sys_reg access at: %lx\n",
522 *vcpu_pc(vcpu));
523 print_sys_reg_instr(params);
524 }
525 kvm_inject_undefined(vcpu);
526 return 1;
527}
528
529static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
530 const struct sys_reg_desc *table, size_t num)
531{
532 unsigned long i;
533
534 for (i = 0; i < num; i++)
535 if (table[i].reset)
536 table[i].reset(vcpu, &table[i]);
537}
538
539/**
540 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
541 * @vcpu: The VCPU pointer
542 * @run: The kvm_run struct
543 */
544int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
545{
546 struct sys_reg_params params;
547 unsigned long esr = kvm_vcpu_get_hsr(vcpu);
548
549 params.Op0 = (esr >> 20) & 3;
550 params.Op1 = (esr >> 14) & 0x7;
551 params.CRn = (esr >> 10) & 0xf;
552 params.CRm = (esr >> 1) & 0xf;
553 params.Op2 = (esr >> 17) & 0x7;
554 params.Rt = (esr >> 5) & 0x1f;
555 params.is_write = !(esr & 1);
556
557 return emulate_sys_reg(vcpu, &params);
558}
559
560/******************************************************************************
561 * Userspace API
562 *****************************************************************************/
563
564static bool index_to_params(u64 id, struct sys_reg_params *params)
565{
566 switch (id & KVM_REG_SIZE_MASK) {
567 case KVM_REG_SIZE_U64:
568		/* Any unused index bits mean it's not valid. */
569 if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
570 | KVM_REG_ARM_COPROC_MASK
571 | KVM_REG_ARM64_SYSREG_OP0_MASK
572 | KVM_REG_ARM64_SYSREG_OP1_MASK
573 | KVM_REG_ARM64_SYSREG_CRN_MASK
574 | KVM_REG_ARM64_SYSREG_CRM_MASK
575 | KVM_REG_ARM64_SYSREG_OP2_MASK))
576 return false;
577 params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
578 >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
579 params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
580 >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
581 params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
582 >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
583 params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
584 >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
585 params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
586 >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
587 return true;
588 default:
589 return false;
590 }
591}
592
593/* Decode an index value, and find the sys_reg_desc entry. */
594static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
595 u64 id)
596{
597 size_t num;
598 const struct sys_reg_desc *table, *r;
599 struct sys_reg_params params;
600
601 /* We only do sys_reg for now. */
602 if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
603 return NULL;
604
605 if (!index_to_params(id, &params))
606 return NULL;
607
608 table = get_target_table(vcpu->arch.target, true, &num);
609 r = find_reg(&params, table, num);
610 if (!r)
611 r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
612
613 /* Not saved in the sys_reg array? */
614 if (r && !r->reg)
615 r = NULL;
616
617 return r;
618}
619
620/*
621 * These are the invariant sys_reg registers: we let the guest see the
622 * host versions of these, so they're part of the guest state.
623 *
624 * A future CPU may provide a mechanism to present different values to
625 * the guest, or a future kvm may trap them.
626 */
627
628#define FUNCTION_INVARIANT(reg) \
629 static void get_##reg(struct kvm_vcpu *v, \
630 const struct sys_reg_desc *r) \
631 { \
632 u64 val; \
633 \
634 asm volatile("mrs %0, " __stringify(reg) "\n" \
635 : "=r" (val)); \
636 ((struct sys_reg_desc *)r)->val = val; \
637 }
638
639FUNCTION_INVARIANT(midr_el1)
640FUNCTION_INVARIANT(ctr_el0)
641FUNCTION_INVARIANT(revidr_el1)
642FUNCTION_INVARIANT(id_pfr0_el1)
643FUNCTION_INVARIANT(id_pfr1_el1)
644FUNCTION_INVARIANT(id_dfr0_el1)
645FUNCTION_INVARIANT(id_afr0_el1)
646FUNCTION_INVARIANT(id_mmfr0_el1)
647FUNCTION_INVARIANT(id_mmfr1_el1)
648FUNCTION_INVARIANT(id_mmfr2_el1)
649FUNCTION_INVARIANT(id_mmfr3_el1)
650FUNCTION_INVARIANT(id_isar0_el1)
651FUNCTION_INVARIANT(id_isar1_el1)
652FUNCTION_INVARIANT(id_isar2_el1)
653FUNCTION_INVARIANT(id_isar3_el1)
654FUNCTION_INVARIANT(id_isar4_el1)
655FUNCTION_INVARIANT(id_isar5_el1)
656FUNCTION_INVARIANT(clidr_el1)
657FUNCTION_INVARIANT(aidr_el1)
658
659/* ->val is filled in by kvm_sys_reg_table_init() */
660static struct sys_reg_desc invariant_sys_regs[] = {
661 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
662 NULL, get_midr_el1 },
663 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
664 NULL, get_revidr_el1 },
665 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
666 NULL, get_id_pfr0_el1 },
667 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
668 NULL, get_id_pfr1_el1 },
669 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
670 NULL, get_id_dfr0_el1 },
671 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
672 NULL, get_id_afr0_el1 },
673 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
674 NULL, get_id_mmfr0_el1 },
675 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
676 NULL, get_id_mmfr1_el1 },
677 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
678 NULL, get_id_mmfr2_el1 },
679 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
680 NULL, get_id_mmfr3_el1 },
681 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
682 NULL, get_id_isar0_el1 },
683 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
684 NULL, get_id_isar1_el1 },
685 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
686 NULL, get_id_isar2_el1 },
687 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
688 NULL, get_id_isar3_el1 },
689 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
690 NULL, get_id_isar4_el1 },
691 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
692 NULL, get_id_isar5_el1 },
693 { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
694 NULL, get_clidr_el1 },
695 { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
696 NULL, get_aidr_el1 },
697 { Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
698 NULL, get_ctr_el0 },
699};
700
701static int reg_from_user(void *val, const void __user *uaddr, u64 id)
702{
703 /* This Just Works because we are little endian. */
704 if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
705 return -EFAULT;
706 return 0;
707}
708
709static int reg_to_user(void __user *uaddr, const void *val, u64 id)
710{
711 /* This Just Works because we are little endian. */
712 if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
713 return -EFAULT;
714 return 0;
715}
716
717static int get_invariant_sys_reg(u64 id, void __user *uaddr)
718{
719 struct sys_reg_params params;
720 const struct sys_reg_desc *r;
721
722 if (!index_to_params(id, &params))
723 return -ENOENT;
724
725 r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
726 if (!r)
727 return -ENOENT;
728
729 return reg_to_user(uaddr, &r->val, id);
730}
731
732static int set_invariant_sys_reg(u64 id, void __user *uaddr)
733{
734 struct sys_reg_params params;
735 const struct sys_reg_desc *r;
736 int err;
737 u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
738
739 if (!index_to_params(id, &params))
740 return -ENOENT;
741 r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
742 if (!r)
743 return -ENOENT;
744
745 err = reg_from_user(&val, uaddr, id);
746 if (err)
747 return err;
748
749 /* This is what we mean by invariant: you can't change it. */
750 if (r->val != val)
751 return -EINVAL;
752
753 return 0;
754}
755
756static bool is_valid_cache(u32 val)
757{
758 u32 level, ctype;
759
760 if (val >= CSSELR_MAX)
761		return false;
762
763 /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
764 level = (val >> 1);
765 ctype = (cache_levels >> (level * 3)) & 7;
766
767 switch (ctype) {
768 case 0: /* No cache */
769 return false;
770 case 1: /* Instruction cache only */
771 return (val & 1);
772 case 2: /* Data cache only */
773 case 4: /* Unified cache */
774 return !(val & 1);
775 case 3: /* Separate instruction and data caches */
776 return true;
777 default: /* Reserved: we can't know instruction or data. */
778 return false;
779 }
780}
781
782static int demux_c15_get(u64 id, void __user *uaddr)
783{
784 u32 val;
785 u32 __user *uval = uaddr;
786
787 /* Fail if we have unknown bits set. */
788 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
789 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
790 return -ENOENT;
791
792 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
793 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
794 if (KVM_REG_SIZE(id) != 4)
795 return -ENOENT;
796 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
797 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
798 if (!is_valid_cache(val))
799 return -ENOENT;
800
801 return put_user(get_ccsidr(val), uval);
802 default:
803 return -ENOENT;
804 }
805}
806
807static int demux_c15_set(u64 id, void __user *uaddr)
808{
809 u32 val, newval;
810 u32 __user *uval = uaddr;
811
812 /* Fail if we have unknown bits set. */
813 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
814 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
815 return -ENOENT;
816
817 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
818 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
819 if (KVM_REG_SIZE(id) != 4)
820 return -ENOENT;
821 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
822 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
823 if (!is_valid_cache(val))
824 return -ENOENT;
825
826 if (get_user(newval, uval))
827 return -EFAULT;
828
829 /* This is also invariant: you can't change it. */
830 if (newval != get_ccsidr(val))
831 return -EINVAL;
832 return 0;
833 default:
834 return -ENOENT;
835 }
836}
837
838int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
839{
840 const struct sys_reg_desc *r;
841 void __user *uaddr = (void __user *)(unsigned long)reg->addr;
842
843 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
844 return demux_c15_get(reg->id, uaddr);
845
846 if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
847 return -ENOENT;
848
849 r = index_to_sys_reg_desc(vcpu, reg->id);
850 if (!r)
851 return get_invariant_sys_reg(reg->id, uaddr);
852
853 return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
854}
855
856int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
857{
858 const struct sys_reg_desc *r;
859 void __user *uaddr = (void __user *)(unsigned long)reg->addr;
860
861 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
862 return demux_c15_set(reg->id, uaddr);
863
864 if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
865 return -ENOENT;
866
867 r = index_to_sys_reg_desc(vcpu, reg->id);
868 if (!r)
869 return set_invariant_sys_reg(reg->id, uaddr);
870
871 return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
872}
873
874static unsigned int num_demux_regs(void)
875{
876 unsigned int i, count = 0;
877
878 for (i = 0; i < CSSELR_MAX; i++)
879 if (is_valid_cache(i))
880 count++;
881
882 return count;
883}
884
885static int write_demux_regids(u64 __user *uindices)
886{
887 u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
888 unsigned int i;
889
890 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
891 for (i = 0; i < CSSELR_MAX; i++) {
892 if (!is_valid_cache(i))
893 continue;
894 if (put_user(val | i, uindices))
895 return -EFAULT;
896 uindices++;
897 }
898 return 0;
899}
900
901static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
902{
903 return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
904 KVM_REG_ARM64_SYSREG |
905 (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
906 (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
907 (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
908 (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
909 (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
910}
911
912static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
913{
914 if (!*uind)
915 return true;
916
917 if (put_user(sys_reg_to_index(reg), *uind))
918 return false;
919
920 (*uind)++;
921 return true;
922}
923
924/* Assumed ordered tables, see kvm_sys_reg_table_init. */
925static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
926{
927 const struct sys_reg_desc *i1, *i2, *end1, *end2;
928 unsigned int total = 0;
929 size_t num;
930
931 /* We check for duplicates here, to allow arch-specific overrides. */
932 i1 = get_target_table(vcpu->arch.target, true, &num);
933 end1 = i1 + num;
934 i2 = sys_reg_descs;
935 end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
936
937 BUG_ON(i1 == end1 || i2 == end2);
938
939 /* Walk carefully, as both tables may refer to the same register. */
940 while (i1 || i2) {
941 int cmp = cmp_sys_reg(i1, i2);
942 /* target-specific overrides generic entry. */
943 if (cmp <= 0) {
944 /* Ignore registers we trap but don't save. */
945 if (i1->reg) {
946 if (!copy_reg_to_user(i1, &uind))
947 return -EFAULT;
948 total++;
949 }
950 } else {
951 /* Ignore registers we trap but don't save. */
952 if (i2->reg) {
953 if (!copy_reg_to_user(i2, &uind))
954 return -EFAULT;
955 total++;
956 }
957 }
958
959 if (cmp <= 0 && ++i1 == end1)
960 i1 = NULL;
961 if (cmp >= 0 && ++i2 == end2)
962 i2 = NULL;
963 }
964 return total;
965}
966
967unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
968{
969 return ARRAY_SIZE(invariant_sys_regs)
970 + num_demux_regs()
971 + walk_sys_regs(vcpu, (u64 __user *)NULL);
972}
973
974int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
975{
976 unsigned int i;
977 int err;
978
979 /* Then give them all the invariant registers' indices. */
980 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
981 if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
982 return -EFAULT;
983 uindices++;
984 }
985
986 err = walk_sys_regs(vcpu, uindices);
987 if (err < 0)
988 return err;
989 uindices += err;
990
991 return write_demux_regids(uindices);
992}
993
994void kvm_sys_reg_table_init(void)
995{
996 unsigned int i;
997 struct sys_reg_desc clidr;
998
999 /* Make sure tables are unique and in order. */
1000 for (i = 1; i < ARRAY_SIZE(sys_reg_descs); i++)
1001 BUG_ON(cmp_sys_reg(&sys_reg_descs[i-1], &sys_reg_descs[i]) >= 0);
1002
1003 /* We abuse the reset function to overwrite the table itself. */
1004 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
1005 invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
1006
1007 /*
1008 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
1009 *
1010 * If software reads the Cache Type fields from Ctype1
1011 * upwards, once it has seen a value of 0b000, no caches
1012 * exist at further-out levels of the hierarchy. So, for
1013 * example, if Ctype3 is the first Cache Type field with a
1014 * value of 0b000, the values of Ctype4 to Ctype7 must be
1015 * ignored.
1016 */
1017 get_clidr_el1(NULL, &clidr); /* Ugly... */
1018 cache_levels = clidr.val;
1019 for (i = 0; i < 7; i++)
1020 if (((cache_levels >> (i*3)) & 7) == 0)
1021 break;
1022 /* Clear all higher bits. */
1023 cache_levels &= (1 << (i*3))-1;
1024}
1025
1026/**
1027 * kvm_reset_sys_regs - sets system registers to reset value
1028 * @vcpu: The VCPU pointer
1029 *
1030 * This function finds the right table above and sets the registers on the
1031 * virtual CPU struct to their architecturally defined reset values.
1032 */
1033void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
1034{
1035 size_t num;
1036 const struct sys_reg_desc *table;
1037
1038	/* Catch someone adding a register without putting in a reset entry. */
1039 memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
1040
1041 /* Generic chip reset first (so target could override). */
1042 reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
1043
1044 table = get_target_table(vcpu->arch.target, true, &num);
1045 reset_sys_reg_descs(vcpu, table, num);
1046
1047 for (num = 1; num < NR_SYS_REGS; num++)
1048 if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
1049 panic("Didn't reset vcpu_sys_reg(%zi)", num);
1050}
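
The user-space side of kvm_arm_copy_sys_reg_indices()/walk_sys_regs() can be exercised roughly as below. This is an illustrative sketch against the generic KVM_GET_REG_LIST and KVM_GET_ONE_REG ioctls, assuming a vcpu fd has already been created and initialised with KVM_ARM_VCPU_INIT:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int dump_sys_regs(int vcpu_fd)
	{
		struct kvm_reg_list probe = { .n = 0 };
		struct kvm_reg_list *list;
		uint64_t i;

		/* First call fails with E2BIG but reports how many indices exist. */
		ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

		list = calloc(1, sizeof(*list) + probe.n * sizeof(uint64_t));
		if (!list)
			return -1;
		list->n = probe.n;
		if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
			free(list);
			return -1;
		}

		for (i = 0; i < list->n; i++) {
			struct kvm_one_reg reg = { .id = list->reg[i] };
			uint64_t val = 0;

			/* Skip the 32-bit CCSIDR demux entries for this example. */
			if ((reg.id & KVM_REG_SIZE_MASK) != KVM_REG_SIZE_U64)
				continue;
			reg.addr = (uintptr_t)&val;
			if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
				printf("reg %016llx = %016llx\n",
				       (unsigned long long)reg.id,
				       (unsigned long long)val);
		}
		free(list);
		return 0;
	}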
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
new file mode 100644
index 000000000000..d50d3722998e
--- /dev/null
+++ b/arch/arm64/kvm/sys_regs.h
@@ -0,0 +1,138 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/kvm/coproc.h
6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7 * Authors: Christoffer Dall <c.dall@virtualopensystems.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#ifndef __ARM64_KVM_SYS_REGS_LOCAL_H__
23#define __ARM64_KVM_SYS_REGS_LOCAL_H__
24
25struct sys_reg_params {
26 u8 Op0;
27 u8 Op1;
28 u8 CRn;
29 u8 CRm;
30 u8 Op2;
31 u8 Rt;
32 bool is_write;
33};
34
35struct sys_reg_desc {
36 /* MRS/MSR instruction which accesses it. */
37 u8 Op0;
38 u8 Op1;
39 u8 CRn;
40 u8 CRm;
41 u8 Op2;
42
43 /* Trapped access from guest, if non-NULL. */
44 bool (*access)(struct kvm_vcpu *,
45 const struct sys_reg_params *,
46 const struct sys_reg_desc *);
47
48 /* Initialization for vcpu. */
49 void (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *);
50
51 /* Index into sys_reg[], or 0 if we don't need to save it. */
52 int reg;
53
54 /* Value (usually reset value) */
55 u64 val;
56};
57
58static inline void print_sys_reg_instr(const struct sys_reg_params *p)
59{
60 /* Look, we even formatted it for you to paste into the table! */
61 kvm_pr_unimpl(" { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
62 p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read");
63}
64
65static inline bool ignore_write(struct kvm_vcpu *vcpu,
66 const struct sys_reg_params *p)
67{
68 return true;
69}
70
71static inline bool read_zero(struct kvm_vcpu *vcpu,
72 const struct sys_reg_params *p)
73{
74 *vcpu_reg(vcpu, p->Rt) = 0;
75 return true;
76}
77
78static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
79 const struct sys_reg_params *params)
80{
81 kvm_debug("sys_reg write to read-only register at: %lx\n",
82 *vcpu_pc(vcpu));
83 print_sys_reg_instr(params);
84 return false;
85}
86
87static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
88 const struct sys_reg_params *params)
89{
90	kvm_debug("sys_reg read from write-only register at: %lx\n",
91 *vcpu_pc(vcpu));
92 print_sys_reg_instr(params);
93 return false;
94}
95
96/* Reset functions */
97static inline void reset_unknown(struct kvm_vcpu *vcpu,
98 const struct sys_reg_desc *r)
99{
100 BUG_ON(!r->reg);
101 BUG_ON(r->reg >= NR_SYS_REGS);
102 vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
103}
104
105static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
106{
107 BUG_ON(!r->reg);
108 BUG_ON(r->reg >= NR_SYS_REGS);
109 vcpu_sys_reg(vcpu, r->reg) = r->val;
110}
111
112static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
113 const struct sys_reg_desc *i2)
114{
115 BUG_ON(i1 == i2);
116 if (!i1)
117 return 1;
118 else if (!i2)
119 return -1;
120 if (i1->Op0 != i2->Op0)
121 return i1->Op0 - i2->Op0;
122 if (i1->Op1 != i2->Op1)
123 return i1->Op1 - i2->Op1;
124 if (i1->CRn != i2->CRn)
125 return i1->CRn - i2->CRn;
126 if (i1->CRm != i2->CRm)
127 return i1->CRm - i2->CRm;
128 return i1->Op2 - i2->Op2;
129}
130
131
132#define Op0(_x) .Op0 = _x
133#define Op1(_x) .Op1 = _x
134#define CRn(_x) .CRn = _x
135#define CRm(_x) .CRm = _x
136#define Op2(_x) .Op2 = _x
137
138#endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */
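
cmp_sys_reg() above compares field by field; an equivalent way to view the required "sorted ascending by Op0, Op1, CRn, CRm, Op2" ordering (sketch only, not part of the patch) is to pack the encoding into one integer, using the same shifts the user-space register IDs use:

	#include <stdint.h>

	/* Pack the encoding into a single sortable key: Op0 most significant,
	 * Op2 least.  The shifts mirror KVM_REG_ARM64_SYSREG_*_SHIFT. */
	static inline uint32_t sys_reg_key(uint8_t op0, uint8_t op1,
					   uint8_t crn, uint8_t crm, uint8_t op2)
	{
		return ((uint32_t)op0 << 14) | ((uint32_t)op1 << 11) |
		       ((uint32_t)crn << 7) | ((uint32_t)crm << 3) | op2;
	}

	/* For two non-NULL descriptors a and b:
	 *   sys_reg_key(a...) < sys_reg_key(b...)  <=>  cmp_sys_reg(a, b) < 0 */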
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
new file mode 100644
index 000000000000..4268ab9356b1
--- /dev/null
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -0,0 +1,95 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Based on arch/arm/kvm/coproc_a15.c:
6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7 * Authors: Rusty Russell <rusty@rustcorp.com.au>
8 * Christoffer Dall <c.dall@virtualopensystems.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License, version 2, as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22#include <linux/kvm_host.h>
23#include <asm/cputype.h>
24#include <asm/kvm_arm.h>
25#include <asm/kvm_asm.h>
26#include <asm/kvm_host.h>
27#include <asm/kvm_emulate.h>
28#include <asm/kvm_coproc.h>
29#include <linux/init.h>
30
31#include "sys_regs.h"
32
33static bool access_actlr(struct kvm_vcpu *vcpu,
34 const struct sys_reg_params *p,
35 const struct sys_reg_desc *r)
36{
37 if (p->is_write)
38 return ignore_write(vcpu, p);
39
40 *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1);
41 return true;
42}
43
44static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
45{
46 u64 actlr;
47
48 asm volatile("mrs %0, actlr_el1\n" : "=r" (actlr));
49 vcpu_sys_reg(vcpu, ACTLR_EL1) = actlr;
50}
51
52/*
53 * Implementation specific sys-reg registers.
54 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
55 */
56static const struct sys_reg_desc genericv8_sys_regs[] = {
57 /* ACTLR_EL1 */
58 { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001),
59 access_actlr, reset_actlr, ACTLR_EL1 },
60};
61
62static const struct sys_reg_desc genericv8_cp15_regs[] = {
63 /* ACTLR */
64 { Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001),
65 access_actlr },
66};
67
68static struct kvm_sys_reg_target_table genericv8_target_table = {
69 .table64 = {
70 .table = genericv8_sys_regs,
71 .num = ARRAY_SIZE(genericv8_sys_regs),
72 },
73 .table32 = {
74 .table = genericv8_cp15_regs,
75 .num = ARRAY_SIZE(genericv8_cp15_regs),
76 },
77};
78
79static int __init sys_reg_genericv8_init(void)
80{
81 unsigned int i;
82
83 for (i = 1; i < ARRAY_SIZE(genericv8_sys_regs); i++)
84 BUG_ON(cmp_sys_reg(&genericv8_sys_regs[i-1],
85 &genericv8_sys_regs[i]) >= 0);
86
87 kvm_register_target_sys_reg_table(KVM_ARM_TARGET_AEM_V8,
88 &genericv8_target_table);
89 kvm_register_target_sys_reg_table(KVM_ARM_TARGET_FOUNDATION_V8,
90 &genericv8_target_table);
91 kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A57,
92 &genericv8_target_table);
93 return 0;
94}
95late_initcall(sys_reg_genericv8_init);
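
The registration pattern above generalises to other CPU implementations. A hedged sketch of what that would look like if added to sys_regs_generic_v8.c (the target constant, the register choice and its encoding are hypothetical; only kvm_register_target_sys_reg_table() and the table types come from the code above):

	/* Hypothetical: expose one extra IMPLEMENTATION DEFINED register for a
	 * made-up target.  Check the CPU TRM for real encodings before use. */
	static const struct sys_reg_desc example_sys_regs[] = {
		/* Example IMPDEF register at Op0=3, Op1=1, CRn=11, CRm=0, Op2=2 */
		{ Op0(0b11), Op1(0b001), CRn(0b1011), CRm(0b0000), Op2(0b010),
		  access_actlr /* placeholder accessor for the sketch */ },
	};

	static struct kvm_sys_reg_target_table example_target_table = {
		.table64 = {
			.table = example_sys_regs,
			.num = ARRAY_SIZE(example_sys_regs),
		},
	};

	static int __init sys_reg_example_init(void)
	{
		/* KVM_ARM_TARGET_EXAMPLE is invented for this sketch; a real
		 * target needs its own uapi constant and kvm_target_cpu() entry. */
		kvm_register_target_sys_reg_table(KVM_ARM_TARGET_EXAMPLE,
						  &example_target_table);
		return 0;
	}
	late_initcall(sys_reg_example_init);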
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a5c86fc34a37..aac27640bec2 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -666,6 +666,7 @@ struct kvm_ppc_smmu_info {
666#define KVM_CAP_IRQ_MPIC 90 666#define KVM_CAP_IRQ_MPIC 90
667#define KVM_CAP_PPC_RTAS 91 667#define KVM_CAP_PPC_RTAS 91
668#define KVM_CAP_IRQ_XICS 92 668#define KVM_CAP_IRQ_XICS 92
669#define KVM_CAP_ARM_EL1_32BIT 93
669 670
670#ifdef KVM_CAP_IRQ_ROUTING 671#ifdef KVM_CAP_IRQ_ROUTING
671 672
@@ -783,6 +784,7 @@ struct kvm_dirty_tlb {
783#define KVM_REG_IA64 0x3000000000000000ULL 784#define KVM_REG_IA64 0x3000000000000000ULL
784#define KVM_REG_ARM 0x4000000000000000ULL 785#define KVM_REG_ARM 0x4000000000000000ULL
785#define KVM_REG_S390 0x5000000000000000ULL 786#define KVM_REG_S390 0x5000000000000000ULL
787#define KVM_REG_ARM64 0x6000000000000000ULL
786 788
787#define KVM_REG_SIZE_SHIFT 52 789#define KVM_REG_SIZE_SHIFT 52
788#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL 790#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
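
A user-space sketch (not part of this patch) of how the new KVM_CAP_ARM_EL1_32BIT capability would typically be consumed: probe it on the /dev/kvm fd, then request an AArch32 EL1 vcpu via KVM_ARM_VCPU_INIT. The fds and the target value are assumed to come from the usual KVM_CREATE_VM/KVM_CREATE_VCPU sequence:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int init_32bit_vcpu(int kvm_fd, int vcpu_fd, unsigned int target)
	{
		struct kvm_vcpu_init init;

		if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_EL1_32BIT) <= 0)
			return -1;	/* 32bit EL1 guests not supported here */

		memset(&init, 0, sizeof(init));
		init.target = target;	/* e.g. KVM_ARM_TARGET_CORTEX_A57 */
		init.features[KVM_ARM_VCPU_EL1_32BIT / 32] |=
			1U << (KVM_ARM_VCPU_EL1_32BIT % 32);

		return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
	}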