author     Catalin Marinas <catalin.marinas@arm.com>   2013-06-12 11:48:38 -0400
committer  Catalin Marinas <catalin.marinas@arm.com>   2013-06-12 11:48:38 -0400
commit     63917f0b5ba2a932d4fca7f67d1a1eae9034269e (patch)
tree       2e5b219ca98b56868136e227601a91143d289daf /arch/arm64/include
parent     d822d2a1e33144967b01f9535ce217639aa75279 (diff)
parent     aa4a73a0a23a65a2f531d01f1865d1e61c6acb55 (diff)
Merge branch 'kvm-arm64/kvm-for-3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into upstream
* 'kvm-arm64/kvm-for-3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms: (33 commits)
  arm64: KVM: document kernel object mappings in HYP
  arm64: KVM: MAINTAINERS update
  arm64: KVM: userspace API documentation
  arm64: KVM: enable initialization of a 32bit vcpu
  arm64: KVM: 32bit guest fault injection
  arm64: KVM: 32bit specific register world switch
  arm64: KVM: CPU specific 32bit coprocessor access
  arm64: KVM: 32bit handling of coprocessor traps
  arm64: KVM: 32bit conditional execution emulation
  arm64: KVM: 32bit GP register access
  arm64: KVM: define 32bit specific registers
  arm64: KVM: Build system integration
  arm64: KVM: PSCI implementation
  arm64: KVM: Plug the arch timer
  ARM: KVM: timer: allow DT matching for ARMv8 cores
  arm64: KVM: Plug the VGIC
  arm64: KVM: Exit handling
  arm64: KVM: HYP mode world switch implementation
  arm64: KVM: hypervisor initialization code
  arm64: KVM: guest one-reg interface
  ...

Conflicts:
	arch/arm64/Makefile
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h        245
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h        104
-rw-r--r--  arch/arm64/include/asm/kvm_coproc.h      56
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h    180
-rw-r--r--  arch/arm64/include/asm/kvm_host.h       202
-rw-r--r--  arch/arm64/include/asm/kvm_mmio.h        59
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h        135
-rw-r--r--  arch/arm64/include/asm/kvm_psci.h        23
-rw-r--r--  arch/arm64/include/asm/memory.h           6
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h   19
-rw-r--r--  arch/arm64/include/asm/pgtable.h         12
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h       168
12 files changed, 1209 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
new file mode 100644
index 000000000000..a5f28e2720c7
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -0,0 +1,245 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ARM64_KVM_ARM_H__
19#define __ARM64_KVM_ARM_H__
20
21#include <asm/types.h>
22
23/* Hyp Configuration Register (HCR) bits */
24#define HCR_ID (UL(1) << 33)
25#define HCR_CD (UL(1) << 32)
26#define HCR_RW_SHIFT 31
27#define HCR_RW (UL(1) << HCR_RW_SHIFT)
28#define HCR_TRVM (UL(1) << 30)
29#define HCR_HCD (UL(1) << 29)
30#define HCR_TDZ (UL(1) << 28)
31#define HCR_TGE (UL(1) << 27)
32#define HCR_TVM (UL(1) << 26)
33#define HCR_TTLB (UL(1) << 25)
34#define HCR_TPU (UL(1) << 24)
35#define HCR_TPC (UL(1) << 23)
36#define HCR_TSW (UL(1) << 22)
37#define HCR_TAC (UL(1) << 21)
38#define HCR_TIDCP (UL(1) << 20)
39#define HCR_TSC (UL(1) << 19)
40#define HCR_TID3 (UL(1) << 18)
41#define HCR_TID2 (UL(1) << 17)
42#define HCR_TID1 (UL(1) << 16)
43#define HCR_TID0 (UL(1) << 15)
44#define HCR_TWE (UL(1) << 14)
45#define HCR_TWI (UL(1) << 13)
46#define HCR_DC (UL(1) << 12)
47#define HCR_BSU (3 << 10)
48#define HCR_BSU_IS (UL(1) << 10)
49#define HCR_FB (UL(1) << 9)
50#define HCR_VA (UL(1) << 8)
51#define HCR_VI (UL(1) << 7)
52#define HCR_VF (UL(1) << 6)
53#define HCR_AMO (UL(1) << 5)
54#define HCR_IMO (UL(1) << 4)
55#define HCR_FMO (UL(1) << 3)
56#define HCR_PTW (UL(1) << 2)
57#define HCR_SWIO (UL(1) << 1)
58#define HCR_VM (UL(1) << 0)
59
60/*
61 * The bits we set in HCR:
62 * RW: 64bit by default, can be overridden for 32bit VMs
63 * TAC: Trap ACTLR
64 * TSC: Trap SMC
65 * TSW: Trap cache operations by set/way
66 * TWI: Trap WFI
67 * TIDCP: Trap L2CTLR/L2ECTLR
68 * BSU_IS: Upgrade barriers to the inner shareable domain
69 * FB: Force broadcast of all maintenance operations
70 * AMO: Override CPSR.A and enable signaling with VA
71 * IMO: Override CPSR.I and enable signaling with VI
72 * FMO: Override CPSR.F and enable signaling with VF
73 * SWIO: Turn set/way invalidates into set/way clean+invalidate
74 */
75#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
76 HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
77 HCR_SWIO | HCR_TIDCP | HCR_RW)
78#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
79
80/* Hyp System Control Register (SCTLR_EL2) bits */
81#define SCTLR_EL2_EE (1 << 25)
82#define SCTLR_EL2_WXN (1 << 19)
83#define SCTLR_EL2_I (1 << 12)
84#define SCTLR_EL2_SA (1 << 3)
85#define SCTLR_EL2_C (1 << 2)
86#define SCTLR_EL2_A (1 << 1)
87#define SCTLR_EL2_M 1
88#define SCTLR_EL2_FLAGS (SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C | \
89 SCTLR_EL2_SA | SCTLR_EL2_I)
90
91/* TCR_EL2 Registers bits */
92#define TCR_EL2_TBI (1 << 20)
93#define TCR_EL2_PS (7 << 16)
94#define TCR_EL2_PS_40B (2 << 16)
95#define TCR_EL2_TG0 (1 << 14)
96#define TCR_EL2_SH0 (3 << 12)
97#define TCR_EL2_ORGN0 (3 << 10)
98#define TCR_EL2_IRGN0 (3 << 8)
99#define TCR_EL2_T0SZ 0x3f
100#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
101 TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
102
103#define TCR_EL2_FLAGS (TCR_EL2_PS_40B)
104
105/* VTCR_EL2 Registers bits */
106#define VTCR_EL2_PS_MASK (7 << 16)
107#define VTCR_EL2_PS_40B (2 << 16)
108#define VTCR_EL2_TG0_MASK (1 << 14)
109#define VTCR_EL2_TG0_4K (0 << 14)
110#define VTCR_EL2_TG0_64K (1 << 14)
111#define VTCR_EL2_SH0_MASK (3 << 12)
112#define VTCR_EL2_SH0_INNER (3 << 12)
113#define VTCR_EL2_ORGN0_MASK (3 << 10)
114#define VTCR_EL2_ORGN0_WBWA (1 << 10)
115#define VTCR_EL2_IRGN0_MASK (3 << 8)
116#define VTCR_EL2_IRGN0_WBWA (1 << 8)
117#define VTCR_EL2_SL0_MASK (3 << 6)
118#define VTCR_EL2_SL0_LVL1 (1 << 6)
119#define VTCR_EL2_T0SZ_MASK 0x3f
120#define VTCR_EL2_T0SZ_40B 24
121
122#ifdef CONFIG_ARM64_64K_PAGES
123/*
124 * Stage2 translation configuration:
125 * 40bits output (PS = 2)
126 * 40bits input (T0SZ = 24)
127 * 64kB pages (TG0 = 1)
128 * 2 level page tables (SL = 1)
129 */
130#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_64K | \
131 VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
132 VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
133 VTCR_EL2_T0SZ_40B)
134#define VTTBR_X (38 - VTCR_EL2_T0SZ_40B)
135#else
136/*
137 * Stage2 translation configuration:
138 * 40bits output (PS = 2)
139 * 40bits input (T0SZ = 24)
140 * 4kB pages (TG0 = 0)
141 * 3 level page tables (SL = 1)
142 */
143#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_4K | \
144 VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
145 VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
146 VTCR_EL2_T0SZ_40B)
147#define VTTBR_X (37 - VTCR_EL2_T0SZ_40B)
148#endif
149
150#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
151#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
152#define VTTBR_VMID_SHIFT (48LLU)
153#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
154
155/* Hyp System Trap Register */
156#define HSTR_EL2_TTEE (1 << 16)
157#define HSTR_EL2_T(x) (1 << x)
158
159/* Hyp Coprocessor Trap Register */
160#define CPTR_EL2_TCPAC (1 << 31)
161#define CPTR_EL2_TTA (1 << 20)
162#define CPTR_EL2_TFP (1 << 10)
163
164/* Hyp Debug Configuration Register bits */
165#define MDCR_EL2_TDRA (1 << 11)
166#define MDCR_EL2_TDOSA (1 << 10)
167#define MDCR_EL2_TDA (1 << 9)
168#define MDCR_EL2_TDE (1 << 8)
169#define MDCR_EL2_HPME (1 << 7)
170#define MDCR_EL2_TPM (1 << 6)
171#define MDCR_EL2_TPMCR (1 << 5)
172#define MDCR_EL2_HPMN_MASK (0x1F)
173
174/* Exception Syndrome Register (ESR) bits */
175#define ESR_EL2_EC_SHIFT (26)
176#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT)
177#define ESR_EL2_IL (1U << 25)
178#define ESR_EL2_ISS (ESR_EL2_IL - 1)
179#define ESR_EL2_ISV_SHIFT (24)
180#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT)
181#define ESR_EL2_SAS_SHIFT (22)
182#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT)
183#define ESR_EL2_SSE (1 << 21)
184#define ESR_EL2_SRT_SHIFT (16)
185#define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
186#define ESR_EL2_SF (1 << 15)
187#define ESR_EL2_AR (1 << 14)
188#define ESR_EL2_EA (1 << 9)
189#define ESR_EL2_CM (1 << 8)
190#define ESR_EL2_S1PTW (1 << 7)
191#define ESR_EL2_WNR (1 << 6)
192#define ESR_EL2_FSC (0x3f)
193#define ESR_EL2_FSC_TYPE (0x3c)
194
195#define ESR_EL2_CV_SHIFT (24)
196#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT)
197#define ESR_EL2_COND_SHIFT (20)
198#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT)
199
200
201#define FSC_FAULT (0x04)
202#define FSC_PERM (0x0c)
203
204/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
205#define HPFAR_MASK (~0xFUL)
206
207#define ESR_EL2_EC_UNKNOWN (0x00)
208#define ESR_EL2_EC_WFI (0x01)
209#define ESR_EL2_EC_CP15_32 (0x03)
210#define ESR_EL2_EC_CP15_64 (0x04)
211#define ESR_EL2_EC_CP14_MR (0x05)
212#define ESR_EL2_EC_CP14_LS (0x06)
213#define ESR_EL2_EC_FP_ASIMD (0x07)
214#define ESR_EL2_EC_CP10_ID (0x08)
215#define ESR_EL2_EC_CP14_64 (0x0C)
216#define ESR_EL2_EC_ILL_ISS (0x0E)
217#define ESR_EL2_EC_SVC32 (0x11)
218#define ESR_EL2_EC_HVC32 (0x12)
219#define ESR_EL2_EC_SMC32 (0x13)
220#define ESR_EL2_EC_SVC64 (0x15)
221#define ESR_EL2_EC_HVC64 (0x16)
222#define ESR_EL2_EC_SMC64 (0x17)
223#define ESR_EL2_EC_SYS64 (0x18)
224#define ESR_EL2_EC_IABT (0x20)
225#define ESR_EL2_EC_IABT_HYP (0x21)
226#define ESR_EL2_EC_PC_ALIGN (0x22)
227#define ESR_EL2_EC_DABT (0x24)
228#define ESR_EL2_EC_DABT_HYP (0x25)
229#define ESR_EL2_EC_SP_ALIGN (0x26)
230#define ESR_EL2_EC_FP_EXC32 (0x28)
231#define ESR_EL2_EC_FP_EXC64 (0x2C)
232#define ESR_EL2_EC_SERRROR (0x2F)
233#define ESR_EL2_EC_BREAKPT (0x30)
234#define ESR_EL2_EC_BREAKPT_HYP (0x31)
235#define ESR_EL2_EC_SOFTSTP (0x32)
236#define ESR_EL2_EC_SOFTSTP_HYP (0x33)
237#define ESR_EL2_EC_WATCHPT (0x34)
238#define ESR_EL2_EC_WATCHPT_HYP (0x35)
239#define ESR_EL2_EC_BKPT32 (0x38)
240#define ESR_EL2_EC_VECTOR32 (0x3A)
241#define ESR_EL2_EC_BRK64 (0x3C)
242
243#define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10
244
245#endif /* __ARM64_KVM_ARM_H__ */
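
For reference, the EC/ISS split defined above is easy to exercise in isolation. A minimal standalone sketch, with local constants mirroring ESR_EL2_EC_SHIFT, ESR_EL2_EC and ESR_EL2_ISS (the syndrome value itself is made up):

#include <stdint.h>
#include <stdio.h>

/* Local copies of ESR_EL2_EC_SHIFT, ESR_EL2_EC and ESR_EL2_ISS from above. */
#define EC_SHIFT	26
#define EC_MASK		(0x3fU << EC_SHIFT)
#define ISS_MASK	((1U << 25) - 1)

int main(void)
{
	/* Hypothetical syndrome: EC = 0x24 (data abort from a lower EL). */
	uint32_t esr = (0x24U << EC_SHIFT) | 0x46;

	printf("EC  = 0x%02x\n", (esr & EC_MASK) >> EC_SHIFT);
	printf("ISS = 0x%x\n", esr & ISS_MASK);
	return 0;
}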
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
new file mode 100644
index 000000000000..c92de4163eba
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -0,0 +1,104 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ARM_KVM_ASM_H__
19#define __ARM_KVM_ASM_H__
20
21/*
22 * 0 is reserved as an invalid value.
23 * Order *must* be kept in sync with the hyp switch code.
24 */
25#define MPIDR_EL1 1 /* MultiProcessor Affinity Register */
26#define CSSELR_EL1 2 /* Cache Size Selection Register */
27#define SCTLR_EL1 3 /* System Control Register */
28#define ACTLR_EL1 4 /* Auxiliary Control Register */
29#define CPACR_EL1 5 /* Coprocessor Access Control */
30#define TTBR0_EL1 6 /* Translation Table Base Register 0 */
31#define TTBR1_EL1 7 /* Translation Table Base Register 1 */
32#define TCR_EL1 8 /* Translation Control Register */
33#define ESR_EL1 9 /* Exception Syndrome Register */
34#define AFSR0_EL1 10 /* Auxiliary Fault Status Register 0 */
35#define AFSR1_EL1 11 /* Auxiliary Fault Status Register 1 */
36#define FAR_EL1 12 /* Fault Address Register */
37#define MAIR_EL1 13 /* Memory Attribute Indirection Register */
38#define VBAR_EL1 14 /* Vector Base Address Register */
39#define CONTEXTIDR_EL1 15 /* Context ID Register */
40#define TPIDR_EL0 16 /* Thread ID, User R/W */
41#define TPIDRRO_EL0 17 /* Thread ID, User R/O */
42#define TPIDR_EL1 18 /* Thread ID, Privileged */
43#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
44#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
45/* 32bit specific registers. Keep them at the end of the range */
46#define DACR32_EL2 21 /* Domain Access Control Register */
47#define IFSR32_EL2 22 /* Instruction Fault Status Register */
48#define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */
49#define DBGVCR32_EL2 24 /* Debug Vector Catch Register */
50#define TEECR32_EL1 25 /* ThumbEE Configuration Register */
51#define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */
52#define NR_SYS_REGS 27
53
54/* 32bit mapping */
55#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
56#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */
57#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */
58#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */
59#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */
60#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
61#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */
62#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
63#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */
64#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */
65#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */
66#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */
67#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
68#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
69#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
70#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
71#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
72#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
73#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
74#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
75#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */
76#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */
77#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
78#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */
79#define c10_AMAIR (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
80#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
81#define NR_CP15_REGS (NR_SYS_REGS * 2)
82
83#define ARM_EXCEPTION_IRQ 0
84#define ARM_EXCEPTION_TRAP 1
85
86#ifndef __ASSEMBLY__
87struct kvm;
88struct kvm_vcpu;
89
90extern char __kvm_hyp_init[];
91extern char __kvm_hyp_init_end[];
92
93extern char __kvm_hyp_vector[];
94
95extern char __kvm_hyp_code_start[];
96extern char __kvm_hyp_code_end[];
97
98extern void __kvm_flush_vm_context(void);
99extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
100
101extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
102#endif
103
104#endif /* __ARM_KVM_ASM_H__ */
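
The "* 2" scheme in the 32bit mapping above relies on the sys_regs[]/cp15[] union declared in kvm_host.h: each 64bit EL1 register occupies two consecutive 32bit CP15 words. A standalone illustration of that aliasing, assuming a little-endian host (the register value is made up):

#include <stdint.h>
#include <stdio.h>

#define NR_SYS_REGS	27
#define TTBR0_EL1	6			/* index into sys_regs[], as above */
#define c2_TTBR0	(TTBR0_EL1 * 2)		/* low word in the cp15[] view */
#define c2_TTBR0_high	(c2_TTBR0 + 1)		/* high word */

int main(void)
{
	union {
		uint64_t sys_regs[NR_SYS_REGS];
		uint32_t cp15[NR_SYS_REGS * 2];
	} ctxt = { { 0 } };

	ctxt.sys_regs[TTBR0_EL1] = 0x0000004412345678ULL;

	/* On a little-endian host the two 32bit words are the low/high halves. */
	printf("c2_TTBR0      = 0x%08x\n", (unsigned)ctxt.cp15[c2_TTBR0]);
	printf("c2_TTBR0_high = 0x%08x\n", (unsigned)ctxt.cp15[c2_TTBR0_high]);
	return 0;
}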
diff --git a/arch/arm64/include/asm/kvm_coproc.h b/arch/arm64/include/asm/kvm_coproc.h
new file mode 100644
index 000000000000..9a59301cd014
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_coproc.h
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/include/asm/kvm_coproc.h
6 * Copyright (C) 2012 Rusty Russell IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#ifndef __ARM64_KVM_COPROC_H__
22#define __ARM64_KVM_COPROC_H__
23
24#include <linux/kvm_host.h>
25
26void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
27
28struct kvm_sys_reg_table {
29 const struct sys_reg_desc *table;
30 size_t num;
31};
32
33struct kvm_sys_reg_target_table {
34 struct kvm_sys_reg_table table64;
35 struct kvm_sys_reg_table table32;
36};
37
38void kvm_register_target_sys_reg_table(unsigned int target,
39 struct kvm_sys_reg_target_table *table);
40
41int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
42int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
43int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
44int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
45int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
46
47#define kvm_coproc_table_init kvm_sys_reg_table_init
48void kvm_sys_reg_table_init(void);
49
50struct kvm_one_reg;
51int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
52int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
53int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
54unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
55
56#endif /* __ARM64_KVM_COPROC_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
new file mode 100644
index 000000000000..eec073875218
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -0,0 +1,180 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/include/kvm_emulate.h
6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#ifndef __ARM64_KVM_EMULATE_H__
23#define __ARM64_KVM_EMULATE_H__
24
25#include <linux/kvm_host.h>
26#include <asm/kvm_asm.h>
27#include <asm/kvm_arm.h>
28#include <asm/kvm_mmio.h>
29#include <asm/ptrace.h>
30
31unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
32unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
33
34bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
35void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
36
37void kvm_inject_undefined(struct kvm_vcpu *vcpu);
38void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
39void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
40
41static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
42{
43 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
44}
45
46static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
47{
48 return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
49}
50
51static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
52{
53 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
54}
55
56static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
57{
58 return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
59}
60
61static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
62{
63 if (vcpu_mode_is_32bit(vcpu))
64 return kvm_condition_valid32(vcpu);
65
66 return true;
67}
68
69static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
70{
71 if (vcpu_mode_is_32bit(vcpu))
72 kvm_skip_instr32(vcpu, is_wide_instr);
73 else
74 *vcpu_pc(vcpu) += 4;
75}
76
77static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
78{
79 *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
80}
81
82static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
83{
84 if (vcpu_mode_is_32bit(vcpu))
85 return vcpu_reg32(vcpu, reg_num);
86
87 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
88}
89
90/* Get vcpu SPSR for current mode */
91static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
92{
93 if (vcpu_mode_is_32bit(vcpu))
94 return vcpu_spsr32(vcpu);
95
96 return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
97}
98
99static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
100{
101 u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
102
103 if (vcpu_mode_is_32bit(vcpu))
104 return mode > COMPAT_PSR_MODE_USR;
105
106 return mode != PSR_MODE_EL0t;
107}
108
109static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
110{
111 return vcpu->arch.fault.esr_el2;
112}
113
114static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
115{
116 return vcpu->arch.fault.far_el2;
117}
118
119static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
120{
121 return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
122}
123
124static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
125{
126 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV);
127}
128
129static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
130{
131 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR);
132}
133
134static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
135{
136 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE);
137}
138
139static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
140{
141 return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT;
142}
143
144static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
145{
146 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA);
147}
148
149static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
150{
151 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW);
152}
153
154static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
155{
156 return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT);
157}
158
159/* This one is not specific to Data Abort */
160static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
161{
162 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL);
163}
164
165static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
166{
167 return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT;
168}
169
170static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
171{
172 return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT;
173}
174
175static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
176{
177 return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
178}
179
180#endif /* __ARM64_KVM_EMULATE_H__ */
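
The data abort accessors above are all simple field extractions from the syndrome; the following standalone sketch performs the same decode with local copies of the field positions (the syndrome value is made up):

#include <stdint.h>
#include <stdio.h>

/* Local copies of the ESR_EL2 data-abort fields used by the helpers above. */
#define ISV		(1U << 24)		/* ESR_EL2_ISV */
#define SAS_SHIFT	22			/* ESR_EL2_SAS_SHIFT */
#define SRT_SHIFT	16			/* ESR_EL2_SRT_SHIFT */
#define WNR		(1U << 6)		/* ESR_EL2_WNR */

int main(void)
{
	/* Hypothetical syndrome: valid ISS, 4-byte write, Rt = x3. */
	uint32_t esr = ISV | (2U << SAS_SHIFT) | (3U << SRT_SHIFT) | WNR;

	if (esr & ISV)
		printf("%u-byte %s, Rt = x%u\n",
		       1U << ((esr >> SAS_SHIFT) & 3),	/* kvm_vcpu_dabt_get_as() */
		       (esr & WNR) ? "write" : "read",	/* kvm_vcpu_dabt_iswrite() */
		       (esr >> SRT_SHIFT) & 0x1f);	/* kvm_vcpu_dabt_get_rd() */
	return 0;
}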
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
new file mode 100644
index 000000000000..644d73956864
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -0,0 +1,202 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/include/asm/kvm_host.h:
6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#ifndef __ARM64_KVM_HOST_H__
23#define __ARM64_KVM_HOST_H__
24
25#include <asm/kvm.h>
26#include <asm/kvm_asm.h>
27#include <asm/kvm_mmio.h>
28
29#define KVM_MAX_VCPUS 4
30#define KVM_USER_MEM_SLOTS 32
31#define KVM_PRIVATE_MEM_SLOTS 4
32#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
33
34#include <kvm/arm_vgic.h>
35#include <kvm/arm_arch_timer.h>
36
37#define KVM_VCPU_MAX_FEATURES 2
38
39/* We don't currently support large pages. */
40#define KVM_HPAGE_GFN_SHIFT(x) 0
41#define KVM_NR_PAGE_SIZES 1
42#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
43
44struct kvm_vcpu;
45int kvm_target_cpu(void);
46int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
47int kvm_arch_dev_ioctl_check_extension(long ext);
48
49struct kvm_arch {
50 /* The VMID generation used for the virt. memory system */
51 u64 vmid_gen;
52 u32 vmid;
53
54 /* 1-level 2nd stage table and lock */
55 spinlock_t pgd_lock;
56 pgd_t *pgd;
57
58 /* VTTBR value associated with above pgd and vmid */
59 u64 vttbr;
60
61 /* Interrupt controller */
62 struct vgic_dist vgic;
63
64 /* Timer */
65 struct arch_timer_kvm timer;
66};
67
68#define KVM_NR_MEM_OBJS 40
69
70/*
71 * We don't want allocation failures within the mmu code, so we preallocate
72 * enough memory for a single page fault in a cache.
73 */
74struct kvm_mmu_memory_cache {
75 int nobjs;
76 void *objects[KVM_NR_MEM_OBJS];
77};
78
79struct kvm_vcpu_fault_info {
80 u32 esr_el2; /* Hyp Syndrome Register */
81 u64 far_el2; /* Hyp Fault Address Register */
82 u64 hpfar_el2; /* Hyp IPA Fault Address Register */
83};
84
85struct kvm_cpu_context {
86 struct kvm_regs gp_regs;
87 union {
88 u64 sys_regs[NR_SYS_REGS];
89 u32 cp15[NR_CP15_REGS];
90 };
91};
92
93typedef struct kvm_cpu_context kvm_cpu_context_t;
94
95struct kvm_vcpu_arch {
96 struct kvm_cpu_context ctxt;
97
98 /* HYP configuration */
99 u64 hcr_el2;
100
101 /* Exception Information */
102 struct kvm_vcpu_fault_info fault;
103
104 /* Pointer to host CPU context */
105 kvm_cpu_context_t *host_cpu_context;
106
107 /* VGIC state */
108 struct vgic_cpu vgic_cpu;
109 struct arch_timer_cpu timer_cpu;
110
111 /*
112 * Anything that is not used directly from assembly code goes
113 * here.
114 */
115 /* dcache set/way operation pending */
116 int last_pcpu;
117 cpumask_t require_dcache_flush;
118
119 /* Don't run the guest */
120 bool pause;
121
122 /* IO related fields */
123 struct kvm_decode mmio_decode;
124
125 /* Interrupt related fields */
126 u64 irq_lines; /* IRQ and FIQ levels */
127
128 /* Cache some mmu pages needed inside spinlock regions */
129 struct kvm_mmu_memory_cache mmu_page_cache;
130
131 /* Target CPU and feature flags */
132 u32 target;
133 DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
134
135 /* Detect first run of a vcpu */
136 bool has_run_once;
137};
138
139#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
140#define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)])
141#define vcpu_cp15(v,r) ((v)->arch.ctxt.cp15[(r)])
142
143struct kvm_vm_stat {
144 u32 remote_tlb_flush;
145};
146
147struct kvm_vcpu_stat {
148 u32 halt_wakeup;
149};
150
151struct kvm_vcpu_init;
152int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
153 const struct kvm_vcpu_init *init);
154unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
155int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
156struct kvm_one_reg;
157int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
158int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
159
160#define KVM_ARCH_WANT_MMU_NOTIFIER
161struct kvm;
162int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
163int kvm_unmap_hva_range(struct kvm *kvm,
164 unsigned long start, unsigned long end);
165void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
166
167/* We do not have shadow page tables, hence the empty hooks */
168static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
169{
170 return 0;
171}
172
173static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
174{
175 return 0;
176}
177
178struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
179struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
180
181u64 kvm_call_hyp(void *hypfn, ...);
182
183int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
184 int exception_index);
185
186int kvm_perf_init(void);
187int kvm_perf_teardown(void);
188
189static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
190 phys_addr_t pgd_ptr,
191 unsigned long hyp_stack_ptr,
192 unsigned long vector_ptr)
193{
194 /*
195 * Call initialization code, and switch to the full blown
196 * HYP code.
197 */
198 kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
199 hyp_stack_ptr, vector_ptr);
200}
201
202#endif /* __ARM64_KVM_HOST_H__ */
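
The kvm_mmu_memory_cache comment above describes the usual preallocate-outside-the-lock pattern; a userspace-flavoured sketch of the idea (malloc() and the names here are stand-ins for the kernel's helpers, not the actual implementation):

#include <stdlib.h>

#define NR_MEM_OBJS	40		/* mirrors KVM_NR_MEM_OBJS */

struct mem_cache {
	int nobjs;
	void *objects[NR_MEM_OBJS];
};

/* Filled outside the spinlock, where allocation is allowed to sleep/fail. */
static int cache_topup(struct mem_cache *mc, int min, size_t size)
{
	while (mc->nobjs < min) {
		void *obj = malloc(size);

		if (!obj)
			return -1;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

/* Consumed under the lock: popping a preallocated object cannot fail. */
static void *cache_pop(struct mem_cache *mc)
{
	return mc->nobjs ? mc->objects[--mc->nobjs] : NULL;
}

int main(void)
{
	struct mem_cache mc = { 0 };

	if (cache_topup(&mc, 8, 4096) == 0)
		(void)cache_pop(&mc);	/* e.g. a page-table page for a fault */
	return 0;
}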
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
new file mode 100644
index 000000000000..fc2f689c0694
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -0,0 +1,59 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ARM64_KVM_MMIO_H__
19#define __ARM64_KVM_MMIO_H__
20
21#include <linux/kvm_host.h>
22#include <asm/kvm_asm.h>
23#include <asm/kvm_arm.h>
24
25/*
26 * This is annoying. The mmio code requires this, even if we don't
27 * need any decoding. To be fixed.
28 */
29struct kvm_decode {
30 unsigned long rt;
31 bool sign_extend;
32};
33
34/*
35 * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
36 * which is an anonymous type. Use our own type instead.
37 */
38struct kvm_exit_mmio {
39 phys_addr_t phys_addr;
40 u8 data[8];
41 u32 len;
42 bool is_write;
43};
44
45static inline void kvm_prepare_mmio(struct kvm_run *run,
46 struct kvm_exit_mmio *mmio)
47{
48 run->mmio.phys_addr = mmio->phys_addr;
49 run->mmio.len = mmio->len;
50 run->mmio.is_write = mmio->is_write;
51 memcpy(run->mmio.data, mmio->data, mmio->len);
52 run->exit_reason = KVM_EXIT_MMIO;
53}
54
55int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
56int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
57 phys_addr_t fault_ipa);
58
59#endif /* __ARM64_KVM_MMIO_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
new file mode 100644
index 000000000000..efe609c6a3c9
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -0,0 +1,135 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ARM64_KVM_MMU_H__
19#define __ARM64_KVM_MMU_H__
20
21#include <asm/page.h>
22#include <asm/memory.h>
23
24/*
25 * As we only have the TTBR0_EL2 register, we cannot express
26 * "negative" addresses. This makes it impossible to directly share
27 * mappings with the kernel.
28 *
29 * Instead, give the HYP mode its own VA region at a fixed offset from
30 * the kernel by just masking the top bits (which are all ones for a
31 * kernel address).
32 */
33#define HYP_PAGE_OFFSET_SHIFT VA_BITS
34#define HYP_PAGE_OFFSET_MASK ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
35#define HYP_PAGE_OFFSET (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
36
37/*
38 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
39 * shared across all the page-tables. Conveniently, we use the last
40 * possible page, where no kernel mapping will ever exist.
41 */
42#define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK)
43
44#ifdef __ASSEMBLY__
45
46/*
47 * Convert a kernel VA into a HYP VA.
48 * reg: VA to be converted.
49 */
50.macro kern_hyp_va reg
51 and \reg, \reg, #HYP_PAGE_OFFSET_MASK
52.endm
53
54#else
55
56#include <asm/cachetype.h>
57#include <asm/cacheflush.h>
58
59#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
60
61/*
62 * Align KVM with the kernel's view of physical memory. Should be
63 * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration.
64 */
65#define KVM_PHYS_SHIFT PHYS_MASK_SHIFT
66#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
67#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
68
69/* Make sure we get the right size, and thus the right alignment */
70#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
71#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
72
73int create_hyp_mappings(void *from, void *to);
74int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
75void free_boot_hyp_pgd(void);
76void free_hyp_pgds(void);
77
78int kvm_alloc_stage2_pgd(struct kvm *kvm);
79void kvm_free_stage2_pgd(struct kvm *kvm);
80int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
81 phys_addr_t pa, unsigned long size);
82
83int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
84
85void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
86
87phys_addr_t kvm_mmu_get_httbr(void);
88phys_addr_t kvm_mmu_get_boot_httbr(void);
89phys_addr_t kvm_get_idmap_vector(void);
90int kvm_mmu_init(void);
91void kvm_clear_hyp_idmap(void);
92
93#define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
94
95static inline bool kvm_is_write_fault(unsigned long esr)
96{
97 unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT;
98
99 if (esr_ec == ESR_EL2_EC_IABT)
100 return false;
101
102 if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR))
103 return false;
104
105 return true;
106}
107
108static inline void kvm_clean_dcache_area(void *addr, size_t size) {}
109static inline void kvm_clean_pgd(pgd_t *pgd) {}
110static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
111static inline void kvm_clean_pte(pte_t *pte) {}
112static inline void kvm_clean_pte_entry(pte_t *pte) {}
113
114static inline void kvm_set_s2pte_writable(pte_t *pte)
115{
116 pte_val(*pte) |= PTE_S2_RDWR;
117}
118
119struct kvm;
120
121static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
122{
123 if (!icache_is_aliasing()) { /* PIPT */
124 unsigned long hva = gfn_to_hva(kvm, gfn);
125 flush_icache_range(hva, hva + PAGE_SIZE);
126 } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
127 /* any kind of VIPT cache */
128 __flush_icache_all();
129 }
130}
131
132#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
133
134#endif /* __ASSEMBLY__ */
135#endif /* __ARM64_KVM_MMU_H__ */
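
The HYP VA scheme described at the top of kvm_mmu.h boils down to masking off the sign-extension bits of a kernel address; a standalone sketch, assuming a VA_BITS == 39 configuration (the kernel address shown is made up):

#include <stdint.h>
#include <stdio.h>

#define VA_BITS			39				/* assumed configuration */
#define HYP_PAGE_OFFSET_MASK	((UINT64_C(1) << VA_BITS) - 1)

int main(void)
{
	uint64_t kva = UINT64_C(0xffffffc000123456);		/* hypothetical kernel VA */
	uint64_t hyp = kva & HYP_PAGE_OFFSET_MASK;		/* what kern_hyp_va does */

	printf("kernel VA 0x%llx -> HYP VA 0x%llx\n",
	       (unsigned long long)kva, (unsigned long long)hyp);
	return 0;
}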
diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h
new file mode 100644
index 000000000000..e301a4816355
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_psci.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ARM64_KVM_PSCI_H__
19#define __ARM64_KVM_PSCI_H__
20
21bool kvm_psci_call(struct kvm_vcpu *vcpu);
22
23#endif /* __ARM64_KVM_PSCI_H__ */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 381f556b664e..20925bcf4e2a 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -90,6 +90,12 @@
 #define MT_NORMAL_NC		3
 #define MT_NORMAL		4
 
+/*
+ * Memory types for Stage-2 translation
+ */
+#define MT_S2_NORMAL		0xf
+#define MT_S2_DEVICE_nGnRE	0x1
+
 #ifndef __ASSEMBLY__
 
 extern phys_addr_t memstart_addr;
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 75fd13d289b9..66367c6c6527 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -35,6 +35,7 @@
 /*
  * Section
  */
+#define PMD_SECT_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
 #define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
 #define PMD_SECT_AF		(_AT(pmdval_t, 1) << 10)
 #define PMD_SECT_NG		(_AT(pmdval_t, 1) << 11)
@@ -68,6 +69,24 @@
 #define PTE_ATTRINDX_MASK	(_AT(pteval_t, 7) << 2)
 
 /*
+ * 2nd stage PTE definitions
+ */
+#define PTE_S2_RDONLY		(_AT(pteval_t, 1) << 6)   /* HAP[2:1] */
+#define PTE_S2_RDWR		(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
+
+/*
+ * Memory Attribute override for Stage-2 (MemAttr[3:0])
+ */
+#define PTE_S2_MEMATTR(t)	(_AT(pteval_t, (t)) << 2)
+#define PTE_S2_MEMATTR_MASK	(_AT(pteval_t, 0xf) << 2)
+
+/*
+ * EL2/HYP PTE/PMD definitions
+ */
+#define PMD_HYP			PMD_SECT_USER
+#define PTE_HYP			PTE_USER
+
+/*
  * 40-bit physical address supported.
  */
 #define PHYS_MASK_SHIFT		(40)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b93bc2326f56..5588e8ad9762 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -76,6 +76,12 @@ extern pgprot_t pgprot_default;
 #define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
 #define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
 
+#define PAGE_HYP		_MOD_PROT(pgprot_default, PTE_HYP)
+#define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+
+#define PAGE_S2			__pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
+
 #define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE)
 #define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
 #define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
@@ -197,6 +203,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 
 #define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))
 
+#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
+				 PMD_TYPE_TABLE)
+#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
+				 PMD_TYPE_SECT)
+
+
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
 	*pmdp = pmd;
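
The stage-2 HAP bits added in pgtable-hwdef.h above are what kvm_set_s2pte_writable() in kvm_mmu.h flips; a standalone sketch of that bit manipulation (the pte value is made up):

#include <stdint.h>
#include <stdio.h>

#define PTE_S2_RDONLY	(UINT64_C(1) << 6)	/* HAP[2:1] = 01 */
#define PTE_S2_RDWR	(UINT64_C(3) << 6)	/* HAP[2:1] = 11 */

int main(void)
{
	uint64_t pte = UINT64_C(0x0040000012345003) | PTE_S2_RDONLY;	/* hypothetical */

	pte |= PTE_S2_RDWR;		/* same effect as kvm_set_s2pte_writable() */
	printf("pte = 0x%llx, HAP = %llu\n",
	       (unsigned long long)pte, (unsigned long long)((pte >> 6) & 3));
	return 0;
}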
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
new file mode 100644
index 000000000000..5031f4263937
--- /dev/null
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -0,0 +1,168 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/include/uapi/asm/kvm.h:
6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#ifndef __ARM_KVM_H__
23#define __ARM_KVM_H__
24
25#define KVM_SPSR_EL1 0
26#define KVM_SPSR_SVC KVM_SPSR_EL1
27#define KVM_SPSR_ABT 1
28#define KVM_SPSR_UND 2
29#define KVM_SPSR_IRQ 3
30#define KVM_SPSR_FIQ 4
31#define KVM_NR_SPSR 5
32
33#ifndef __ASSEMBLY__
34#include <asm/types.h>
35#include <asm/ptrace.h>
36
37#define __KVM_HAVE_GUEST_DEBUG
38#define __KVM_HAVE_IRQ_LINE
39
40#define KVM_REG_SIZE(id) \
41 (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
42
43struct kvm_regs {
44 struct user_pt_regs regs; /* sp = sp_el0 */
45
46 __u64 sp_el1;
47 __u64 elr_el1;
48
49 __u64 spsr[KVM_NR_SPSR];
50
51 struct user_fpsimd_state fp_regs;
52};
53
54/* Supported Processor Types */
55#define KVM_ARM_TARGET_AEM_V8 0
56#define KVM_ARM_TARGET_FOUNDATION_V8 1
57#define KVM_ARM_TARGET_CORTEX_A57 2
58
59#define KVM_ARM_NUM_TARGETS 3
60
61/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
62#define KVM_ARM_DEVICE_TYPE_SHIFT 0
63#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
64#define KVM_ARM_DEVICE_ID_SHIFT 16
65#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
66
67/* Supported device IDs */
68#define KVM_ARM_DEVICE_VGIC_V2 0
69
70/* Supported VGIC address types */
71#define KVM_VGIC_V2_ADDR_TYPE_DIST 0
72#define KVM_VGIC_V2_ADDR_TYPE_CPU 1
73
74#define KVM_VGIC_V2_DIST_SIZE 0x1000
75#define KVM_VGIC_V2_CPU_SIZE 0x2000
76
77#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
78#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
79
80struct kvm_vcpu_init {
81 __u32 target;
82 __u32 features[7];
83};
84
85struct kvm_sregs {
86};
87
88struct kvm_fpu {
89};
90
91struct kvm_guest_debug_arch {
92};
93
94struct kvm_debug_exit_arch {
95};
96
97struct kvm_sync_regs {
98};
99
100struct kvm_arch_memory_slot {
101};
102
103/* If you need to interpret the index values, here is the key: */
104#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
105#define KVM_REG_ARM_COPROC_SHIFT 16
106
107/* Normal registers are mapped as coprocessor 16. */
108#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
109#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / sizeof(__u32))
110
111/* Some registers need more space to represent values. */
112#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT)
113#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00
114#define KVM_REG_ARM_DEMUX_ID_SHIFT 8
115#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
116#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF
117#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0
118
119/* AArch64 system registers */
120#define KVM_REG_ARM64_SYSREG (0x0013 << KVM_REG_ARM_COPROC_SHIFT)
121#define KVM_REG_ARM64_SYSREG_OP0_MASK 0x000000000000c000
122#define KVM_REG_ARM64_SYSREG_OP0_SHIFT 14
123#define KVM_REG_ARM64_SYSREG_OP1_MASK 0x0000000000003800
124#define KVM_REG_ARM64_SYSREG_OP1_SHIFT 11
125#define KVM_REG_ARM64_SYSREG_CRN_MASK 0x0000000000000780
126#define KVM_REG_ARM64_SYSREG_CRN_SHIFT 7
127#define KVM_REG_ARM64_SYSREG_CRM_MASK 0x0000000000000078
128#define KVM_REG_ARM64_SYSREG_CRM_SHIFT 3
129#define KVM_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007
130#define KVM_REG_ARM64_SYSREG_OP2_SHIFT 0
131
132/* KVM_IRQ_LINE irq field index values */
133#define KVM_ARM_IRQ_TYPE_SHIFT 24
134#define KVM_ARM_IRQ_TYPE_MASK 0xff
135#define KVM_ARM_IRQ_VCPU_SHIFT 16
136#define KVM_ARM_IRQ_VCPU_MASK 0xff
137#define KVM_ARM_IRQ_NUM_SHIFT 0
138#define KVM_ARM_IRQ_NUM_MASK 0xffff
139
140/* irq_type field */
141#define KVM_ARM_IRQ_TYPE_CPU 0
142#define KVM_ARM_IRQ_TYPE_SPI 1
143#define KVM_ARM_IRQ_TYPE_PPI 2
144
145/* out-of-kernel GIC cpu interrupt injection irq_number field */
146#define KVM_ARM_IRQ_CPU_IRQ 0
147#define KVM_ARM_IRQ_CPU_FIQ 1
148
149/* Highest supported SPI, from VGIC_NR_IRQS */
150#define KVM_ARM_IRQ_GIC_MAX 127
151
152/* PSCI interface */
153#define KVM_PSCI_FN_BASE 0x95c1ba5e
154#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
155
156#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0)
157#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1)
158#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2)
159#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3)
160
161#define KVM_PSCI_RET_SUCCESS 0
162#define KVM_PSCI_RET_NI ((unsigned long)-1)
163#define KVM_PSCI_RET_INVAL ((unsigned long)-2)
164#define KVM_PSCI_RET_DENIED ((unsigned long)-3)
165
166#endif
167
168#endif /* __ARM_KVM_H__ */
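
The OP0/OP1/CRn/CRm/OP2 fields above pack a system register reference into the low bits of a KVM_REG_ARM64_SYSREG id (the size and coprocessor flags are OR-ed in separately); a standalone sketch of the packing, using SCTLR_EL1 (op0=3, op1=0, CRn=1, CRm=0, op2=0) as the worked example:

#include <stdint.h>
#include <stdio.h>

/* Local copies of the KVM_REG_ARM64_SYSREG_*_SHIFT values defined above. */
#define OP0_SHIFT	14
#define OP1_SHIFT	11
#define CRN_SHIFT	7
#define CRM_SHIFT	3
#define OP2_SHIFT	0

static uint64_t sysreg_bits(unsigned op0, unsigned op1, unsigned crn,
			    unsigned crm, unsigned op2)
{
	return ((uint64_t)op0 << OP0_SHIFT) | ((uint64_t)op1 << OP1_SHIFT) |
	       ((uint64_t)crn << CRN_SHIFT) | ((uint64_t)crm << CRM_SHIFT) |
	       ((uint64_t)op2 << OP2_SHIFT);
}

int main(void)
{
	/* SCTLR_EL1 is encoded as op0=3, op1=0, CRn=1, CRm=0, op2=0. */
	printf("sysreg field bits = 0x%llx\n",
	       (unsigned long long)sysreg_bits(3, 0, 1, 0, 0));
	return 0;
}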