aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm64/include/asm
diff options
context:
space:
mode:
Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--arch/arm64/include/asm/cacheflush.h3
-rw-r--r--arch/arm64/include/asm/cputype.h3
-rw-r--r--arch/arm64/include/asm/debug-monitors.h9
-rw-r--r--arch/arm64/include/asm/device.h3
-rw-r--r--arch/arm64/include/asm/dma-mapping.h17
-rw-r--r--arch/arm64/include/asm/hypervisor.h6
-rw-r--r--arch/arm64/include/asm/io.h2
-rw-r--r--arch/arm64/include/asm/kvm_arm.h245
-rw-r--r--arch/arm64/include/asm/kvm_asm.h104
-rw-r--r--arch/arm64/include/asm/kvm_coproc.h56
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h180
-rw-r--r--arch/arm64/include/asm/kvm_host.h202
-rw-r--r--arch/arm64/include/asm/kvm_mmio.h59
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h135
-rw-r--r--arch/arm64/include/asm/kvm_psci.h23
-rw-r--r--arch/arm64/include/asm/memory.h6
-rw-r--r--arch/arm64/include/asm/mmu_context.h6
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h18
-rw-r--r--arch/arm64/include/asm/pgtable.h16
-rw-r--r--arch/arm64/include/asm/ptrace.h2
-rw-r--r--arch/arm64/include/asm/spinlock.h3
-rw-r--r--arch/arm64/include/asm/sync_bitops.h26
-rw-r--r--arch/arm64/include/asm/timex.h6
-rw-r--r--arch/arm64/include/asm/xen/events.h21
-rw-r--r--arch/arm64/include/asm/xen/hypercall.h1
-rw-r--r--arch/arm64/include/asm/xen/hypervisor.h1
-rw-r--r--arch/arm64/include/asm/xen/interface.h1
-rw-r--r--arch/arm64/include/asm/xen/page.h1
28 files changed, 1132 insertions, 23 deletions
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 3300cbd18a89..fea9ee327206 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -123,9 +123,6 @@ static inline void __flush_icache_all(void)
123#define flush_dcache_mmap_unlock(mapping) \ 123#define flush_dcache_mmap_unlock(mapping) \
124 spin_unlock_irq(&(mapping)->tree_lock) 124 spin_unlock_irq(&(mapping)->tree_lock)
125 125
126#define flush_icache_user_range(vma,page,addr,len) \
127 flush_dcache_page(page)
128
129/* 126/*
130 * We don't appear to need to do anything here. In fact, if we did, we'd 127 * We don't appear to need to do anything here. In fact, if we did, we'd
131 * duplicate cache flushing elsewhere performed by flush_dcache_page(). 128 * duplicate cache flushing elsewhere performed by flush_dcache_page().
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index cf2749488cd4..5fe138e0b828 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -37,11 +37,14 @@
37}) 37})
38 38
39#define ARM_CPU_IMP_ARM 0x41 39#define ARM_CPU_IMP_ARM 0x41
40#define ARM_CPU_IMP_APM 0x50
40 41
41#define ARM_CPU_PART_AEM_V8 0xD0F0 42#define ARM_CPU_PART_AEM_V8 0xD0F0
42#define ARM_CPU_PART_FOUNDATION 0xD000 43#define ARM_CPU_PART_FOUNDATION 0xD000
43#define ARM_CPU_PART_CORTEX_A57 0xD070 44#define ARM_CPU_PART_CORTEX_A57 0xD070
44 45
46#define APM_CPU_PART_POTENZA 0x0000
47
45#ifndef __ASSEMBLY__ 48#ifndef __ASSEMBLY__
46 49
47/* 50/*
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 7eaa0b302493..ef8235c68c09 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -83,6 +83,15 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs)
83} 83}
84#endif 84#endif
85 85
86#ifdef CONFIG_COMPAT
87int aarch32_break_handler(struct pt_regs *regs);
88#else
89static int aarch32_break_handler(struct pt_regs *regs)
90{
91 return -EFAULT;
92}
93#endif
94
86#endif /* __ASSEMBLY */ 95#endif /* __ASSEMBLY */
87#endif /* __KERNEL__ */ 96#endif /* __KERNEL__ */
88#endif /* __ASM_DEBUG_MONITORS_H */ 97#endif /* __ASM_DEBUG_MONITORS_H */
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 0d8453c755a8..cf98b362094b 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -18,6 +18,9 @@
18 18
19struct dev_archdata { 19struct dev_archdata {
20 struct dma_map_ops *dma_ops; 20 struct dma_map_ops *dma_ops;
21#ifdef CONFIG_IOMMU_API
22 void *iommu; /* private IOMMU data */
23#endif
21}; 24};
22 25
23struct pdev_archdata { 26struct pdev_archdata {
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 994776894198..8d1810001aef 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -81,8 +81,12 @@ static inline void dma_mark_clean(void *addr, size_t size)
81{ 81{
82} 82}
83 83
84static inline void *dma_alloc_coherent(struct device *dev, size_t size, 84#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
85 dma_addr_t *dma_handle, gfp_t flags) 85#define dma_free_coherent(d, s, h, f) dma_free_attrs(d, s, h, f, NULL)
86
87static inline void *dma_alloc_attrs(struct device *dev, size_t size,
88 dma_addr_t *dma_handle, gfp_t flags,
89 struct dma_attrs *attrs)
86{ 90{
87 struct dma_map_ops *ops = get_dma_ops(dev); 91 struct dma_map_ops *ops = get_dma_ops(dev);
88 void *vaddr; 92 void *vaddr;
@@ -90,13 +94,14 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
90 if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr)) 94 if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
91 return vaddr; 95 return vaddr;
92 96
93 vaddr = ops->alloc(dev, size, dma_handle, flags, NULL); 97 vaddr = ops->alloc(dev, size, dma_handle, flags, attrs);
94 debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr); 98 debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr);
95 return vaddr; 99 return vaddr;
96} 100}
97 101
98static inline void dma_free_coherent(struct device *dev, size_t size, 102static inline void dma_free_attrs(struct device *dev, size_t size,
99 void *vaddr, dma_addr_t dev_addr) 103 void *vaddr, dma_addr_t dev_addr,
104 struct dma_attrs *attrs)
100{ 105{
101 struct dma_map_ops *ops = get_dma_ops(dev); 106 struct dma_map_ops *ops = get_dma_ops(dev);
102 107
@@ -104,7 +109,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
104 return; 109 return;
105 110
106 debug_dma_free_coherent(dev, size, vaddr, dev_addr); 111 debug_dma_free_coherent(dev, size, vaddr, dev_addr);
107 ops->free(dev, size, vaddr, dev_addr, NULL); 112 ops->free(dev, size, vaddr, dev_addr, attrs);
108} 113}
109 114
110/* 115/*
diff --git a/arch/arm64/include/asm/hypervisor.h b/arch/arm64/include/asm/hypervisor.h
new file mode 100644
index 000000000000..d2c79049ff11
--- /dev/null
+++ b/arch/arm64/include/asm/hypervisor.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_ARM64_HYPERVISOR_H
2#define _ASM_ARM64_HYPERVISOR_H
3
4#include <asm/xen/hypervisor.h>
5
6#endif
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 2e12258aa7e4..1d12f89140ba 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -228,10 +228,12 @@ extern void __iounmap(volatile void __iomem *addr);
228#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) 228#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
229#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) 229#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
230#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) 230#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
231#define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
231 232
232#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) 233#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
233#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) 234#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
234#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) 235#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
236#define ioremap_cached(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL))
235#define iounmap __iounmap 237#define iounmap __iounmap
236 238
237#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF) 239#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
new file mode 100644
index 000000000000..a5f28e2720c7
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -0,0 +1,245 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ARM64_KVM_ARM_H__
19#define __ARM64_KVM_ARM_H__
20
21#include <asm/types.h>
22
23/* Hyp Configuration Register (HCR) bits */
24#define HCR_ID (UL(1) << 33)
25#define HCR_CD (UL(1) << 32)
26#define HCR_RW_SHIFT 31
27#define HCR_RW (UL(1) << HCR_RW_SHIFT)
28#define HCR_TRVM (UL(1) << 30)
29#define HCR_HCD (UL(1) << 29)
30#define HCR_TDZ (UL(1) << 28)
31#define HCR_TGE (UL(1) << 27)
32#define HCR_TVM (UL(1) << 26)
33#define HCR_TTLB (UL(1) << 25)
34#define HCR_TPU (UL(1) << 24)
35#define HCR_TPC (UL(1) << 23)
36#define HCR_TSW (UL(1) << 22)
37#define HCR_TAC (UL(1) << 21)
38#define HCR_TIDCP (UL(1) << 20)
39#define HCR_TSC (UL(1) << 19)
40#define HCR_TID3 (UL(1) << 18)
41#define HCR_TID2 (UL(1) << 17)
42#define HCR_TID1 (UL(1) << 16)
43#define HCR_TID0 (UL(1) << 15)
44#define HCR_TWE (UL(1) << 14)
45#define HCR_TWI (UL(1) << 13)
46#define HCR_DC (UL(1) << 12)
47#define HCR_BSU (3 << 10)
48#define HCR_BSU_IS (UL(1) << 10)
49#define HCR_FB (UL(1) << 9)
50#define HCR_VA (UL(1) << 8)
51#define HCR_VI (UL(1) << 7)
52#define HCR_VF (UL(1) << 6)
53#define HCR_AMO (UL(1) << 5)
54#define HCR_IMO (UL(1) << 4)
55#define HCR_FMO (UL(1) << 3)
56#define HCR_PTW (UL(1) << 2)
57#define HCR_SWIO (UL(1) << 1)
58#define HCR_VM (UL(1) << 0)
59
60/*
61 * The bits we set in HCR:
62 * RW: 64bit by default, can be overriden for 32bit VMs
63 * TAC: Trap ACTLR
64 * TSC: Trap SMC
65 * TSW: Trap cache operations by set/way
66 * TWI: Trap WFI
67 * TIDCP: Trap L2CTLR/L2ECTLR
68 * BSU_IS: Upgrade barriers to the inner shareable domain
69 * FB: Force broadcast of all maintainance operations
70 * AMO: Override CPSR.A and enable signaling with VA
71 * IMO: Override CPSR.I and enable signaling with VI
72 * FMO: Override CPSR.F and enable signaling with VF
73 * SWIO: Turn set/way invalidates into set/way clean+invalidate
74 */
75#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
76 HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
77 HCR_SWIO | HCR_TIDCP | HCR_RW)
78#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
79
80/* Hyp System Control Register (SCTLR_EL2) bits */
81#define SCTLR_EL2_EE (1 << 25)
82#define SCTLR_EL2_WXN (1 << 19)
83#define SCTLR_EL2_I (1 << 12)
84#define SCTLR_EL2_SA (1 << 3)
85#define SCTLR_EL2_C (1 << 2)
86#define SCTLR_EL2_A (1 << 1)
87#define SCTLR_EL2_M 1
88#define SCTLR_EL2_FLAGS (SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C | \
89 SCTLR_EL2_SA | SCTLR_EL2_I)
90
91/* TCR_EL2 Registers bits */
92#define TCR_EL2_TBI (1 << 20)
93#define TCR_EL2_PS (7 << 16)
94#define TCR_EL2_PS_40B (2 << 16)
95#define TCR_EL2_TG0 (1 << 14)
96#define TCR_EL2_SH0 (3 << 12)
97#define TCR_EL2_ORGN0 (3 << 10)
98#define TCR_EL2_IRGN0 (3 << 8)
99#define TCR_EL2_T0SZ 0x3f
100#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
101 TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
102
103#define TCR_EL2_FLAGS (TCR_EL2_PS_40B)
104
105/* VTCR_EL2 Registers bits */
106#define VTCR_EL2_PS_MASK (7 << 16)
107#define VTCR_EL2_PS_40B (2 << 16)
108#define VTCR_EL2_TG0_MASK (1 << 14)
109#define VTCR_EL2_TG0_4K (0 << 14)
110#define VTCR_EL2_TG0_64K (1 << 14)
111#define VTCR_EL2_SH0_MASK (3 << 12)
112#define VTCR_EL2_SH0_INNER (3 << 12)
113#define VTCR_EL2_ORGN0_MASK (3 << 10)
114#define VTCR_EL2_ORGN0_WBWA (1 << 10)
115#define VTCR_EL2_IRGN0_MASK (3 << 8)
116#define VTCR_EL2_IRGN0_WBWA (1 << 8)
117#define VTCR_EL2_SL0_MASK (3 << 6)
118#define VTCR_EL2_SL0_LVL1 (1 << 6)
119#define VTCR_EL2_T0SZ_MASK 0x3f
120#define VTCR_EL2_T0SZ_40B 24
121
122#ifdef CONFIG_ARM64_64K_PAGES
123/*
124 * Stage2 translation configuration:
125 * 40bits output (PS = 2)
126 * 40bits input (T0SZ = 24)
127 * 64kB pages (TG0 = 1)
128 * 2 level page tables (SL = 1)
129 */
130#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_64K | \
131 VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
132 VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
133 VTCR_EL2_T0SZ_40B)
134#define VTTBR_X (38 - VTCR_EL2_T0SZ_40B)
135#else
136/*
137 * Stage2 translation configuration:
138 * 40bits output (PS = 2)
139 * 40bits input (T0SZ = 24)
140 * 4kB pages (TG0 = 0)
141 * 3 level page tables (SL = 1)
142 */
143#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_4K | \
144 VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
145 VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
146 VTCR_EL2_T0SZ_40B)
147#define VTTBR_X (37 - VTCR_EL2_T0SZ_40B)
148#endif
149
150#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
151#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
152#define VTTBR_VMID_SHIFT (48LLU)
153#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
154
155/* Hyp System Trap Register */
156#define HSTR_EL2_TTEE (1 << 16)
157#define HSTR_EL2_T(x) (1 << x)
158
159/* Hyp Coprocessor Trap Register */
160#define CPTR_EL2_TCPAC (1 << 31)
161#define CPTR_EL2_TTA (1 << 20)
162#define CPTR_EL2_TFP (1 << 10)
163
164/* Hyp Debug Configuration Register bits */
165#define MDCR_EL2_TDRA (1 << 11)
166#define MDCR_EL2_TDOSA (1 << 10)
167#define MDCR_EL2_TDA (1 << 9)
168#define MDCR_EL2_TDE (1 << 8)
169#define MDCR_EL2_HPME (1 << 7)
170#define MDCR_EL2_TPM (1 << 6)
171#define MDCR_EL2_TPMCR (1 << 5)
172#define MDCR_EL2_HPMN_MASK (0x1F)
173
174/* Exception Syndrome Register (ESR) bits */
175#define ESR_EL2_EC_SHIFT (26)
176#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT)
177#define ESR_EL2_IL (1U << 25)
178#define ESR_EL2_ISS (ESR_EL2_IL - 1)
179#define ESR_EL2_ISV_SHIFT (24)
180#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT)
181#define ESR_EL2_SAS_SHIFT (22)
182#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT)
183#define ESR_EL2_SSE (1 << 21)
184#define ESR_EL2_SRT_SHIFT (16)
185#define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
186#define ESR_EL2_SF (1 << 15)
187#define ESR_EL2_AR (1 << 14)
188#define ESR_EL2_EA (1 << 9)
189#define ESR_EL2_CM (1 << 8)
190#define ESR_EL2_S1PTW (1 << 7)
191#define ESR_EL2_WNR (1 << 6)
192#define ESR_EL2_FSC (0x3f)
193#define ESR_EL2_FSC_TYPE (0x3c)
194
195#define ESR_EL2_CV_SHIFT (24)
196#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT)
197#define ESR_EL2_COND_SHIFT (20)
198#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT)
199
200
201#define FSC_FAULT (0x04)
202#define FSC_PERM (0x0c)
203
204/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
205#define HPFAR_MASK (~0xFUL)
206
207#define ESR_EL2_EC_UNKNOWN (0x00)
208#define ESR_EL2_EC_WFI (0x01)
209#define ESR_EL2_EC_CP15_32 (0x03)
210#define ESR_EL2_EC_CP15_64 (0x04)
211#define ESR_EL2_EC_CP14_MR (0x05)
212#define ESR_EL2_EC_CP14_LS (0x06)
213#define ESR_EL2_EC_FP_ASIMD (0x07)
214#define ESR_EL2_EC_CP10_ID (0x08)
215#define ESR_EL2_EC_CP14_64 (0x0C)
216#define ESR_EL2_EC_ILL_ISS (0x0E)
217#define ESR_EL2_EC_SVC32 (0x11)
218#define ESR_EL2_EC_HVC32 (0x12)
219#define ESR_EL2_EC_SMC32 (0x13)
220#define ESR_EL2_EC_SVC64 (0x15)
221#define ESR_EL2_EC_HVC64 (0x16)
222#define ESR_EL2_EC_SMC64 (0x17)
223#define ESR_EL2_EC_SYS64 (0x18)
224#define ESR_EL2_EC_IABT (0x20)
225#define ESR_EL2_EC_IABT_HYP (0x21)
226#define ESR_EL2_EC_PC_ALIGN (0x22)
227#define ESR_EL2_EC_DABT (0x24)
228#define ESR_EL2_EC_DABT_HYP (0x25)
229#define ESR_EL2_EC_SP_ALIGN (0x26)
230#define ESR_EL2_EC_FP_EXC32 (0x28)
231#define ESR_EL2_EC_FP_EXC64 (0x2C)
232#define ESR_EL2_EC_SERRROR (0x2F)
233#define ESR_EL2_EC_BREAKPT (0x30)
234#define ESR_EL2_EC_BREAKPT_HYP (0x31)
235#define ESR_EL2_EC_SOFTSTP (0x32)
236#define ESR_EL2_EC_SOFTSTP_HYP (0x33)
237#define ESR_EL2_EC_WATCHPT (0x34)
238#define ESR_EL2_EC_WATCHPT_HYP (0x35)
239#define ESR_EL2_EC_BKPT32 (0x38)
240#define ESR_EL2_EC_VECTOR32 (0x3A)
241#define ESR_EL2_EC_BRK64 (0x3C)
242
243#define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10
244
245#endif /* __ARM64_KVM_ARM_H__ */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
new file mode 100644
index 000000000000..c92de4163eba
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -0,0 +1,104 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ARM_KVM_ASM_H__
19#define __ARM_KVM_ASM_H__
20
21/*
22 * 0 is reserved as an invalid value.
23 * Order *must* be kept in sync with the hyp switch code.
24 */
25#define MPIDR_EL1 1 /* MultiProcessor Affinity Register */
26#define CSSELR_EL1 2 /* Cache Size Selection Register */
27#define SCTLR_EL1 3 /* System Control Register */
28#define ACTLR_EL1 4 /* Auxilliary Control Register */
29#define CPACR_EL1 5 /* Coprocessor Access Control */
30#define TTBR0_EL1 6 /* Translation Table Base Register 0 */
31#define TTBR1_EL1 7 /* Translation Table Base Register 1 */
32#define TCR_EL1 8 /* Translation Control Register */
33#define ESR_EL1 9 /* Exception Syndrome Register */
34#define AFSR0_EL1 10 /* Auxilary Fault Status Register 0 */
35#define AFSR1_EL1 11 /* Auxilary Fault Status Register 1 */
36#define FAR_EL1 12 /* Fault Address Register */
37#define MAIR_EL1 13 /* Memory Attribute Indirection Register */
38#define VBAR_EL1 14 /* Vector Base Address Register */
39#define CONTEXTIDR_EL1 15 /* Context ID Register */
40#define TPIDR_EL0 16 /* Thread ID, User R/W */
41#define TPIDRRO_EL0 17 /* Thread ID, User R/O */
42#define TPIDR_EL1 18 /* Thread ID, Privileged */
43#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
44#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
45/* 32bit specific registers. Keep them at the end of the range */
46#define DACR32_EL2 21 /* Domain Access Control Register */
47#define IFSR32_EL2 22 /* Instruction Fault Status Register */
48#define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */
49#define DBGVCR32_EL2 24 /* Debug Vector Catch Register */
50#define TEECR32_EL1 25 /* ThumbEE Configuration Register */
51#define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */
52#define NR_SYS_REGS 27
53
54/* 32bit mapping */
55#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
56#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */
57#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */
58#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */
59#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */
60#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
61#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */
62#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
63#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */
64#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */
65#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */
66#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */
67#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
68#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
69#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
70#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
71#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
72#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
73#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
74#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
75#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */
76#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */
77#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
78#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */
79#define c10_AMAIR (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
80#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
81#define NR_CP15_REGS (NR_SYS_REGS * 2)
82
83#define ARM_EXCEPTION_IRQ 0
84#define ARM_EXCEPTION_TRAP 1
85
86#ifndef __ASSEMBLY__
87struct kvm;
88struct kvm_vcpu;
89
90extern char __kvm_hyp_init[];
91extern char __kvm_hyp_init_end[];
92
93extern char __kvm_hyp_vector[];
94
95extern char __kvm_hyp_code_start[];
96extern char __kvm_hyp_code_end[];
97
98extern void __kvm_flush_vm_context(void);
99extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
100
101extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
102#endif
103
104#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_coproc.h b/arch/arm64/include/asm/kvm_coproc.h
new file mode 100644
index 000000000000..9a59301cd014
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_coproc.h
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/include/asm/kvm_coproc.h
6 * Copyright (C) 2012 Rusty Russell IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#ifndef __ARM64_KVM_COPROC_H__
22#define __ARM64_KVM_COPROC_H__
23
24#include <linux/kvm_host.h>
25
26void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
27
28struct kvm_sys_reg_table {
29 const struct sys_reg_desc *table;
30 size_t num;
31};
32
33struct kvm_sys_reg_target_table {
34 struct kvm_sys_reg_table table64;
35 struct kvm_sys_reg_table table32;
36};
37
38void kvm_register_target_sys_reg_table(unsigned int target,
39 struct kvm_sys_reg_target_table *table);
40
41int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
42int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
43int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
44int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
45int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
46
47#define kvm_coproc_table_init kvm_sys_reg_table_init
48void kvm_sys_reg_table_init(void);
49
50struct kvm_one_reg;
51int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
52int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
53int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
54unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
55
56#endif /* __ARM64_KVM_COPROC_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
new file mode 100644
index 000000000000..eec073875218
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -0,0 +1,180 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/include/kvm_emulate.h
6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#ifndef __ARM64_KVM_EMULATE_H__
23#define __ARM64_KVM_EMULATE_H__
24
25#include <linux/kvm_host.h>
26#include <asm/kvm_asm.h>
27#include <asm/kvm_arm.h>
28#include <asm/kvm_mmio.h>
29#include <asm/ptrace.h>
30
31unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
32unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
33
34bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
35void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
36
37void kvm_inject_undefined(struct kvm_vcpu *vcpu);
38void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
39void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
40
41static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
42{
43 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
44}
45
46static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
47{
48 return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
49}
50
51static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
52{
53 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
54}
55
56static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
57{
58 return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
59}
60
61static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
62{
63 if (vcpu_mode_is_32bit(vcpu))
64 return kvm_condition_valid32(vcpu);
65
66 return true;
67}
68
69static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
70{
71 if (vcpu_mode_is_32bit(vcpu))
72 kvm_skip_instr32(vcpu, is_wide_instr);
73 else
74 *vcpu_pc(vcpu) += 4;
75}
76
77static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
78{
79 *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
80}
81
82static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
83{
84 if (vcpu_mode_is_32bit(vcpu))
85 return vcpu_reg32(vcpu, reg_num);
86
87 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
88}
89
90/* Get vcpu SPSR for current mode */
91static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
92{
93 if (vcpu_mode_is_32bit(vcpu))
94 return vcpu_spsr32(vcpu);
95
96 return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
97}
98
99static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
100{
101 u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
102
103 if (vcpu_mode_is_32bit(vcpu))
104 return mode > COMPAT_PSR_MODE_USR;
105
106 return mode != PSR_MODE_EL0t;
107}
108
109static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
110{
111 return vcpu->arch.fault.esr_el2;
112}
113
114static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
115{
116 return vcpu->arch.fault.far_el2;
117}
118
119static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
120{
121 return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
122}
123
124static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
125{
126 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV);
127}
128
129static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
130{
131 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR);
132}
133
134static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
135{
136 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE);
137}
138
139static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
140{
141 return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT;
142}
143
144static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
145{
146 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA);
147}
148
149static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
150{
151 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW);
152}
153
154static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
155{
156 return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT);
157}
158
159/* This one is not specific to Data Abort */
160static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
161{
162 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL);
163}
164
165static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
166{
167 return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT;
168}
169
170static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
171{
172 return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT;
173}
174
175static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
176{
177 return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
178}
179
180#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
new file mode 100644
index 000000000000..644d73956864
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -0,0 +1,202 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/include/asm/kvm_host.h:
6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#ifndef __ARM64_KVM_HOST_H__
23#define __ARM64_KVM_HOST_H__
24
25#include <asm/kvm.h>
26#include <asm/kvm_asm.h>
27#include <asm/kvm_mmio.h>
28
29#define KVM_MAX_VCPUS 4
30#define KVM_USER_MEM_SLOTS 32
31#define KVM_PRIVATE_MEM_SLOTS 4
32#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
33
34#include <kvm/arm_vgic.h>
35#include <kvm/arm_arch_timer.h>
36
37#define KVM_VCPU_MAX_FEATURES 2
38
39/* We don't currently support large pages. */
40#define KVM_HPAGE_GFN_SHIFT(x) 0
41#define KVM_NR_PAGE_SIZES 1
42#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
43
44struct kvm_vcpu;
45int kvm_target_cpu(void);
46int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
47int kvm_arch_dev_ioctl_check_extension(long ext);
48
/* Per-VM architecture state: Stage-2 MMU context, vGIC and arch timer. */
49struct kvm_arch {
50 /* The VMID generation used for the virt. memory system */
51 u64 vmid_gen;
52 u32 vmid;
53
54 /* 1-level 2nd stage table and lock */
55 spinlock_t pgd_lock;
56 pgd_t *pgd;
57
58 /* VTTBR value associated with above pgd and vmid */
59 u64 vttbr;
60
61 /* Interrupt controller */
62 struct vgic_dist vgic;
63
64 /* Timer */
65 struct arch_timer_kvm timer;
66};
67
68#define KVM_NR_MEM_OBJS 40
69
70/*
71 * We don't want allocation failures within the mmu code, so we preallocate
72 * enough memory for a single page fault in a cache.
73 */
74struct kvm_mmu_memory_cache {
75 int nobjs; /* number of objects currently cached */
76 void *objects[KVM_NR_MEM_OBJS]; /* preallocated page-table pages */
77};
78
/* Snapshot of the EL2 fault registers, saved on guest exit. */
79struct kvm_vcpu_fault_info {
80 u32 esr_el2; /* Hyp Syndrome Register */
81 u64 far_el2; /* Hyp Fault Address Register */
82 u64 hpfar_el2; /* Hyp IPA Fault Address Register */
83};
84
/*
 * Full CPU register context. The union lets the same storage be viewed
 * either as AArch64 system registers or as AArch32 cp15 registers.
 */
85struct kvm_cpu_context {
86 struct kvm_regs gp_regs;
87 union {
88 u64 sys_regs[NR_SYS_REGS];
89 u32 cp15[NR_CP15_REGS];
90 };
91};
92
93typedef struct kvm_cpu_context kvm_cpu_context_t;
94
/* Per-vCPU architecture state: register context, fault info, vGIC/timer. */
95struct kvm_vcpu_arch {
96 struct kvm_cpu_context ctxt;
97
98 /* HYP configuration */
99 u64 hcr_el2;
100
101 /* Exception Information */
102 struct kvm_vcpu_fault_info fault;
103
104 /* Pointer to host CPU context */
105 kvm_cpu_context_t *host_cpu_context;
106
107 /* VGIC state */
108 struct vgic_cpu vgic_cpu;
109 struct arch_timer_cpu timer_cpu;
110
111 /*
112 * Anything that is not used directly from assembly code goes
113 * here.
114 */
115 /* dcache set/way operation pending: last physical CPU this vcpu
116 ran on, and the set of CPUs that still need a dcache flush */
116 int last_pcpu;
117 cpumask_t require_dcache_flush;
118
119 /* Don't run the guest */
120 bool pause;
121
122 /* IO related fields */
123 struct kvm_decode mmio_decode;
124
125 /* Interrupt related fields */
126 u64 irq_lines; /* IRQ and FIQ levels */
127
128 /* Cache some mmu pages needed inside spinlock regions */
129 struct kvm_mmu_memory_cache mmu_page_cache;
130
131 /* Target CPU and feature flags */
132 u32 target;
133 DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
134
135 /* Detect first run of a vcpu */
136 bool has_run_once;
137};
138
139#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
140#define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)])
141#define vcpu_cp15(v,r) ((v)->arch.ctxt.cp15[(r)])
142
143struct kvm_vm_stat {
144 u32 remote_tlb_flush;
145};
146
147struct kvm_vcpu_stat {
148 u32 halt_wakeup;
149};
150
151struct kvm_vcpu_init;
152int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
153 const struct kvm_vcpu_init *init);
154unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
155int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
156struct kvm_one_reg;
157int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
158int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
159
160#define KVM_ARCH_WANT_MMU_NOTIFIER
161struct kvm;
162int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
163int kvm_unmap_hva_range(struct kvm *kvm,
164 unsigned long start, unsigned long end);
165void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
166
167/* We do not have shadow page tables, hence the empty hooks */
/* MMU-notifier age hook: always reports "not young" (no shadow PTs). */
168static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
169{
170 return 0;
171}
172
/* MMU-notifier test-age hook: always reports "not young" (no shadow PTs). */
173static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
174{
175 return 0;
176}
177
178struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
179struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
180
181u64 kvm_call_hyp(void *hypfn, ...);
182
183int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
184 int exception_index);
185
186int kvm_perf_init(void);
187int kvm_perf_teardown(void);
188
/*
 * Install the HYP (EL2) runtime on this CPU: hand the boot/runtime page
 * table pointers, HYP stack and exception vector base to the init code
 * via the kvm_call_hyp trampoline.
 */
189static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
190 phys_addr_t pgd_ptr,
191 unsigned long hyp_stack_ptr,
192 unsigned long vector_ptr)
193{
194 /*
195 * Call initialization code, and switch to the full blown
196 * HYP code.
197 */
198 kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
199 hyp_stack_ptr, vector_ptr);
200}
201
202#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
new file mode 100644
index 000000000000..fc2f689c0694
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -0,0 +1,59 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ARM64_KVM_MMIO_H__
19#define __ARM64_KVM_MMIO_H__
20
21#include <linux/kvm_host.h>
22#include <asm/kvm_asm.h>
23#include <asm/kvm_arm.h>
24
25/*
26 * This is annoying. The mmio code requires this, even if we don't
27 * need any decoding. To be fixed.
28 */
29struct kvm_decode {
30 unsigned long rt; /* transfer register number — presumably from the ESR SRT field; confirm against caller */
31 bool sign_extend; /* sign-extend the loaded value into rt */
32};
33
34/*
35 * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
36 * which is an anonymous type. Use our own type instead.
37 */
38struct kvm_exit_mmio {
39 phys_addr_t phys_addr; /* guest physical address of the access */
40 u8 data[8]; /* payload; at most len bytes are meaningful */
41 u32 len; /* access size in bytes */
42 bool is_write; /* direction of the access */
43};
44
/*
 * Copy a decoded MMIO access into run->mmio and flag the vcpu run as a
 * KVM_EXIT_MMIO so userspace can emulate the access.
 */
45static inline void kvm_prepare_mmio(struct kvm_run *run,
46 struct kvm_exit_mmio *mmio)
47{
48 run->mmio.phys_addr = mmio->phys_addr;
49 run->mmio.len = mmio->len;
50 run->mmio.is_write = mmio->is_write;
51 memcpy(run->mmio.data, mmio->data, mmio->len);
52 run->exit_reason = KVM_EXIT_MMIO;
53}
54
55int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
56int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
57 phys_addr_t fault_ipa);
58
59#endif /* __ARM64_KVM_MMIO_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
new file mode 100644
index 000000000000..efe609c6a3c9
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -0,0 +1,135 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ARM64_KVM_MMU_H__
19#define __ARM64_KVM_MMU_H__
20
21#include <asm/page.h>
22#include <asm/memory.h>
23
24/*
25 * As we only have the TTBR0_EL2 register, we cannot express
26 * "negative" addresses. This makes it impossible to directly share
27 * mappings with the kernel.
28 *
29 * Instead, give the HYP mode its own VA region at a fixed offset from
30 * the kernel by just masking the top bits (which are all ones for a
31 * kernel address).
32 */
33#define HYP_PAGE_OFFSET_SHIFT VA_BITS
34#define HYP_PAGE_OFFSET_MASK ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
35#define HYP_PAGE_OFFSET (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
36
37/*
38 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
39 * shared across all the page-tables. Conveniently, we use the last
40 * possible page, where no kernel mapping will ever exist.
41 */
42#define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK)
43
44#ifdef __ASSEMBLY__
45
46/*
47 * Convert a kernel VA into a HYP VA.
48 * reg: VA to be converted.
49 */
50.macro kern_hyp_va reg
	/* drop the kernel VA's top (all-ones) bits to form the HYP VA */
51 and \reg, \reg, #HYP_PAGE_OFFSET_MASK
52.endm
53
54#else
55
56#include <asm/cachetype.h>
57#include <asm/cacheflush.h>
58
59#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
60
61/*
62 * Align KVM with the kernel's view of physical memory. Should be
63 * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration.
64 */
65#define KVM_PHYS_SHIFT PHYS_MASK_SHIFT
66#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
67#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
68
69/* Make sure we get the right size, and thus the right alignment */
70#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
71#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
72
73int create_hyp_mappings(void *from, void *to);
74int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
75void free_boot_hyp_pgd(void);
76void free_hyp_pgds(void);
77
78int kvm_alloc_stage2_pgd(struct kvm *kvm);
79void kvm_free_stage2_pgd(struct kvm *kvm);
80int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
81 phys_addr_t pa, unsigned long size);
82
83int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
84
85void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
86
87phys_addr_t kvm_mmu_get_httbr(void);
88phys_addr_t kvm_mmu_get_boot_httbr(void);
89phys_addr_t kvm_get_idmap_vector(void);
90int kvm_mmu_init(void);
91void kvm_clear_hyp_idmap(void);
92
93#define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
94
/*
 * Decide whether a guest abort was caused by a write.
 * Instruction aborts are never writes; a data abort with a valid
 * syndrome (ISV) and WnR clear is a read. Everything else is treated
 * as a write.
 */
95static inline bool kvm_is_write_fault(unsigned long esr)
96{
97 unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT;
98
99 if (esr_ec == ESR_EL2_EC_IABT)
100 return false;
101
102 if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR))
103 return false;
104
105 return true;
106}
107
/*
 * Page-table cleaning hooks are no-ops on arm64 — presumably because the
 * hardware table walker is cache-coherent here (they do real work on the
 * 32-bit ARM side); TODO confirm against the arm counterparts.
 */
108static inline void kvm_clean_dcache_area(void *addr, size_t size) {}
109static inline void kvm_clean_pgd(pgd_t *pgd) {}
110static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
111static inline void kvm_clean_pte(pte_t *pte) {}
112static inline void kvm_clean_pte_entry(pte_t *pte) {}
113
/* Mark a Stage-2 PTE read/write by setting the HAP[2:1] permission bits. */
114static inline void kvm_set_s2pte_writable(pte_t *pte)
115{
116 pte_val(*pte) |= PTE_S2_RDWR;
117}
118
119struct kvm;
120
/*
 * Make a guest page coherent with the instruction cache before the guest
 * can execute from it. PIPT caches only need the single page flushed via
 * its host VA; VIPT caches need a full icache flush; ASID-tagged VIVT
 * (AIVIVT) caches need nothing here.
 */
121static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
122{
123 if (!icache_is_aliasing()) { /* PIPT */
124 unsigned long hva = gfn_to_hva(kvm, gfn);
125 flush_icache_range(hva, hva + PAGE_SIZE);
126 } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
127 /* any kind of VIPT cache */
128 __flush_icache_all();
129 }
130}
131
132#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
133
134#endif /* __ASSEMBLY__ */
135#endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h
new file mode 100644
index 000000000000..e301a4816355
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_psci.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ARM64_KVM_PSCI_H__
19#define __ARM64_KVM_PSCI_H__
20
21bool kvm_psci_call(struct kvm_vcpu *vcpu);
22
23#endif /* __ARM64_KVM_PSCI_H__ */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 381f556b664e..20925bcf4e2a 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -90,6 +90,12 @@
90#define MT_NORMAL_NC 3 90#define MT_NORMAL_NC 3
91#define MT_NORMAL 4 91#define MT_NORMAL 4
92 92
93/*
94 * Memory types for Stage-2 translation
95 */
96#define MT_S2_NORMAL 0xf
97#define MT_S2_DEVICE_nGnRE 0x1
98
93#ifndef __ASSEMBLY__ 99#ifndef __ASSEMBLY__
94 100
95extern phys_addr_t memstart_addr; 101extern phys_addr_t memstart_addr;
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index e2bc385adb6b..a9eee33dfa62 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -151,12 +151,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
151{ 151{
152 unsigned int cpu = smp_processor_id(); 152 unsigned int cpu = smp_processor_id();
153 153
154#ifdef CONFIG_SMP
155 /* check for possible thread migration */
156 if (!cpumask_empty(mm_cpumask(next)) &&
157 !cpumask_test_cpu(cpu, mm_cpumask(next)))
158 __flush_icache_all();
159#endif
160 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) 154 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
161 check_and_switch_context(next, tsk); 155 check_and_switch_context(next, tsk);
162} 156}
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 63c9d0de05bb..e182a356c979 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -80,6 +80,24 @@
80#define PTE_ATTRINDX_MASK (_AT(pteval_t, 7) << 2) 80#define PTE_ATTRINDX_MASK (_AT(pteval_t, 7) << 2)
81 81
82/* 82/*
83 * 2nd stage PTE definitions
84 */
85#define PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[2:1] */
86#define PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
87
88/*
89 * Memory Attribute override for Stage-2 (MemAttr[3:0])
90 */
91#define PTE_S2_MEMATTR(t) (_AT(pteval_t, (t)) << 2)
92#define PTE_S2_MEMATTR_MASK (_AT(pteval_t, 0xf) << 2)
93
94/*
95 * EL2/HYP PTE/PMD definitions
96 */
97#define PMD_HYP PMD_SECT_USER
98#define PTE_HYP PTE_USER
99
100/*
83 * 40-bit physical address supported. 101 * 40-bit physical address supported.
84 */ 102 */
85#define PHYS_MASK_SHIFT (40) 103#define PHYS_MASK_SHIFT (40)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 720fc4a2be49..065e58f01b1e 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -76,6 +76,12 @@ extern pgprot_t pgprot_default;
76#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY) 76#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
77#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY) 77#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
78 78
79#define PAGE_HYP _MOD_PROT(pgprot_default, PTE_HYP)
80#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
81
82#define PAGE_S2 __pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
83#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
84
79#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN) 85#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
80#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) 86#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
81#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) 87#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
@@ -119,7 +125,7 @@ extern struct page *empty_zero_page;
119#define pte_none(pte) (!pte_val(pte)) 125#define pte_none(pte) (!pte_val(pte))
120#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0)) 126#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0))
121#define pte_page(pte) (pfn_to_page(pte_pfn(pte))) 127#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
122#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr)) 128#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + pte_index(addr))
123 129
124#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) 130#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
125#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) 131#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
@@ -261,6 +267,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
261 267
262#define pmd_bad(pmd) (!(pmd_val(pmd) & 2)) 268#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
263 269
270#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
271 PMD_TYPE_TABLE)
272#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
273 PMD_TYPE_SECT)
274
275
264static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) 276static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
265{ 277{
266 *pmdp = pmd; 278 *pmdp = pmd;
@@ -327,7 +339,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
327#endif 339#endif
328 340
329/* Find an entry in the third-level page table.. */ 341/* Find an entry in the third-level page table.. */
330#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) 342#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
331 343
332static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 344static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
333{ 345{
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 41a71ee4c3df..0dacbbf9458b 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -171,7 +171,5 @@ extern unsigned long profile_pc(struct pt_regs *regs);
171#define profile_pc(regs) instruction_pointer(regs) 171#define profile_pc(regs) instruction_pointer(regs)
172#endif 172#endif
173 173
174extern int aarch32_break_trap(struct pt_regs *regs);
175
176#endif /* __ASSEMBLY__ */ 174#endif /* __ASSEMBLY__ */
177#endif 175#endif
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 7065e920149d..0defa0728a9b 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -59,9 +59,10 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
59 unsigned int tmp; 59 unsigned int tmp;
60 60
61 asm volatile( 61 asm volatile(
62 " ldaxr %w0, %1\n" 62 "2: ldaxr %w0, %1\n"
63 " cbnz %w0, 1f\n" 63 " cbnz %w0, 1f\n"
64 " stxr %w0, %w2, %1\n" 64 " stxr %w0, %w2, %1\n"
65 " cbnz %w0, 2b\n"
65 "1:\n" 66 "1:\n"
66 : "=&r" (tmp), "+Q" (lock->lock) 67 : "=&r" (tmp), "+Q" (lock->lock)
67 : "r" (1) 68 : "r" (1)
diff --git a/arch/arm64/include/asm/sync_bitops.h b/arch/arm64/include/asm/sync_bitops.h
new file mode 100644
index 000000000000..8da0bf4f7659
--- /dev/null
+++ b/arch/arm64/include/asm/sync_bitops.h
@@ -0,0 +1,26 @@
1#ifndef __ASM_SYNC_BITOPS_H__
2#define __ASM_SYNC_BITOPS_H__
3
4#include <asm/bitops.h>
5#include <asm/cmpxchg.h>
6
7/* sync_bitops functions are equivalent to the SMP implementation of the
8 * original functions, independently from CONFIG_SMP being defined.
9 *
10 * We need them because _set_bit etc are not SMP safe if !CONFIG_SMP. But
11 * under Xen you might be communicating with a completely external entity
12 * who might be on another CPU (e.g. two uniprocessor guests communicating
13 * via event channels and grant tables). So we need a variant of the bit
14 * ops which are SMP safe even on a UP kernel.
15 */
16
17#define sync_set_bit(nr, p) set_bit(nr, p)
18#define sync_clear_bit(nr, p) clear_bit(nr, p)
19#define sync_change_bit(nr, p) change_bit(nr, p)
20#define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p)
21#define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p)
22#define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p)
23#define sync_test_bit(nr, addr) test_bit(nr, addr)
24#define sync_cmpxchg cmpxchg
25
26#endif
diff --git a/arch/arm64/include/asm/timex.h b/arch/arm64/include/asm/timex.h
index b24a31a7e2c9..81a076eb37fa 100644
--- a/arch/arm64/include/asm/timex.h
+++ b/arch/arm64/include/asm/timex.h
@@ -16,14 +16,14 @@
16#ifndef __ASM_TIMEX_H 16#ifndef __ASM_TIMEX_H
17#define __ASM_TIMEX_H 17#define __ASM_TIMEX_H
18 18
19#include <asm/arch_timer.h>
20
19/* 21/*
20 * Use the current timer as a cycle counter since this is what we use for 22 * Use the current timer as a cycle counter since this is what we use for
21 * the delay loop. 23 * the delay loop.
22 */ 24 */
23#define get_cycles() ({ cycles_t c; read_current_timer(&c); c; }) 25#define get_cycles() arch_counter_get_cntvct()
24 26
25#include <asm-generic/timex.h> 27#include <asm-generic/timex.h>
26 28
27#define ARCH_HAS_READ_CURRENT_TIMER
28
29#endif 29#endif
diff --git a/arch/arm64/include/asm/xen/events.h b/arch/arm64/include/asm/xen/events.h
new file mode 100644
index 000000000000..86553213c132
--- /dev/null
+++ b/arch/arm64/include/asm/xen/events.h
@@ -0,0 +1,21 @@
1#ifndef _ASM_ARM64_XEN_EVENTS_H
2#define _ASM_ARM64_XEN_EVENTS_H
3
4#include <asm/ptrace.h>
5#include <asm/atomic.h>
6
7enum ipi_vector {
8 XEN_PLACEHOLDER_VECTOR,
9
10 /* Xen IPIs go here */
11 XEN_NR_IPIS,
12};
13
/* Non-zero when IRQs were masked in the saved PSTATE of @regs. */
14static inline int xen_irqs_disabled(struct pt_regs *regs)
15{
16 return raw_irqs_disabled_flags((unsigned long) regs->pstate);
17}
18
19#define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
20
21#endif /* _ASM_ARM64_XEN_EVENTS_H */
diff --git a/arch/arm64/include/asm/xen/hypercall.h b/arch/arm64/include/asm/xen/hypercall.h
new file mode 100644
index 000000000000..74b0c423ff5b
--- /dev/null
+++ b/arch/arm64/include/asm/xen/hypercall.h
@@ -0,0 +1 @@
#include <../../arm/include/asm/xen/hypercall.h>
diff --git a/arch/arm64/include/asm/xen/hypervisor.h b/arch/arm64/include/asm/xen/hypervisor.h
new file mode 100644
index 000000000000..f263da8e8769
--- /dev/null
+++ b/arch/arm64/include/asm/xen/hypervisor.h
@@ -0,0 +1 @@
#include <../../arm/include/asm/xen/hypervisor.h>
diff --git a/arch/arm64/include/asm/xen/interface.h b/arch/arm64/include/asm/xen/interface.h
new file mode 100644
index 000000000000..44457aebeed4
--- /dev/null
+++ b/arch/arm64/include/asm/xen/interface.h
@@ -0,0 +1 @@
#include <../../arm/include/asm/xen/interface.h>
diff --git a/arch/arm64/include/asm/xen/page.h b/arch/arm64/include/asm/xen/page.h
new file mode 100644
index 000000000000..bed87ec36780
--- /dev/null
+++ b/arch/arm64/include/asm/xen/page.h
@@ -0,0 +1 @@
#include <../../arm/include/asm/xen/page.h>