diff options
Diffstat (limited to 'arch/arm64/include')
32 files changed, 1517 insertions, 39 deletions
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 3300cbd18a89..fea9ee327206 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h | |||
@@ -123,9 +123,6 @@ static inline void __flush_icache_all(void) | |||
123 | #define flush_dcache_mmap_unlock(mapping) \ | 123 | #define flush_dcache_mmap_unlock(mapping) \ |
124 | spin_unlock_irq(&(mapping)->tree_lock) | 124 | spin_unlock_irq(&(mapping)->tree_lock) |
125 | 125 | ||
126 | #define flush_icache_user_range(vma,page,addr,len) \ | ||
127 | flush_dcache_page(page) | ||
128 | |||
129 | /* | 126 | /* |
130 | * We don't appear to need to do anything here. In fact, if we did, we'd | 127 | * We don't appear to need to do anything here. In fact, if we did, we'd |
131 | * duplicate cache flushing elsewhere performed by flush_dcache_page(). | 128 | * duplicate cache flushing elsewhere performed by flush_dcache_page(). |
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index cf2749488cd4..5fe138e0b828 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h | |||
@@ -37,11 +37,14 @@ | |||
37 | }) | 37 | }) |
38 | 38 | ||
39 | #define ARM_CPU_IMP_ARM 0x41 | 39 | #define ARM_CPU_IMP_ARM 0x41 |
40 | #define ARM_CPU_IMP_APM 0x50 | ||
40 | 41 | ||
41 | #define ARM_CPU_PART_AEM_V8 0xD0F0 | 42 | #define ARM_CPU_PART_AEM_V8 0xD0F0 |
42 | #define ARM_CPU_PART_FOUNDATION 0xD000 | 43 | #define ARM_CPU_PART_FOUNDATION 0xD000 |
43 | #define ARM_CPU_PART_CORTEX_A57 0xD070 | 44 | #define ARM_CPU_PART_CORTEX_A57 0xD070 |
44 | 45 | ||
46 | #define APM_CPU_PART_POTENZA 0x0000 | ||
47 | |||
45 | #ifndef __ASSEMBLY__ | 48 | #ifndef __ASSEMBLY__ |
46 | 49 | ||
47 | /* | 50 | /* |
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 7eaa0b302493..ef8235c68c09 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h | |||
@@ -83,6 +83,15 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs) | |||
83 | } | 83 | } |
84 | #endif | 84 | #endif |
85 | 85 | ||
86 | #ifdef CONFIG_COMPAT | ||
87 | int aarch32_break_handler(struct pt_regs *regs); | ||
88 | #else | ||
89 | static inline int aarch32_break_handler(struct pt_regs *regs) | ||
90 | { | ||
91 | return -EFAULT; | ||
92 | } | ||
93 | #endif | ||
94 | |||
86 | #endif /* __ASSEMBLY */ | 95 | #endif /* __ASSEMBLY */ |
87 | #endif /* __KERNEL__ */ | 96 | #endif /* __KERNEL__ */ |
88 | #endif /* __ASM_DEBUG_MONITORS_H */ | 97 | #endif /* __ASM_DEBUG_MONITORS_H */ |
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h index 0d8453c755a8..cf98b362094b 100644 --- a/arch/arm64/include/asm/device.h +++ b/arch/arm64/include/asm/device.h | |||
@@ -18,6 +18,9 @@ | |||
18 | 18 | ||
19 | struct dev_archdata { | 19 | struct dev_archdata { |
20 | struct dma_map_ops *dma_ops; | 20 | struct dma_map_ops *dma_ops; |
21 | #ifdef CONFIG_IOMMU_API | ||
22 | void *iommu; /* private IOMMU data */ | ||
23 | #endif | ||
21 | }; | 24 | }; |
22 | 25 | ||
23 | struct pdev_archdata { | 26 | struct pdev_archdata { |
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 994776894198..8d1810001aef 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h | |||
@@ -81,8 +81,12 @@ static inline void dma_mark_clean(void *addr, size_t size) | |||
81 | { | 81 | { |
82 | } | 82 | } |
83 | 83 | ||
84 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, | 84 | #define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) |
85 | dma_addr_t *dma_handle, gfp_t flags) | 85 | #define dma_free_coherent(d, s, h, f) dma_free_attrs(d, s, h, f, NULL) |
86 | |||
87 | static inline void *dma_alloc_attrs(struct device *dev, size_t size, | ||
88 | dma_addr_t *dma_handle, gfp_t flags, | ||
89 | struct dma_attrs *attrs) | ||
86 | { | 90 | { |
87 | struct dma_map_ops *ops = get_dma_ops(dev); | 91 | struct dma_map_ops *ops = get_dma_ops(dev); |
88 | void *vaddr; | 92 | void *vaddr; |
@@ -90,13 +94,14 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size, | |||
90 | if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr)) | 94 | if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr)) |
91 | return vaddr; | 95 | return vaddr; |
92 | 96 | ||
93 | vaddr = ops->alloc(dev, size, dma_handle, flags, NULL); | 97 | vaddr = ops->alloc(dev, size, dma_handle, flags, attrs); |
94 | debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr); | 98 | debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr); |
95 | return vaddr; | 99 | return vaddr; |
96 | } | 100 | } |
97 | 101 | ||
98 | static inline void dma_free_coherent(struct device *dev, size_t size, | 102 | static inline void dma_free_attrs(struct device *dev, size_t size, |
99 | void *vaddr, dma_addr_t dev_addr) | 103 | void *vaddr, dma_addr_t dev_addr, |
104 | struct dma_attrs *attrs) | ||
100 | { | 105 | { |
101 | struct dma_map_ops *ops = get_dma_ops(dev); | 106 | struct dma_map_ops *ops = get_dma_ops(dev); |
102 | 107 | ||
@@ -104,7 +109,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size, | |||
104 | return; | 109 | return; |
105 | 110 | ||
106 | debug_dma_free_coherent(dev, size, vaddr, dev_addr); | 111 | debug_dma_free_coherent(dev, size, vaddr, dev_addr); |
107 | ops->free(dev, size, vaddr, dev_addr, NULL); | 112 | ops->free(dev, size, vaddr, dev_addr, attrs); |
108 | } | 113 | } |
109 | 114 | ||
110 | /* | 115 | /* |
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h new file mode 100644 index 000000000000..5b7ca8ace95f --- /dev/null +++ b/arch/arm64/include/asm/hugetlb.h | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * arch/arm64/include/asm/hugetlb.h | ||
3 | * | ||
4 | * Copyright (C) 2013 Linaro Ltd. | ||
5 | * | ||
6 | * Based on arch/x86/include/asm/hugetlb.h | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | ||
21 | |||
22 | #ifndef __ASM_HUGETLB_H | ||
23 | #define __ASM_HUGETLB_H | ||
24 | |||
25 | #include <asm-generic/hugetlb.h> | ||
26 | #include <asm/page.h> | ||
27 | |||
28 | static inline pte_t huge_ptep_get(pte_t *ptep) | ||
29 | { | ||
30 | return *ptep; | ||
31 | } | ||
32 | |||
33 | static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, | ||
34 | pte_t *ptep, pte_t pte) | ||
35 | { | ||
36 | set_pte_at(mm, addr, ptep, pte); | ||
37 | } | ||
38 | |||
39 | static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, | ||
40 | unsigned long addr, pte_t *ptep) | ||
41 | { | ||
42 | ptep_clear_flush(vma, addr, ptep); | ||
43 | } | ||
44 | |||
45 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | ||
46 | unsigned long addr, pte_t *ptep) | ||
47 | { | ||
48 | ptep_set_wrprotect(mm, addr, ptep); | ||
49 | } | ||
50 | |||
51 | static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, | ||
52 | unsigned long addr, pte_t *ptep) | ||
53 | { | ||
54 | return ptep_get_and_clear(mm, addr, ptep); | ||
55 | } | ||
56 | |||
57 | static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, | ||
58 | unsigned long addr, pte_t *ptep, | ||
59 | pte_t pte, int dirty) | ||
60 | { | ||
61 | return ptep_set_access_flags(vma, addr, ptep, pte, dirty); | ||
62 | } | ||
63 | |||
64 | static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, | ||
65 | unsigned long addr, unsigned long end, | ||
66 | unsigned long floor, | ||
67 | unsigned long ceiling) | ||
68 | { | ||
69 | free_pgd_range(tlb, addr, end, floor, ceiling); | ||
70 | } | ||
71 | |||
72 | static inline int is_hugepage_only_range(struct mm_struct *mm, | ||
73 | unsigned long addr, unsigned long len) | ||
74 | { | ||
75 | return 0; | ||
76 | } | ||
77 | |||
78 | static inline int prepare_hugepage_range(struct file *file, | ||
79 | unsigned long addr, unsigned long len) | ||
80 | { | ||
81 | struct hstate *h = hstate_file(file); | ||
82 | if (len & ~huge_page_mask(h)) | ||
83 | return -EINVAL; | ||
84 | if (addr & ~huge_page_mask(h)) | ||
85 | return -EINVAL; | ||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) | ||
90 | { | ||
91 | } | ||
92 | |||
93 | static inline int huge_pte_none(pte_t pte) | ||
94 | { | ||
95 | return pte_none(pte); | ||
96 | } | ||
97 | |||
98 | static inline pte_t huge_pte_wrprotect(pte_t pte) | ||
99 | { | ||
100 | return pte_wrprotect(pte); | ||
101 | } | ||
102 | |||
103 | static inline int arch_prepare_hugepage(struct page *page) | ||
104 | { | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | static inline void arch_release_hugepage(struct page *page) | ||
109 | { | ||
110 | } | ||
111 | |||
112 | static inline void arch_clear_hugepage_flags(struct page *page) | ||
113 | { | ||
114 | clear_bit(PG_dcache_clean, &page->flags); | ||
115 | } | ||
116 | |||
117 | #endif /* __ASM_HUGETLB_H */ | ||
diff --git a/arch/arm64/include/asm/hypervisor.h b/arch/arm64/include/asm/hypervisor.h new file mode 100644 index 000000000000..d2c79049ff11 --- /dev/null +++ b/arch/arm64/include/asm/hypervisor.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _ASM_ARM64_HYPERVISOR_H | ||
2 | #define _ASM_ARM64_HYPERVISOR_H | ||
3 | |||
4 | #include <asm/xen/hypervisor.h> | ||
5 | |||
6 | #endif | ||
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 2e12258aa7e4..1d12f89140ba 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h | |||
@@ -228,10 +228,12 @@ extern void __iounmap(volatile void __iomem *addr); | |||
228 | #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) | 228 | #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) |
229 | #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) | 229 | #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) |
230 | #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) | 230 | #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) |
231 | #define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) | ||
231 | 232 | ||
232 | #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) | 233 | #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) |
233 | #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) | 234 | #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) |
234 | #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) | 235 | #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) |
236 | #define ioremap_cached(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL)) | ||
235 | #define iounmap __iounmap | 237 | #define iounmap __iounmap |
236 | 238 | ||
237 | #define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF) | 239 | #define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF) |
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h new file mode 100644 index 000000000000..a5f28e2720c7 --- /dev/null +++ b/arch/arm64/include/asm/kvm_arm.h | |||
@@ -0,0 +1,245 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef __ARM64_KVM_ARM_H__ | ||
19 | #define __ARM64_KVM_ARM_H__ | ||
20 | |||
21 | #include <asm/types.h> | ||
22 | |||
23 | /* Hyp Configuration Register (HCR) bits */ | ||
24 | #define HCR_ID (UL(1) << 33) | ||
25 | #define HCR_CD (UL(1) << 32) | ||
26 | #define HCR_RW_SHIFT 31 | ||
27 | #define HCR_RW (UL(1) << HCR_RW_SHIFT) | ||
28 | #define HCR_TRVM (UL(1) << 30) | ||
29 | #define HCR_HCD (UL(1) << 29) | ||
30 | #define HCR_TDZ (UL(1) << 28) | ||
31 | #define HCR_TGE (UL(1) << 27) | ||
32 | #define HCR_TVM (UL(1) << 26) | ||
33 | #define HCR_TTLB (UL(1) << 25) | ||
34 | #define HCR_TPU (UL(1) << 24) | ||
35 | #define HCR_TPC (UL(1) << 23) | ||
36 | #define HCR_TSW (UL(1) << 22) | ||
37 | #define HCR_TAC (UL(1) << 21) | ||
38 | #define HCR_TIDCP (UL(1) << 20) | ||
39 | #define HCR_TSC (UL(1) << 19) | ||
40 | #define HCR_TID3 (UL(1) << 18) | ||
41 | #define HCR_TID2 (UL(1) << 17) | ||
42 | #define HCR_TID1 (UL(1) << 16) | ||
43 | #define HCR_TID0 (UL(1) << 15) | ||
44 | #define HCR_TWE (UL(1) << 14) | ||
45 | #define HCR_TWI (UL(1) << 13) | ||
46 | #define HCR_DC (UL(1) << 12) | ||
47 | #define HCR_BSU (3 << 10) | ||
48 | #define HCR_BSU_IS (UL(1) << 10) | ||
49 | #define HCR_FB (UL(1) << 9) | ||
50 | #define HCR_VA (UL(1) << 8) | ||
51 | #define HCR_VI (UL(1) << 7) | ||
52 | #define HCR_VF (UL(1) << 6) | ||
53 | #define HCR_AMO (UL(1) << 5) | ||
54 | #define HCR_IMO (UL(1) << 4) | ||
55 | #define HCR_FMO (UL(1) << 3) | ||
56 | #define HCR_PTW (UL(1) << 2) | ||
57 | #define HCR_SWIO (UL(1) << 1) | ||
58 | #define HCR_VM (UL(1) << 0) | ||
59 | |||
60 | /* | ||
61 | * The bits we set in HCR: | ||
62 | * RW: 64bit by default, can be overridden for 32bit VMs | ||
63 | * TAC: Trap ACTLR | ||
64 | * TSC: Trap SMC | ||
65 | * TSW: Trap cache operations by set/way | ||
66 | * TWI: Trap WFI | ||
67 | * TIDCP: Trap L2CTLR/L2ECTLR | ||
68 | * BSU_IS: Upgrade barriers to the inner shareable domain | ||
69 | * FB: Force broadcast of all maintenance operations | ||
70 | * AMO: Override CPSR.A and enable signaling with VA | ||
71 | * IMO: Override CPSR.I and enable signaling with VI | ||
72 | * FMO: Override CPSR.F and enable signaling with VF | ||
73 | * SWIO: Turn set/way invalidates into set/way clean+invalidate | ||
74 | */ | ||
75 | #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \ | ||
76 | HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \ | ||
77 | HCR_SWIO | HCR_TIDCP | HCR_RW) | ||
78 | #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) | ||
79 | |||
80 | /* Hyp System Control Register (SCTLR_EL2) bits */ | ||
81 | #define SCTLR_EL2_EE (1 << 25) | ||
82 | #define SCTLR_EL2_WXN (1 << 19) | ||
83 | #define SCTLR_EL2_I (1 << 12) | ||
84 | #define SCTLR_EL2_SA (1 << 3) | ||
85 | #define SCTLR_EL2_C (1 << 2) | ||
86 | #define SCTLR_EL2_A (1 << 1) | ||
87 | #define SCTLR_EL2_M 1 | ||
88 | #define SCTLR_EL2_FLAGS (SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C | \ | ||
89 | SCTLR_EL2_SA | SCTLR_EL2_I) | ||
90 | |||
91 | /* TCR_EL2 Registers bits */ | ||
92 | #define TCR_EL2_TBI (1 << 20) | ||
93 | #define TCR_EL2_PS (7 << 16) | ||
94 | #define TCR_EL2_PS_40B (2 << 16) | ||
95 | #define TCR_EL2_TG0 (1 << 14) | ||
96 | #define TCR_EL2_SH0 (3 << 12) | ||
97 | #define TCR_EL2_ORGN0 (3 << 10) | ||
98 | #define TCR_EL2_IRGN0 (3 << 8) | ||
99 | #define TCR_EL2_T0SZ 0x3f | ||
100 | #define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \ | ||
101 | TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ) | ||
102 | |||
103 | #define TCR_EL2_FLAGS (TCR_EL2_PS_40B) | ||
104 | |||
105 | /* VTCR_EL2 Registers bits */ | ||
106 | #define VTCR_EL2_PS_MASK (7 << 16) | ||
107 | #define VTCR_EL2_PS_40B (2 << 16) | ||
108 | #define VTCR_EL2_TG0_MASK (1 << 14) | ||
109 | #define VTCR_EL2_TG0_4K (0 << 14) | ||
110 | #define VTCR_EL2_TG0_64K (1 << 14) | ||
111 | #define VTCR_EL2_SH0_MASK (3 << 12) | ||
112 | #define VTCR_EL2_SH0_INNER (3 << 12) | ||
113 | #define VTCR_EL2_ORGN0_MASK (3 << 10) | ||
114 | #define VTCR_EL2_ORGN0_WBWA (1 << 10) | ||
115 | #define VTCR_EL2_IRGN0_MASK (3 << 8) | ||
116 | #define VTCR_EL2_IRGN0_WBWA (1 << 8) | ||
117 | #define VTCR_EL2_SL0_MASK (3 << 6) | ||
118 | #define VTCR_EL2_SL0_LVL1 (1 << 6) | ||
119 | #define VTCR_EL2_T0SZ_MASK 0x3f | ||
120 | #define VTCR_EL2_T0SZ_40B 24 | ||
121 | |||
122 | #ifdef CONFIG_ARM64_64K_PAGES | ||
123 | /* | ||
124 | * Stage2 translation configuration: | ||
125 | * 40bits output (PS = 2) | ||
126 | * 40bits input (T0SZ = 24) | ||
127 | * 64kB pages (TG0 = 1) | ||
128 | * 2 level page tables (SL = 1) | ||
129 | */ | ||
130 | #define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_64K | \ | ||
131 | VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \ | ||
132 | VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \ | ||
133 | VTCR_EL2_T0SZ_40B) | ||
134 | #define VTTBR_X (38 - VTCR_EL2_T0SZ_40B) | ||
135 | #else | ||
136 | /* | ||
137 | * Stage2 translation configuration: | ||
138 | * 40bits output (PS = 2) | ||
139 | * 40bits input (T0SZ = 24) | ||
140 | * 4kB pages (TG0 = 0) | ||
141 | * 3 level page tables (SL = 1) | ||
142 | */ | ||
143 | #define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_4K | \ | ||
144 | VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \ | ||
145 | VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \ | ||
146 | VTCR_EL2_T0SZ_40B) | ||
147 | #define VTTBR_X (37 - VTCR_EL2_T0SZ_40B) | ||
148 | #endif | ||
149 | |||
150 | #define VTTBR_BADDR_SHIFT (VTTBR_X - 1) | ||
151 | #define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) | ||
152 | #define VTTBR_VMID_SHIFT (48LLU) | ||
153 | #define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT) | ||
154 | |||
155 | /* Hyp System Trap Register */ | ||
156 | #define HSTR_EL2_TTEE (1 << 16) | ||
157 | #define HSTR_EL2_T(x) (1 << x) | ||
158 | |||
159 | /* Hyp Coprocessor Trap Register */ | ||
160 | #define CPTR_EL2_TCPAC (1 << 31) | ||
161 | #define CPTR_EL2_TTA (1 << 20) | ||
162 | #define CPTR_EL2_TFP (1 << 10) | ||
163 | |||
164 | /* Hyp Debug Configuration Register bits */ | ||
165 | #define MDCR_EL2_TDRA (1 << 11) | ||
166 | #define MDCR_EL2_TDOSA (1 << 10) | ||
167 | #define MDCR_EL2_TDA (1 << 9) | ||
168 | #define MDCR_EL2_TDE (1 << 8) | ||
169 | #define MDCR_EL2_HPME (1 << 7) | ||
170 | #define MDCR_EL2_TPM (1 << 6) | ||
171 | #define MDCR_EL2_TPMCR (1 << 5) | ||
172 | #define MDCR_EL2_HPMN_MASK (0x1F) | ||
173 | |||
174 | /* Exception Syndrome Register (ESR) bits */ | ||
175 | #define ESR_EL2_EC_SHIFT (26) | ||
176 | #define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT) | ||
177 | #define ESR_EL2_IL (1U << 25) | ||
178 | #define ESR_EL2_ISS (ESR_EL2_IL - 1) | ||
179 | #define ESR_EL2_ISV_SHIFT (24) | ||
180 | #define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT) | ||
181 | #define ESR_EL2_SAS_SHIFT (22) | ||
182 | #define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT) | ||
183 | #define ESR_EL2_SSE (1 << 21) | ||
184 | #define ESR_EL2_SRT_SHIFT (16) | ||
185 | #define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT) | ||
186 | #define ESR_EL2_SF (1 << 15) | ||
187 | #define ESR_EL2_AR (1 << 14) | ||
188 | #define ESR_EL2_EA (1 << 9) | ||
189 | #define ESR_EL2_CM (1 << 8) | ||
190 | #define ESR_EL2_S1PTW (1 << 7) | ||
191 | #define ESR_EL2_WNR (1 << 6) | ||
192 | #define ESR_EL2_FSC (0x3f) | ||
193 | #define ESR_EL2_FSC_TYPE (0x3c) | ||
194 | |||
195 | #define ESR_EL2_CV_SHIFT (24) | ||
196 | #define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT) | ||
197 | #define ESR_EL2_COND_SHIFT (20) | ||
198 | #define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT) | ||
199 | |||
200 | |||
201 | #define FSC_FAULT (0x04) | ||
202 | #define FSC_PERM (0x0c) | ||
203 | |||
204 | /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ | ||
205 | #define HPFAR_MASK (~0xFUL) | ||
206 | |||
207 | #define ESR_EL2_EC_UNKNOWN (0x00) | ||
208 | #define ESR_EL2_EC_WFI (0x01) | ||
209 | #define ESR_EL2_EC_CP15_32 (0x03) | ||
210 | #define ESR_EL2_EC_CP15_64 (0x04) | ||
211 | #define ESR_EL2_EC_CP14_MR (0x05) | ||
212 | #define ESR_EL2_EC_CP14_LS (0x06) | ||
213 | #define ESR_EL2_EC_FP_ASIMD (0x07) | ||
214 | #define ESR_EL2_EC_CP10_ID (0x08) | ||
215 | #define ESR_EL2_EC_CP14_64 (0x0C) | ||
216 | #define ESR_EL2_EC_ILL_ISS (0x0E) | ||
217 | #define ESR_EL2_EC_SVC32 (0x11) | ||
218 | #define ESR_EL2_EC_HVC32 (0x12) | ||
219 | #define ESR_EL2_EC_SMC32 (0x13) | ||
220 | #define ESR_EL2_EC_SVC64 (0x15) | ||
221 | #define ESR_EL2_EC_HVC64 (0x16) | ||
222 | #define ESR_EL2_EC_SMC64 (0x17) | ||
223 | #define ESR_EL2_EC_SYS64 (0x18) | ||
224 | #define ESR_EL2_EC_IABT (0x20) | ||
225 | #define ESR_EL2_EC_IABT_HYP (0x21) | ||
226 | #define ESR_EL2_EC_PC_ALIGN (0x22) | ||
227 | #define ESR_EL2_EC_DABT (0x24) | ||
228 | #define ESR_EL2_EC_DABT_HYP (0x25) | ||
229 | #define ESR_EL2_EC_SP_ALIGN (0x26) | ||
230 | #define ESR_EL2_EC_FP_EXC32 (0x28) | ||
231 | #define ESR_EL2_EC_FP_EXC64 (0x2C) | ||
232 | #define ESR_EL2_EC_SERRROR (0x2F) | ||
233 | #define ESR_EL2_EC_BREAKPT (0x30) | ||
234 | #define ESR_EL2_EC_BREAKPT_HYP (0x31) | ||
235 | #define ESR_EL2_EC_SOFTSTP (0x32) | ||
236 | #define ESR_EL2_EC_SOFTSTP_HYP (0x33) | ||
237 | #define ESR_EL2_EC_WATCHPT (0x34) | ||
238 | #define ESR_EL2_EC_WATCHPT_HYP (0x35) | ||
239 | #define ESR_EL2_EC_BKPT32 (0x38) | ||
240 | #define ESR_EL2_EC_VECTOR32 (0x3A) | ||
241 | #define ESR_EL2_EC_BRK64 (0x3C) | ||
242 | |||
243 | #define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10 | ||
244 | |||
245 | #endif /* __ARM64_KVM_ARM_H__ */ | ||
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h new file mode 100644 index 000000000000..c92de4163eba --- /dev/null +++ b/arch/arm64/include/asm/kvm_asm.h | |||
@@ -0,0 +1,104 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef __ARM_KVM_ASM_H__ | ||
19 | #define __ARM_KVM_ASM_H__ | ||
20 | |||
21 | /* | ||
22 | * 0 is reserved as an invalid value. | ||
23 | * Order *must* be kept in sync with the hyp switch code. | ||
24 | */ | ||
25 | #define MPIDR_EL1 1 /* MultiProcessor Affinity Register */ | ||
26 | #define CSSELR_EL1 2 /* Cache Size Selection Register */ | ||
27 | #define SCTLR_EL1 3 /* System Control Register */ | ||
28 | #define ACTLR_EL1 4 /* Auxiliary Control Register */ | ||
29 | #define CPACR_EL1 5 /* Coprocessor Access Control */ | ||
30 | #define TTBR0_EL1 6 /* Translation Table Base Register 0 */ | ||
31 | #define TTBR1_EL1 7 /* Translation Table Base Register 1 */ | ||
32 | #define TCR_EL1 8 /* Translation Control Register */ | ||
33 | #define ESR_EL1 9 /* Exception Syndrome Register */ | ||
34 | #define AFSR0_EL1 10 /* Auxiliary Fault Status Register 0 */ | ||
35 | #define AFSR1_EL1 11 /* Auxiliary Fault Status Register 1 */ | ||
36 | #define FAR_EL1 12 /* Fault Address Register */ | ||
37 | #define MAIR_EL1 13 /* Memory Attribute Indirection Register */ | ||
38 | #define VBAR_EL1 14 /* Vector Base Address Register */ | ||
39 | #define CONTEXTIDR_EL1 15 /* Context ID Register */ | ||
40 | #define TPIDR_EL0 16 /* Thread ID, User R/W */ | ||
41 | #define TPIDRRO_EL0 17 /* Thread ID, User R/O */ | ||
42 | #define TPIDR_EL1 18 /* Thread ID, Privileged */ | ||
43 | #define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */ | ||
44 | #define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ | ||
45 | /* 32bit specific registers. Keep them at the end of the range */ | ||
46 | #define DACR32_EL2 21 /* Domain Access Control Register */ | ||
47 | #define IFSR32_EL2 22 /* Instruction Fault Status Register */ | ||
48 | #define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */ | ||
49 | #define DBGVCR32_EL2 24 /* Debug Vector Catch Register */ | ||
50 | #define TEECR32_EL1 25 /* ThumbEE Configuration Register */ | ||
51 | #define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */ | ||
52 | #define NR_SYS_REGS 27 | ||
53 | |||
54 | /* 32bit mapping */ | ||
55 | #define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ | ||
56 | #define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */ | ||
57 | #define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */ | ||
58 | #define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */ | ||
59 | #define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */ | ||
60 | #define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */ | ||
61 | #define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */ | ||
62 | #define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */ | ||
63 | #define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */ | ||
64 | #define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */ | ||
65 | #define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */ | ||
66 | #define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */ | ||
67 | #define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */ | ||
68 | #define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */ | ||
69 | #define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */ | ||
70 | #define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */ | ||
71 | #define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */ | ||
72 | #define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */ | ||
73 | #define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */ | ||
74 | #define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */ | ||
75 | #define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */ | ||
76 | #define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */ | ||
77 | #define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */ | ||
78 | #define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */ | ||
79 | #define c10_AMAIR (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */ | ||
80 | #define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */ | ||
81 | #define NR_CP15_REGS (NR_SYS_REGS * 2) | ||
82 | |||
83 | #define ARM_EXCEPTION_IRQ 0 | ||
84 | #define ARM_EXCEPTION_TRAP 1 | ||
85 | |||
86 | #ifndef __ASSEMBLY__ | ||
87 | struct kvm; | ||
88 | struct kvm_vcpu; | ||
89 | |||
90 | extern char __kvm_hyp_init[]; | ||
91 | extern char __kvm_hyp_init_end[]; | ||
92 | |||
93 | extern char __kvm_hyp_vector[]; | ||
94 | |||
95 | extern char __kvm_hyp_code_start[]; | ||
96 | extern char __kvm_hyp_code_end[]; | ||
97 | |||
98 | extern void __kvm_flush_vm_context(void); | ||
99 | extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); | ||
100 | |||
101 | extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); | ||
102 | #endif | ||
103 | |||
104 | #endif /* __ARM_KVM_ASM_H__ */ | ||
diff --git a/arch/arm64/include/asm/kvm_coproc.h b/arch/arm64/include/asm/kvm_coproc.h new file mode 100644 index 000000000000..9a59301cd014 --- /dev/null +++ b/arch/arm64/include/asm/kvm_coproc.h | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * Derived from arch/arm/include/asm/kvm_coproc.h | ||
6 | * Copyright (C) 2012 Rusty Russell IBM Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #ifndef __ARM64_KVM_COPROC_H__ | ||
22 | #define __ARM64_KVM_COPROC_H__ | ||
23 | |||
24 | #include <linux/kvm_host.h> | ||
25 | |||
26 | void kvm_reset_sys_regs(struct kvm_vcpu *vcpu); | ||
27 | |||
28 | struct kvm_sys_reg_table { | ||
29 | const struct sys_reg_desc *table; | ||
30 | size_t num; | ||
31 | }; | ||
32 | |||
33 | struct kvm_sys_reg_target_table { | ||
34 | struct kvm_sys_reg_table table64; | ||
35 | struct kvm_sys_reg_table table32; | ||
36 | }; | ||
37 | |||
38 | void kvm_register_target_sys_reg_table(unsigned int target, | ||
39 | struct kvm_sys_reg_target_table *table); | ||
40 | |||
41 | int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
42 | int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
43 | int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
44 | int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
45 | int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
46 | |||
47 | #define kvm_coproc_table_init kvm_sys_reg_table_init | ||
48 | void kvm_sys_reg_table_init(void); | ||
49 | |||
50 | struct kvm_one_reg; | ||
51 | int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); | ||
52 | int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); | ||
53 | int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); | ||
54 | unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu); | ||
55 | |||
56 | #endif /* __ARM64_KVM_COPROC_H__ */ | ||
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h new file mode 100644 index 000000000000..eec073875218 --- /dev/null +++ b/arch/arm64/include/asm/kvm_emulate.h | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * Derived from arch/arm/include/kvm_emulate.h | ||
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
7 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
20 | */ | ||
21 | |||
22 | #ifndef __ARM64_KVM_EMULATE_H__ | ||
23 | #define __ARM64_KVM_EMULATE_H__ | ||
24 | |||
25 | #include <linux/kvm_host.h> | ||
26 | #include <asm/kvm_asm.h> | ||
27 | #include <asm/kvm_arm.h> | ||
28 | #include <asm/kvm_mmio.h> | ||
29 | #include <asm/ptrace.h> | ||
30 | |||
31 | unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num); | ||
32 | unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu); | ||
33 | |||
34 | bool kvm_condition_valid32(const struct kvm_vcpu *vcpu); | ||
35 | void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr); | ||
36 | |||
37 | void kvm_inject_undefined(struct kvm_vcpu *vcpu); | ||
38 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); | ||
39 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); | ||
40 | |||
41 | static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) | ||
42 | { | ||
43 | return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc; | ||
44 | } | ||
45 | |||
46 | static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu) | ||
47 | { | ||
48 | return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1; | ||
49 | } | ||
50 | |||
51 | static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu) | ||
52 | { | ||
53 | return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate; | ||
54 | } | ||
55 | |||
56 | static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu) | ||
57 | { | ||
58 | return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT); | ||
59 | } | ||
60 | |||
61 | static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu) | ||
62 | { | ||
63 | if (vcpu_mode_is_32bit(vcpu)) | ||
64 | return kvm_condition_valid32(vcpu); | ||
65 | |||
66 | return true; | ||
67 | } | ||
68 | |||
69 | static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) | ||
70 | { | ||
71 | if (vcpu_mode_is_32bit(vcpu)) | ||
72 | kvm_skip_instr32(vcpu, is_wide_instr); | ||
73 | else | ||
74 | *vcpu_pc(vcpu) += 4; | ||
75 | } | ||
76 | |||
77 | static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) | ||
78 | { | ||
79 | *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT; | ||
80 | } | ||
81 | |||
82 | static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num) | ||
83 | { | ||
84 | if (vcpu_mode_is_32bit(vcpu)) | ||
85 | return vcpu_reg32(vcpu, reg_num); | ||
86 | |||
87 | return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num]; | ||
88 | } | ||
89 | |||
90 | /* Get vcpu SPSR for current mode */ | ||
91 | static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu) | ||
92 | { | ||
93 | if (vcpu_mode_is_32bit(vcpu)) | ||
94 | return vcpu_spsr32(vcpu); | ||
95 | |||
96 | return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1]; | ||
97 | } | ||
98 | |||
99 | static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) | ||
100 | { | ||
101 | u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK; | ||
102 | |||
103 | if (vcpu_mode_is_32bit(vcpu)) | ||
104 | return mode > COMPAT_PSR_MODE_USR; | ||
105 | |||
106 | return mode != PSR_MODE_EL0t; | ||
107 | } | ||
108 | |||
109 | static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu) | ||
110 | { | ||
111 | return vcpu->arch.fault.esr_el2; | ||
112 | } | ||
113 | |||
114 | static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu) | ||
115 | { | ||
116 | return vcpu->arch.fault.far_el2; | ||
117 | } | ||
118 | |||
119 | static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu) | ||
120 | { | ||
121 | return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8; | ||
122 | } | ||
123 | |||
124 | static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu) | ||
125 | { | ||
126 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV); | ||
127 | } | ||
128 | |||
129 | static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) | ||
130 | { | ||
131 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR); | ||
132 | } | ||
133 | |||
134 | static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu) | ||
135 | { | ||
136 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE); | ||
137 | } | ||
138 | |||
139 | static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) | ||
140 | { | ||
141 | return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT; | ||
142 | } | ||
143 | |||
144 | static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) | ||
145 | { | ||
146 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA); | ||
147 | } | ||
148 | |||
149 | static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) | ||
150 | { | ||
151 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW); | ||
152 | } | ||
153 | |||
154 | static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) | ||
155 | { | ||
156 | return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT); | ||
157 | } | ||
158 | |||
159 | /* This one is not specific to Data Abort */ | ||
160 | static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu) | ||
161 | { | ||
162 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL); | ||
163 | } | ||
164 | |||
165 | static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu) | ||
166 | { | ||
167 | return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT; | ||
168 | } | ||
169 | |||
170 | static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) | ||
171 | { | ||
172 | return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT; | ||
173 | } | ||
174 | |||
175 | static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) | ||
176 | { | ||
177 | return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE; | ||
178 | } | ||
179 | |||
180 | #endif /* __ARM64_KVM_EMULATE_H__ */ | ||
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h new file mode 100644 index 000000000000..644d73956864 --- /dev/null +++ b/arch/arm64/include/asm/kvm_host.h | |||
@@ -0,0 +1,202 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * Derived from arch/arm/include/asm/kvm_host.h: | ||
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
7 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
20 | */ | ||
21 | |||
22 | #ifndef __ARM64_KVM_HOST_H__ | ||
23 | #define __ARM64_KVM_HOST_H__ | ||
24 | |||
25 | #include <asm/kvm.h> | ||
26 | #include <asm/kvm_asm.h> | ||
27 | #include <asm/kvm_mmio.h> | ||
28 | |||
29 | #define KVM_MAX_VCPUS 4 | ||
30 | #define KVM_USER_MEM_SLOTS 32 | ||
31 | #define KVM_PRIVATE_MEM_SLOTS 4 | ||
32 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | ||
33 | |||
34 | #include <kvm/arm_vgic.h> | ||
35 | #include <kvm/arm_arch_timer.h> | ||
36 | |||
37 | #define KVM_VCPU_MAX_FEATURES 2 | ||
38 | |||
39 | /* We don't currently support large pages. */ | ||
40 | #define KVM_HPAGE_GFN_SHIFT(x) 0 | ||
41 | #define KVM_NR_PAGE_SIZES 1 | ||
42 | #define KVM_PAGES_PER_HPAGE(x) (1UL<<31) | ||
43 | |||
44 | struct kvm_vcpu; | ||
45 | int kvm_target_cpu(void); | ||
46 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); | ||
47 | int kvm_arch_dev_ioctl_check_extension(long ext); | ||
48 | |||
49 | struct kvm_arch { | ||
50 | /* The VMID generation used for the virt. memory system */ | ||
51 | u64 vmid_gen; | ||
52 | u32 vmid; | ||
53 | |||
54 | /* 1-level 2nd stage table and lock */ | ||
55 | spinlock_t pgd_lock; | ||
56 | pgd_t *pgd; | ||
57 | |||
58 | /* VTTBR value associated with above pgd and vmid */ | ||
59 | u64 vttbr; | ||
60 | |||
61 | /* Interrupt controller */ | ||
62 | struct vgic_dist vgic; | ||
63 | |||
64 | /* Timer */ | ||
65 | struct arch_timer_kvm timer; | ||
66 | }; | ||
67 | |||
68 | #define KVM_NR_MEM_OBJS 40 | ||
69 | |||
70 | /* | ||
71 | * We don't want allocation failures within the mmu code, so we preallocate | ||
72 | * enough memory for a single page fault in a cache. | ||
73 | */ | ||
74 | struct kvm_mmu_memory_cache { | ||
75 | int nobjs; | ||
76 | void *objects[KVM_NR_MEM_OBJS]; | ||
77 | }; | ||
78 | |||
79 | struct kvm_vcpu_fault_info { | ||
80 | u32 esr_el2; /* Hyp Syndrom Register */ | ||
81 | u64 far_el2; /* Hyp Fault Address Register */ | ||
82 | u64 hpfar_el2; /* Hyp IPA Fault Address Register */ | ||
83 | }; | ||
84 | |||
85 | struct kvm_cpu_context { | ||
86 | struct kvm_regs gp_regs; | ||
87 | union { | ||
88 | u64 sys_regs[NR_SYS_REGS]; | ||
89 | u32 cp15[NR_CP15_REGS]; | ||
90 | }; | ||
91 | }; | ||
92 | |||
93 | typedef struct kvm_cpu_context kvm_cpu_context_t; | ||
94 | |||
95 | struct kvm_vcpu_arch { | ||
96 | struct kvm_cpu_context ctxt; | ||
97 | |||
98 | /* HYP configuration */ | ||
99 | u64 hcr_el2; | ||
100 | |||
101 | /* Exception Information */ | ||
102 | struct kvm_vcpu_fault_info fault; | ||
103 | |||
104 | /* Pointer to host CPU context */ | ||
105 | kvm_cpu_context_t *host_cpu_context; | ||
106 | |||
107 | /* VGIC state */ | ||
108 | struct vgic_cpu vgic_cpu; | ||
109 | struct arch_timer_cpu timer_cpu; | ||
110 | |||
111 | /* | ||
112 | * Anything that is not used directly from assembly code goes | ||
113 | * here. | ||
114 | */ | ||
115 | /* dcache set/way operation pending */ | ||
116 | int last_pcpu; | ||
117 | cpumask_t require_dcache_flush; | ||
118 | |||
119 | /* Don't run the guest */ | ||
120 | bool pause; | ||
121 | |||
122 | /* IO related fields */ | ||
123 | struct kvm_decode mmio_decode; | ||
124 | |||
125 | /* Interrupt related fields */ | ||
126 | u64 irq_lines; /* IRQ and FIQ levels */ | ||
127 | |||
128 | /* Cache some mmu pages needed inside spinlock regions */ | ||
129 | struct kvm_mmu_memory_cache mmu_page_cache; | ||
130 | |||
131 | /* Target CPU and feature flags */ | ||
132 | u32 target; | ||
133 | DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); | ||
134 | |||
135 | /* Detect first run of a vcpu */ | ||
136 | bool has_run_once; | ||
137 | }; | ||
138 | |||
139 | #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) | ||
140 | #define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)]) | ||
141 | #define vcpu_cp15(v,r) ((v)->arch.ctxt.cp15[(r)]) | ||
142 | |||
143 | struct kvm_vm_stat { | ||
144 | u32 remote_tlb_flush; | ||
145 | }; | ||
146 | |||
147 | struct kvm_vcpu_stat { | ||
148 | u32 halt_wakeup; | ||
149 | }; | ||
150 | |||
151 | struct kvm_vcpu_init; | ||
152 | int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, | ||
153 | const struct kvm_vcpu_init *init); | ||
154 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); | ||
155 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); | ||
156 | struct kvm_one_reg; | ||
157 | int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); | ||
158 | int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); | ||
159 | |||
160 | #define KVM_ARCH_WANT_MMU_NOTIFIER | ||
161 | struct kvm; | ||
162 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | ||
163 | int kvm_unmap_hva_range(struct kvm *kvm, | ||
164 | unsigned long start, unsigned long end); | ||
165 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | ||
166 | |||
167 | /* We do not have shadow page tables, hence the empty hooks */ | ||
168 | static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva) | ||
169 | { | ||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) | ||
174 | { | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | struct kvm_vcpu *kvm_arm_get_running_vcpu(void); | ||
179 | struct kvm_vcpu __percpu **kvm_get_running_vcpus(void); | ||
180 | |||
181 | u64 kvm_call_hyp(void *hypfn, ...); | ||
182 | |||
183 | int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
184 | int exception_index); | ||
185 | |||
186 | int kvm_perf_init(void); | ||
187 | int kvm_perf_teardown(void); | ||
188 | |||
189 | static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, | ||
190 | phys_addr_t pgd_ptr, | ||
191 | unsigned long hyp_stack_ptr, | ||
192 | unsigned long vector_ptr) | ||
193 | { | ||
194 | /* | ||
195 | * Call initialization code, and switch to the full blown | ||
196 | * HYP code. | ||
197 | */ | ||
198 | kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr, | ||
199 | hyp_stack_ptr, vector_ptr); | ||
200 | } | ||
201 | |||
202 | #endif /* __ARM64_KVM_HOST_H__ */ | ||
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h new file mode 100644 index 000000000000..fc2f689c0694 --- /dev/null +++ b/arch/arm64/include/asm/kvm_mmio.h | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef __ARM64_KVM_MMIO_H__ | ||
19 | #define __ARM64_KVM_MMIO_H__ | ||
20 | |||
21 | #include <linux/kvm_host.h> | ||
22 | #include <asm/kvm_asm.h> | ||
23 | #include <asm/kvm_arm.h> | ||
24 | |||
25 | /* | ||
26 | * This is annoying. The mmio code requires this, even if we don't | ||
27 | * need any decoding. To be fixed. | ||
28 | */ | ||
29 | struct kvm_decode { | ||
30 | unsigned long rt; | ||
31 | bool sign_extend; | ||
32 | }; | ||
33 | |||
34 | /* | ||
35 | * The in-kernel MMIO emulation code wants to use a copy of run->mmio, | ||
36 | * which is an anonymous type. Use our own type instead. | ||
37 | */ | ||
38 | struct kvm_exit_mmio { | ||
39 | phys_addr_t phys_addr; | ||
40 | u8 data[8]; | ||
41 | u32 len; | ||
42 | bool is_write; | ||
43 | }; | ||
44 | |||
45 | static inline void kvm_prepare_mmio(struct kvm_run *run, | ||
46 | struct kvm_exit_mmio *mmio) | ||
47 | { | ||
48 | run->mmio.phys_addr = mmio->phys_addr; | ||
49 | run->mmio.len = mmio->len; | ||
50 | run->mmio.is_write = mmio->is_write; | ||
51 | memcpy(run->mmio.data, mmio->data, mmio->len); | ||
52 | run->exit_reason = KVM_EXIT_MMIO; | ||
53 | } | ||
54 | |||
55 | int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
56 | int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
57 | phys_addr_t fault_ipa); | ||
58 | |||
59 | #endif /* __ARM64_KVM_MMIO_H__ */ | ||
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h new file mode 100644 index 000000000000..efe609c6a3c9 --- /dev/null +++ b/arch/arm64/include/asm/kvm_mmu.h | |||
@@ -0,0 +1,135 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef __ARM64_KVM_MMU_H__ | ||
19 | #define __ARM64_KVM_MMU_H__ | ||
20 | |||
21 | #include <asm/page.h> | ||
22 | #include <asm/memory.h> | ||
23 | |||
24 | /* | ||
25 | * As we only have the TTBR0_EL2 register, we cannot express | ||
26 | * "negative" addresses. This makes it impossible to directly share | ||
27 | * mappings with the kernel. | ||
28 | * | ||
29 | * Instead, give the HYP mode its own VA region at a fixed offset from | ||
30 | * the kernel by just masking the top bits (which are all ones for a | ||
31 | * kernel address). | ||
32 | */ | ||
33 | #define HYP_PAGE_OFFSET_SHIFT VA_BITS | ||
34 | #define HYP_PAGE_OFFSET_MASK ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1) | ||
35 | #define HYP_PAGE_OFFSET (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK) | ||
36 | |||
37 | /* | ||
38 | * Our virtual mapping for the idmap-ed MMU-enable code. Must be | ||
39 | * shared across all the page-tables. Conveniently, we use the last | ||
40 | * possible page, where no kernel mapping will ever exist. | ||
41 | */ | ||
42 | #define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK) | ||
43 | |||
44 | #ifdef __ASSEMBLY__ | ||
45 | |||
46 | /* | ||
47 | * Convert a kernel VA into a HYP VA. | ||
48 | * reg: VA to be converted. | ||
49 | */ | ||
50 | .macro kern_hyp_va reg | ||
51 | and \reg, \reg, #HYP_PAGE_OFFSET_MASK | ||
52 | .endm | ||
53 | |||
54 | #else | ||
55 | |||
56 | #include <asm/cachetype.h> | ||
57 | #include <asm/cacheflush.h> | ||
58 | |||
59 | #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET) | ||
60 | |||
61 | /* | ||
62 | * Align KVM with the kernel's view of physical memory. Should be | ||
63 | * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration. | ||
64 | */ | ||
65 | #define KVM_PHYS_SHIFT PHYS_MASK_SHIFT | ||
66 | #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT) | ||
67 | #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL) | ||
68 | |||
69 | /* Make sure we get the right size, and thus the right alignment */ | ||
70 | #define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT)) | ||
71 | #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) | ||
72 | |||
73 | int create_hyp_mappings(void *from, void *to); | ||
74 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t); | ||
75 | void free_boot_hyp_pgd(void); | ||
76 | void free_hyp_pgds(void); | ||
77 | |||
78 | int kvm_alloc_stage2_pgd(struct kvm *kvm); | ||
79 | void kvm_free_stage2_pgd(struct kvm *kvm); | ||
80 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, | ||
81 | phys_addr_t pa, unsigned long size); | ||
82 | |||
83 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
84 | |||
85 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); | ||
86 | |||
87 | phys_addr_t kvm_mmu_get_httbr(void); | ||
88 | phys_addr_t kvm_mmu_get_boot_httbr(void); | ||
89 | phys_addr_t kvm_get_idmap_vector(void); | ||
90 | int kvm_mmu_init(void); | ||
91 | void kvm_clear_hyp_idmap(void); | ||
92 | |||
93 | #define kvm_set_pte(ptep, pte) set_pte(ptep, pte) | ||
94 | |||
95 | static inline bool kvm_is_write_fault(unsigned long esr) | ||
96 | { | ||
97 | unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT; | ||
98 | |||
99 | if (esr_ec == ESR_EL2_EC_IABT) | ||
100 | return false; | ||
101 | |||
102 | if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR)) | ||
103 | return false; | ||
104 | |||
105 | return true; | ||
106 | } | ||
107 | |||
108 | static inline void kvm_clean_dcache_area(void *addr, size_t size) {} | ||
109 | static inline void kvm_clean_pgd(pgd_t *pgd) {} | ||
110 | static inline void kvm_clean_pmd_entry(pmd_t *pmd) {} | ||
111 | static inline void kvm_clean_pte(pte_t *pte) {} | ||
112 | static inline void kvm_clean_pte_entry(pte_t *pte) {} | ||
113 | |||
114 | static inline void kvm_set_s2pte_writable(pte_t *pte) | ||
115 | { | ||
116 | pte_val(*pte) |= PTE_S2_RDWR; | ||
117 | } | ||
118 | |||
119 | struct kvm; | ||
120 | |||
121 | static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) | ||
122 | { | ||
123 | if (!icache_is_aliasing()) { /* PIPT */ | ||
124 | unsigned long hva = gfn_to_hva(kvm, gfn); | ||
125 | flush_icache_range(hva, hva + PAGE_SIZE); | ||
126 | } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ | ||
127 | /* any kind of VIPT cache */ | ||
128 | __flush_icache_all(); | ||
129 | } | ||
130 | } | ||
131 | |||
132 | #define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) | ||
133 | |||
134 | #endif /* __ASSEMBLY__ */ | ||
135 | #endif /* __ARM64_KVM_MMU_H__ */ | ||
diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h new file mode 100644 index 000000000000..e301a4816355 --- /dev/null +++ b/arch/arm64/include/asm/kvm_psci.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef __ARM64_KVM_PSCI_H__ | ||
19 | #define __ARM64_KVM_PSCI_H__ | ||
20 | |||
21 | bool kvm_psci_call(struct kvm_vcpu *vcpu); | ||
22 | |||
23 | #endif /* __ARM64_KVM_PSCI_H__ */ | ||
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 381f556b664e..20925bcf4e2a 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h | |||
@@ -90,6 +90,12 @@ | |||
90 | #define MT_NORMAL_NC 3 | 90 | #define MT_NORMAL_NC 3 |
91 | #define MT_NORMAL 4 | 91 | #define MT_NORMAL 4 |
92 | 92 | ||
93 | /* | ||
94 | * Memory types for Stage-2 translation | ||
95 | */ | ||
96 | #define MT_S2_NORMAL 0xf | ||
97 | #define MT_S2_DEVICE_nGnRE 0x1 | ||
98 | |||
93 | #ifndef __ASSEMBLY__ | 99 | #ifndef __ASSEMBLY__ |
94 | 100 | ||
95 | extern phys_addr_t memstart_addr; | 101 | extern phys_addr_t memstart_addr; |
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index e2bc385adb6b..a9eee33dfa62 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h | |||
@@ -151,12 +151,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
151 | { | 151 | { |
152 | unsigned int cpu = smp_processor_id(); | 152 | unsigned int cpu = smp_processor_id(); |
153 | 153 | ||
154 | #ifdef CONFIG_SMP | ||
155 | /* check for possible thread migration */ | ||
156 | if (!cpumask_empty(mm_cpumask(next)) && | ||
157 | !cpumask_test_cpu(cpu, mm_cpumask(next))) | ||
158 | __flush_icache_all(); | ||
159 | #endif | ||
160 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) | 154 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) |
161 | check_and_switch_context(next, tsk); | 155 | check_and_switch_context(next, tsk); |
162 | } | 156 | } |
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 75fd13d289b9..e182a356c979 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h | |||
@@ -25,16 +25,27 @@ | |||
25 | /* | 25 | /* |
26 | * Hardware page table definitions. | 26 | * Hardware page table definitions. |
27 | * | 27 | * |
28 | * Level 1 descriptor (PUD). | ||
29 | */ | ||
30 | |||
31 | #define PUD_TABLE_BIT (_AT(pgdval_t, 1) << 1) | ||
32 | |||
33 | /* | ||
28 | * Level 2 descriptor (PMD). | 34 | * Level 2 descriptor (PMD). |
29 | */ | 35 | */ |
30 | #define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0) | 36 | #define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0) |
31 | #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) | 37 | #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) |
32 | #define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0) | 38 | #define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0) |
33 | #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0) | 39 | #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0) |
40 | #define PMD_TABLE_BIT (_AT(pmdval_t, 1) << 1) | ||
34 | 41 | ||
35 | /* | 42 | /* |
36 | * Section | 43 | * Section |
37 | */ | 44 | */ |
45 | #define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0) | ||
46 | #define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 2) | ||
47 | #define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */ | ||
48 | #define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */ | ||
38 | #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) | 49 | #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) |
39 | #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) | 50 | #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) |
40 | #define PMD_SECT_NG (_AT(pmdval_t, 1) << 11) | 51 | #define PMD_SECT_NG (_AT(pmdval_t, 1) << 11) |
@@ -53,6 +64,7 @@ | |||
53 | #define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0) | 64 | #define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0) |
54 | #define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0) | 65 | #define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0) |
55 | #define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0) | 66 | #define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0) |
67 | #define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1) | ||
56 | #define PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */ | 68 | #define PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */ |
57 | #define PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */ | 69 | #define PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */ |
58 | #define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ | 70 | #define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ |
@@ -68,6 +80,24 @@ | |||
68 | #define PTE_ATTRINDX_MASK (_AT(pteval_t, 7) << 2) | 80 | #define PTE_ATTRINDX_MASK (_AT(pteval_t, 7) << 2) |
69 | 81 | ||
70 | /* | 82 | /* |
83 | * 2nd stage PTE definitions | ||
84 | */ | ||
85 | #define PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[2:1] */ | ||
86 | #define PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ | ||
87 | |||
88 | /* | ||
89 | * Memory Attribute override for Stage-2 (MemAttr[3:0]) | ||
90 | */ | ||
91 | #define PTE_S2_MEMATTR(t) (_AT(pteval_t, (t)) << 2) | ||
92 | #define PTE_S2_MEMATTR_MASK (_AT(pteval_t, 0xf) << 2) | ||
93 | |||
94 | /* | ||
95 | * EL2/HYP PTE/PMD definitions | ||
96 | */ | ||
97 | #define PMD_HYP PMD_SECT_USER | ||
98 | #define PTE_HYP PTE_USER | ||
99 | |||
100 | /* | ||
71 | * 40-bit physical address supported. | 101 | * 40-bit physical address supported. |
72 | */ | 102 | */ |
73 | #define PHYS_MASK_SHIFT (40) | 103 | #define PHYS_MASK_SHIFT (40) |
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 3a768e96cf0e..f0bebc5e22cd 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h | |||
@@ -25,8 +25,8 @@ | |||
25 | * Software defined PTE bits definition. | 25 | * Software defined PTE bits definition. |
26 | */ | 26 | */ |
27 | #define PTE_VALID (_AT(pteval_t, 1) << 0) | 27 | #define PTE_VALID (_AT(pteval_t, 1) << 0) |
28 | #define PTE_PROT_NONE (_AT(pteval_t, 1) << 1) /* only when !PTE_VALID */ | 28 | #define PTE_PROT_NONE (_AT(pteval_t, 1) << 2) /* only when !PTE_VALID */ |
29 | #define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */ | 29 | #define PTE_FILE (_AT(pteval_t, 1) << 3) /* only when !pte_present() */ |
30 | #define PTE_DIRTY (_AT(pteval_t, 1) << 55) | 30 | #define PTE_DIRTY (_AT(pteval_t, 1) << 55) |
31 | #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) | 31 | #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) |
32 | 32 | ||
@@ -66,7 +66,7 @@ extern pgprot_t pgprot_default; | |||
66 | 66 | ||
67 | #define _MOD_PROT(p, b) __pgprot_modify(p, 0, b) | 67 | #define _MOD_PROT(p, b) __pgprot_modify(p, 0, b) |
68 | 68 | ||
69 | #define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE) | 69 | #define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN) |
70 | #define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) | 70 | #define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) |
71 | #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN) | 71 | #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN) |
72 | #define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) | 72 | #define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) |
@@ -76,7 +76,13 @@ extern pgprot_t pgprot_default; | |||
76 | #define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY) | 76 | #define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY) |
77 | #define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY) | 77 | #define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY) |
78 | 78 | ||
79 | #define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE) | 79 | #define PAGE_HYP _MOD_PROT(pgprot_default, PTE_HYP) |
80 | #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) | ||
81 | |||
82 | #define PAGE_S2 __pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) | ||
83 | #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN) | ||
84 | |||
85 | #define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN) | ||
80 | #define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) | 86 | #define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) |
81 | #define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) | 87 | #define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) |
82 | #define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) | 88 | #define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) |
@@ -119,7 +125,7 @@ extern struct page *empty_zero_page; | |||
119 | #define pte_none(pte) (!pte_val(pte)) | 125 | #define pte_none(pte) (!pte_val(pte)) |
120 | #define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0)) | 126 | #define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0)) |
121 | #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) | 127 | #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) |
122 | #define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr)) | 128 | #define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + pte_index(addr)) |
123 | 129 | ||
124 | #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) | 130 | #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) |
125 | #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) | 131 | #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) |
@@ -173,12 +179,76 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
173 | /* | 179 | /* |
174 | * Huge pte definitions. | 180 | * Huge pte definitions. |
175 | */ | 181 | */ |
176 | #define pte_huge(pte) ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE) | 182 | #define pte_huge(pte) (!(pte_val(pte) & PTE_TABLE_BIT)) |
177 | #define pte_mkhuge(pte) (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE)) | 183 | #define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT)) |
184 | |||
185 | /* | ||
186 | * Hugetlb definitions. | ||
187 | */ | ||
188 | #define HUGE_MAX_HSTATE 2 | ||
189 | #define HPAGE_SHIFT PMD_SHIFT | ||
190 | #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) | ||
191 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | ||
192 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | ||
178 | 193 | ||
179 | #define __HAVE_ARCH_PTE_SPECIAL | 194 | #define __HAVE_ARCH_PTE_SPECIAL |
180 | 195 | ||
181 | /* | 196 | /* |
197 | * Software PMD bits for THP | ||
198 | */ | ||
199 | |||
200 | #define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55) | ||
201 | #define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 57) | ||
202 | |||
203 | /* | ||
204 | * THP definitions. | ||
205 | */ | ||
206 | #define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF) | ||
207 | |||
208 | #define __HAVE_ARCH_PMD_WRITE | ||
209 | #define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY)) | ||
210 | |||
211 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
212 | #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) | ||
213 | #define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING) | ||
214 | #endif | ||
215 | |||
216 | #define PMD_BIT_FUNC(fn,op) \ | ||
217 | static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; } | ||
218 | |||
219 | PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY); | ||
220 | PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF); | ||
221 | PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING); | ||
222 | PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY); | ||
223 | PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY); | ||
224 | PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF); | ||
225 | PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK); | ||
226 | |||
227 | #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) | ||
228 | |||
229 | #define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT) | ||
230 | #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) | ||
231 | #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) | ||
232 | |||
233 | #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK)) | ||
234 | |||
235 | static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) | ||
236 | { | ||
237 | const pmdval_t mask = PMD_SECT_USER | PMD_SECT_PXN | PMD_SECT_UXN | | ||
238 | PMD_SECT_RDONLY | PMD_SECT_PROT_NONE | | ||
239 | PMD_SECT_VALID; | ||
240 | pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask); | ||
241 | return pmd; | ||
242 | } | ||
243 | |||
244 | #define set_pmd_at(mm, addr, pmdp, pmd) set_pmd(pmdp, pmd) | ||
245 | |||
246 | static inline int has_transparent_hugepage(void) | ||
247 | { | ||
248 | return 1; | ||
249 | } | ||
250 | |||
251 | /* | ||
182 | * Mark the prot value as uncacheable and unbufferable. | 252 | * Mark the prot value as uncacheable and unbufferable. |
183 | */ | 253 | */ |
184 | #define pgprot_noncached(prot) \ | 254 | #define pgprot_noncached(prot) \ |
@@ -197,6 +267,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | |||
197 | 267 | ||
198 | #define pmd_bad(pmd) (!(pmd_val(pmd) & 2)) | 268 | #define pmd_bad(pmd) (!(pmd_val(pmd) & 2)) |
199 | 269 | ||
270 | #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ | ||
271 | PMD_TYPE_TABLE) | ||
272 | #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ | ||
273 | PMD_TYPE_SECT) | ||
274 | |||
275 | |||
200 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) | 276 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) |
201 | { | 277 | { |
202 | *pmdp = pmd; | 278 | *pmdp = pmd; |
@@ -263,7 +339,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) | |||
263 | #endif | 339 | #endif |
264 | 340 | ||
265 | /* Find an entry in the third-level page table.. */ | 341 | /* Find an entry in the third-level page table.. */ |
266 | #define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | 342 | #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) |
267 | 343 | ||
268 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | 344 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
269 | { | 345 | { |
@@ -281,12 +357,12 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; | |||
281 | 357 | ||
282 | /* | 358 | /* |
283 | * Encode and decode a swap entry: | 359 | * Encode and decode a swap entry: |
284 | * bits 0-1: present (must be zero) | 360 | * bits 0, 2: present (must both be zero) |
285 | * bit 2: PTE_FILE | 361 | * bit 3: PTE_FILE |
286 | * bits 3-8: swap type | 362 | * bits 4-8: swap type |
287 | * bits 9-63: swap offset | 363 | * bits 9-63: swap offset |
288 | */ | 364 | */ |
289 | #define __SWP_TYPE_SHIFT 3 | 365 | #define __SWP_TYPE_SHIFT 4 |
290 | #define __SWP_TYPE_BITS 6 | 366 | #define __SWP_TYPE_BITS 6 |
291 | #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) | 367 | #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) |
292 | #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) | 368 | #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) |
@@ -306,15 +382,15 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; | |||
306 | 382 | ||
307 | /* | 383 | /* |
308 | * Encode and decode a file entry: | 384 | * Encode and decode a file entry: |
309 | * bits 0-1: present (must be zero) | 385 | * bits 0, 2: present (must both be zero) |
310 | * bit 2: PTE_FILE | 386 | * bit 3: PTE_FILE |
311 | * bits 3-63: file offset / PAGE_SIZE | 387 | * bits 4-63: file offset / PAGE_SIZE |
312 | */ | 388 | */ |
313 | #define pte_file(pte) (pte_val(pte) & PTE_FILE) | 389 | #define pte_file(pte) (pte_val(pte) & PTE_FILE) |
314 | #define pte_to_pgoff(x) (pte_val(x) >> 3) | 390 | #define pte_to_pgoff(x) (pte_val(x) >> 4) |
315 | #define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE) | 391 | #define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE) |
316 | 392 | ||
317 | #define PTE_FILE_MAX_BITS 61 | 393 | #define PTE_FILE_MAX_BITS 60 |
318 | 394 | ||
319 | extern int kern_addr_valid(unsigned long addr); | 395 | extern int kern_addr_valid(unsigned long addr); |
320 | 396 | ||
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 41a71ee4c3df..0dacbbf9458b 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h | |||
@@ -171,7 +171,5 @@ extern unsigned long profile_pc(struct pt_regs *regs); | |||
171 | #define profile_pc(regs) instruction_pointer(regs) | 171 | #define profile_pc(regs) instruction_pointer(regs) |
172 | #endif | 172 | #endif |
173 | 173 | ||
174 | extern int aarch32_break_trap(struct pt_regs *regs); | ||
175 | |||
176 | #endif /* __ASSEMBLY__ */ | 174 | #endif /* __ASSEMBLY__ */ |
177 | #endif | 175 | #endif |
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 7065e920149d..0defa0728a9b 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h | |||
@@ -59,9 +59,10 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) | |||
59 | unsigned int tmp; | 59 | unsigned int tmp; |
60 | 60 | ||
61 | asm volatile( | 61 | asm volatile( |
62 | " ldaxr %w0, %1\n" | 62 | "2: ldaxr %w0, %1\n" |
63 | " cbnz %w0, 1f\n" | 63 | " cbnz %w0, 1f\n" |
64 | " stxr %w0, %w2, %1\n" | 64 | " stxr %w0, %w2, %1\n" |
65 | " cbnz %w0, 2b\n" | ||
65 | "1:\n" | 66 | "1:\n" |
66 | : "=&r" (tmp), "+Q" (lock->lock) | 67 | : "=&r" (tmp), "+Q" (lock->lock) |
67 | : "r" (1) | 68 | : "r" (1) |
diff --git a/arch/arm64/include/asm/sync_bitops.h b/arch/arm64/include/asm/sync_bitops.h new file mode 100644 index 000000000000..8da0bf4f7659 --- /dev/null +++ b/arch/arm64/include/asm/sync_bitops.h | |||
@@ -0,0 +1,26 @@ | |||
1 | #ifndef __ASM_SYNC_BITOPS_H__ | ||
2 | #define __ASM_SYNC_BITOPS_H__ | ||
3 | |||
4 | #include <asm/bitops.h> | ||
5 | #include <asm/cmpxchg.h> | ||
6 | |||
7 | /* sync_bitops functions are equivalent to the SMP implementation of the | ||
8 | * original functions, independently from CONFIG_SMP being defined. | ||
9 | * | ||
10 | * We need them because _set_bit etc are not SMP safe if !CONFIG_SMP. But | ||
11 | * under Xen you might be communicating with a completely external entity | ||
12 | * who might be on another CPU (e.g. two uniprocessor guests communicating | ||
13 | * via event channels and grant tables). So we need a variant of the bit | ||
14 | * ops which are SMP safe even on a UP kernel. | ||
15 | */ | ||
16 | |||
17 | #define sync_set_bit(nr, p) set_bit(nr, p) | ||
18 | #define sync_clear_bit(nr, p) clear_bit(nr, p) | ||
19 | #define sync_change_bit(nr, p) change_bit(nr, p) | ||
20 | #define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p) | ||
21 | #define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p) | ||
22 | #define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p) | ||
23 | #define sync_test_bit(nr, addr) test_bit(nr, addr) | ||
24 | #define sync_cmpxchg cmpxchg | ||
25 | |||
26 | #endif | ||
diff --git a/arch/arm64/include/asm/timex.h b/arch/arm64/include/asm/timex.h index b24a31a7e2c9..81a076eb37fa 100644 --- a/arch/arm64/include/asm/timex.h +++ b/arch/arm64/include/asm/timex.h | |||
@@ -16,14 +16,14 @@ | |||
16 | #ifndef __ASM_TIMEX_H | 16 | #ifndef __ASM_TIMEX_H |
17 | #define __ASM_TIMEX_H | 17 | #define __ASM_TIMEX_H |
18 | 18 | ||
19 | #include <asm/arch_timer.h> | ||
20 | |||
19 | /* | 21 | /* |
20 | * Use the current timer as a cycle counter since this is what we use for | 22 | * Use the current timer as a cycle counter since this is what we use for |
21 | * the delay loop. | 23 | * the delay loop. |
22 | */ | 24 | */ |
23 | #define get_cycles() ({ cycles_t c; read_current_timer(&c); c; }) | 25 | #define get_cycles() arch_counter_get_cntvct() |
24 | 26 | ||
25 | #include <asm-generic/timex.h> | 27 | #include <asm-generic/timex.h> |
26 | 28 | ||
27 | #define ARCH_HAS_READ_CURRENT_TIMER | ||
28 | |||
29 | #endif | 29 | #endif |
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 654f0968030b..46b3beb4b773 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h | |||
@@ -187,4 +187,10 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, | |||
187 | 187 | ||
188 | #define tlb_migrate_finish(mm) do { } while (0) | 188 | #define tlb_migrate_finish(mm) do { } while (0) |
189 | 189 | ||
190 | static inline void | ||
191 | tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) | ||
192 | { | ||
193 | tlb_add_flush(tlb, addr); | ||
194 | } | ||
195 | |||
190 | #endif | 196 | #endif |
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 122d6320f745..8b482035cfc2 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h | |||
@@ -117,6 +117,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, | |||
117 | dsb(); | 117 | dsb(); |
118 | } | 118 | } |
119 | 119 | ||
120 | #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) | ||
121 | |||
120 | #endif | 122 | #endif |
121 | 123 | ||
122 | #endif | 124 | #endif |
diff --git a/arch/arm64/include/asm/xen/events.h b/arch/arm64/include/asm/xen/events.h new file mode 100644 index 000000000000..86553213c132 --- /dev/null +++ b/arch/arm64/include/asm/xen/events.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef _ASM_ARM64_XEN_EVENTS_H | ||
2 | #define _ASM_ARM64_XEN_EVENTS_H | ||
3 | |||
4 | #include <asm/ptrace.h> | ||
5 | #include <asm/atomic.h> | ||
6 | |||
7 | enum ipi_vector { | ||
8 | XEN_PLACEHOLDER_VECTOR, | ||
9 | |||
10 | /* Xen IPIs go here */ | ||
11 | XEN_NR_IPIS, | ||
12 | }; | ||
13 | |||
14 | static inline int xen_irqs_disabled(struct pt_regs *regs) | ||
15 | { | ||
16 | return raw_irqs_disabled_flags((unsigned long) regs->pstate); | ||
17 | } | ||
18 | |||
19 | #define xchg_xen_ulong(ptr, val) xchg((ptr), (val)) | ||
20 | |||
21 | #endif /* _ASM_ARM64_XEN_EVENTS_H */ | ||
diff --git a/arch/arm64/include/asm/xen/hypercall.h b/arch/arm64/include/asm/xen/hypercall.h new file mode 100644 index 000000000000..74b0c423ff5b --- /dev/null +++ b/arch/arm64/include/asm/xen/hypercall.h | |||
@@ -0,0 +1 @@ | |||
#include <../../arm/include/asm/xen/hypercall.h> | |||
diff --git a/arch/arm64/include/asm/xen/hypervisor.h b/arch/arm64/include/asm/xen/hypervisor.h new file mode 100644 index 000000000000..f263da8e8769 --- /dev/null +++ b/arch/arm64/include/asm/xen/hypervisor.h | |||
@@ -0,0 +1 @@ | |||
#include <../../arm/include/asm/xen/hypervisor.h> | |||
diff --git a/arch/arm64/include/asm/xen/interface.h b/arch/arm64/include/asm/xen/interface.h new file mode 100644 index 000000000000..44457aebeed4 --- /dev/null +++ b/arch/arm64/include/asm/xen/interface.h | |||
@@ -0,0 +1 @@ | |||
#include <../../arm/include/asm/xen/interface.h> | |||
diff --git a/arch/arm64/include/asm/xen/page.h b/arch/arm64/include/asm/xen/page.h new file mode 100644 index 000000000000..bed87ec36780 --- /dev/null +++ b/arch/arm64/include/asm/xen/page.h | |||
@@ -0,0 +1 @@ | |||
#include <../../arm/include/asm/xen/page.h> | |||
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h new file mode 100644 index 000000000000..5031f4263937 --- /dev/null +++ b/arch/arm64/include/uapi/asm/kvm.h | |||
@@ -0,0 +1,168 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * Derived from arch/arm/include/uapi/asm/kvm.h: | ||
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
7 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
20 | */ | ||
21 | |||
22 | #ifndef __ARM_KVM_H__ | ||
23 | #define __ARM_KVM_H__ | ||
24 | |||
25 | #define KVM_SPSR_EL1 0 | ||
26 | #define KVM_SPSR_SVC KVM_SPSR_EL1 | ||
27 | #define KVM_SPSR_ABT 1 | ||
28 | #define KVM_SPSR_UND 2 | ||
29 | #define KVM_SPSR_IRQ 3 | ||
30 | #define KVM_SPSR_FIQ 4 | ||
31 | #define KVM_NR_SPSR 5 | ||
32 | |||
33 | #ifndef __ASSEMBLY__ | ||
34 | #include <asm/types.h> | ||
35 | #include <asm/ptrace.h> | ||
36 | |||
37 | #define __KVM_HAVE_GUEST_DEBUG | ||
38 | #define __KVM_HAVE_IRQ_LINE | ||
39 | |||
40 | #define KVM_REG_SIZE(id) \ | ||
41 | (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) | ||
42 | |||
43 | struct kvm_regs { | ||
44 | struct user_pt_regs regs; /* sp = sp_el0 */ | ||
45 | |||
46 | __u64 sp_el1; | ||
47 | __u64 elr_el1; | ||
48 | |||
49 | __u64 spsr[KVM_NR_SPSR]; | ||
50 | |||
51 | struct user_fpsimd_state fp_regs; | ||
52 | }; | ||
53 | |||
54 | /* Supported Processor Types */ | ||
55 | #define KVM_ARM_TARGET_AEM_V8 0 | ||
56 | #define KVM_ARM_TARGET_FOUNDATION_V8 1 | ||
57 | #define KVM_ARM_TARGET_CORTEX_A57 2 | ||
58 | |||
59 | #define KVM_ARM_NUM_TARGETS 3 | ||
60 | |||
61 | /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */ | ||
62 | #define KVM_ARM_DEVICE_TYPE_SHIFT 0 | ||
63 | #define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT) | ||
64 | #define KVM_ARM_DEVICE_ID_SHIFT 16 | ||
65 | #define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT) | ||
66 | |||
67 | /* Supported device IDs */ | ||
68 | #define KVM_ARM_DEVICE_VGIC_V2 0 | ||
69 | |||
70 | /* Supported VGIC address types */ | ||
71 | #define KVM_VGIC_V2_ADDR_TYPE_DIST 0 | ||
72 | #define KVM_VGIC_V2_ADDR_TYPE_CPU 1 | ||
73 | |||
74 | #define KVM_VGIC_V2_DIST_SIZE 0x1000 | ||
75 | #define KVM_VGIC_V2_CPU_SIZE 0x2000 | ||
76 | |||
77 | #define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ | ||
78 | #define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */ | ||
79 | |||
80 | struct kvm_vcpu_init { | ||
81 | __u32 target; | ||
82 | __u32 features[7]; | ||
83 | }; | ||
84 | |||
85 | struct kvm_sregs { | ||
86 | }; | ||
87 | |||
88 | struct kvm_fpu { | ||
89 | }; | ||
90 | |||
91 | struct kvm_guest_debug_arch { | ||
92 | }; | ||
93 | |||
94 | struct kvm_debug_exit_arch { | ||
95 | }; | ||
96 | |||
97 | struct kvm_sync_regs { | ||
98 | }; | ||
99 | |||
100 | struct kvm_arch_memory_slot { | ||
101 | }; | ||
102 | |||
103 | /* If you need to interpret the index values, here is the key: */ | ||
104 | #define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000 | ||
105 | #define KVM_REG_ARM_COPROC_SHIFT 16 | ||
106 | |||
107 | /* Normal registers are mapped as coprocessor 16. */ | ||
108 | #define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT) | ||
109 | #define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / sizeof(__u32)) | ||
110 | |||
111 | /* Some registers need more space to represent values. */ | ||
112 | #define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT) | ||
113 | #define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00 | ||
114 | #define KVM_REG_ARM_DEMUX_ID_SHIFT 8 | ||
115 | #define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT) | ||
116 | #define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF | ||
117 | #define KVM_REG_ARM_DEMUX_VAL_SHIFT 0 | ||
118 | |||
119 | /* AArch64 system registers */ | ||
120 | #define KVM_REG_ARM64_SYSREG (0x0013 << KVM_REG_ARM_COPROC_SHIFT) | ||
121 | #define KVM_REG_ARM64_SYSREG_OP0_MASK 0x000000000000c000 | ||
122 | #define KVM_REG_ARM64_SYSREG_OP0_SHIFT 14 | ||
123 | #define KVM_REG_ARM64_SYSREG_OP1_MASK 0x0000000000003800 | ||
124 | #define KVM_REG_ARM64_SYSREG_OP1_SHIFT 11 | ||
125 | #define KVM_REG_ARM64_SYSREG_CRN_MASK 0x0000000000000780 | ||
126 | #define KVM_REG_ARM64_SYSREG_CRN_SHIFT 7 | ||
127 | #define KVM_REG_ARM64_SYSREG_CRM_MASK 0x0000000000000078 | ||
128 | #define KVM_REG_ARM64_SYSREG_CRM_SHIFT 3 | ||
129 | #define KVM_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007 | ||
130 | #define KVM_REG_ARM64_SYSREG_OP2_SHIFT 0 | ||
131 | |||
132 | /* KVM_IRQ_LINE irq field index values */ | ||
133 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 | ||
134 | #define KVM_ARM_IRQ_TYPE_MASK 0xff | ||
135 | #define KVM_ARM_IRQ_VCPU_SHIFT 16 | ||
136 | #define KVM_ARM_IRQ_VCPU_MASK 0xff | ||
137 | #define KVM_ARM_IRQ_NUM_SHIFT 0 | ||
138 | #define KVM_ARM_IRQ_NUM_MASK 0xffff | ||
139 | |||
140 | /* irq_type field */ | ||
141 | #define KVM_ARM_IRQ_TYPE_CPU 0 | ||
142 | #define KVM_ARM_IRQ_TYPE_SPI 1 | ||
143 | #define KVM_ARM_IRQ_TYPE_PPI 2 | ||
144 | |||
145 | /* out-of-kernel GIC cpu interrupt injection irq_number field */ | ||
146 | #define KVM_ARM_IRQ_CPU_IRQ 0 | ||
147 | #define KVM_ARM_IRQ_CPU_FIQ 1 | ||
148 | |||
149 | /* Highest supported SPI, from VGIC_NR_IRQS */ | ||
150 | #define KVM_ARM_IRQ_GIC_MAX 127 | ||
151 | |||
152 | /* PSCI interface */ | ||
153 | #define KVM_PSCI_FN_BASE 0x95c1ba5e | ||
154 | #define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) | ||
155 | |||
156 | #define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0) | ||
157 | #define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1) | ||
158 | #define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2) | ||
159 | #define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3) | ||
160 | |||
161 | #define KVM_PSCI_RET_SUCCESS 0 | ||
162 | #define KVM_PSCI_RET_NI ((unsigned long)-1) | ||
163 | #define KVM_PSCI_RET_INVAL ((unsigned long)-2) | ||
164 | #define KVM_PSCI_RET_DENIED ((unsigned long)-3) | ||
165 | |||
166 | #endif | ||
167 | |||
168 | #endif /* __ARM_KVM_H__ */ | ||