diff options
author | Marc Zyngier <marc.zyngier@arm.com> | 2012-12-10 10:35:24 -0500 |
---|---|---|
committer | Marc Zyngier <marc.zyngier@arm.com> | 2013-06-07 09:03:35 -0400 |
commit | 37c437532b0126d1df5685080db9cecf3d918175 (patch) | |
tree | c8251c79af40f2b6c827edc5b1589e2092431fde /arch/arm64 | |
parent | aa8eff9bfbd531e0fcc8e68052f4ac545cd004c5 (diff) |
arm64: KVM: architecture specific MMU backend
Define the arm64 specific MMU backend:
- HYP/kernel VA offset
- S2 4/64kB definitions
- S2 page table populating and flushing
- icache cleaning
Reviewed-by: Christopher Covington <cov@codeaurora.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'arch/arm64')
-rw-r--r-- | arch/arm64/include/asm/kvm_mmu.h | 135 |
1 file changed, 135 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h new file mode 100644 index 000000000000..efe609c6a3c9 --- /dev/null +++ b/arch/arm64/include/asm/kvm_mmu.h | |||
@@ -0,0 +1,135 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef __ARM64_KVM_MMU_H__ | ||
19 | #define __ARM64_KVM_MMU_H__ | ||
20 | |||
21 | #include <asm/page.h> | ||
22 | #include <asm/memory.h> | ||
23 | |||
24 | /* | ||
25 | * As we only have the TTBR0_EL2 register, we cannot express | ||
26 | * "negative" addresses. This makes it impossible to directly share | ||
27 | * mappings with the kernel. | ||
28 | * | ||
29 | * Instead, give the HYP mode its own VA region at a fixed offset from | ||
30 | * the kernel by just masking the top bits (which are all ones for a | ||
31 | * kernel address). | ||
32 | */ | ||
33 | #define HYP_PAGE_OFFSET_SHIFT VA_BITS | ||
34 | #define HYP_PAGE_OFFSET_MASK ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1) | ||
35 | #define HYP_PAGE_OFFSET (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK) | ||
36 | |||
37 | /* | ||
38 | * Our virtual mapping for the idmap-ed MMU-enable code. Must be | ||
39 | * shared across all the page-tables. Conveniently, we use the last | ||
40 | * possible page, where no kernel mapping will ever exist. | ||
41 | */ | ||
42 | #define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK) | ||
43 | |||
44 | #ifdef __ASSEMBLY__ | ||
45 | |||
46 | /* | ||
47 | * Convert a kernel VA into a HYP VA. | ||
48 | * reg: VA to be converted. | ||
49 | */ | ||
50 | .macro kern_hyp_va reg | ||
51 | and \reg, \reg, #HYP_PAGE_OFFSET_MASK | ||
52 | .endm | ||
53 | |||
54 | #else | ||
55 | |||
56 | #include <asm/cachetype.h> | ||
57 | #include <asm/cacheflush.h> | ||
58 | |||
59 | #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET) | ||
60 | |||
61 | /* | ||
62 | * Align KVM with the kernel's view of physical memory. Should be | ||
63 | * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration. | ||
64 | */ | ||
65 | #define KVM_PHYS_SHIFT PHYS_MASK_SHIFT | ||
66 | #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT) | ||
67 | #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL) | ||
68 | |||
69 | /* Make sure we get the right size, and thus the right alignment */ | ||
70 | #define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT)) | ||
71 | #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) | ||
72 | |||
73 | int create_hyp_mappings(void *from, void *to); | ||
74 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t); | ||
75 | void free_boot_hyp_pgd(void); | ||
76 | void free_hyp_pgds(void); | ||
77 | |||
78 | int kvm_alloc_stage2_pgd(struct kvm *kvm); | ||
79 | void kvm_free_stage2_pgd(struct kvm *kvm); | ||
80 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, | ||
81 | phys_addr_t pa, unsigned long size); | ||
82 | |||
83 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
84 | |||
85 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); | ||
86 | |||
87 | phys_addr_t kvm_mmu_get_httbr(void); | ||
88 | phys_addr_t kvm_mmu_get_boot_httbr(void); | ||
89 | phys_addr_t kvm_get_idmap_vector(void); | ||
90 | int kvm_mmu_init(void); | ||
91 | void kvm_clear_hyp_idmap(void); | ||
92 | |||
93 | #define kvm_set_pte(ptep, pte) set_pte(ptep, pte) | ||
94 | |||
95 | static inline bool kvm_is_write_fault(unsigned long esr) | ||
96 | { | ||
97 | unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT; | ||
98 | |||
99 | if (esr_ec == ESR_EL2_EC_IABT) | ||
100 | return false; | ||
101 | |||
102 | if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR)) | ||
103 | return false; | ||
104 | |||
105 | return true; | ||
106 | } | ||
107 | |||
108 | static inline void kvm_clean_dcache_area(void *addr, size_t size) {} | ||
109 | static inline void kvm_clean_pgd(pgd_t *pgd) {} | ||
110 | static inline void kvm_clean_pmd_entry(pmd_t *pmd) {} | ||
111 | static inline void kvm_clean_pte(pte_t *pte) {} | ||
112 | static inline void kvm_clean_pte_entry(pte_t *pte) {} | ||
113 | |||
114 | static inline void kvm_set_s2pte_writable(pte_t *pte) | ||
115 | { | ||
116 | pte_val(*pte) |= PTE_S2_RDWR; | ||
117 | } | ||
118 | |||
119 | struct kvm; | ||
120 | |||
121 | static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) | ||
122 | { | ||
123 | if (!icache_is_aliasing()) { /* PIPT */ | ||
124 | unsigned long hva = gfn_to_hva(kvm, gfn); | ||
125 | flush_icache_range(hva, hva + PAGE_SIZE); | ||
126 | } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ | ||
127 | /* any kind of VIPT cache */ | ||
128 | __flush_icache_all(); | ||
129 | } | ||
130 | } | ||
131 | |||
132 | #define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) | ||
133 | |||
134 | #endif /* __ASSEMBLY__ */ | ||
135 | #endif /* __ARM64_KVM_MMU_H__ */ | ||