author	Christoffer Dall <christoffer.dall@linaro.org>	2014-10-10 06:14:28 -0400
committer	Christoffer Dall <christoffer.dall@linaro.org>	2014-10-14 08:48:19 -0400
commit	38f791a4e499792eeb2a3c0572dd5133511c5bbb (patch)
tree	782fd34d0980d623fb1cfe7e026915e25983f12f /arch/arm64
parent	8eef91239e57d2e932e7470879c9a504d5494ebb (diff)
arm64: KVM: Implement 48 VA support for KVM EL2 and Stage-2
This patch adds the necessary support for all host kernel PGSIZE and
VA_SPACE configuration options for both EL2 and the Stage-2 page tables.

However, for 40bit and 42bit PARange systems, the architecture mandates
that VTCR_EL2.SL0 is maximum 1, resulting in fewer levels of stage-2 page
tables than levels of host kernel page tables. At the same time, on
systems with a PARange > 42bit, we limit the IPA range by always setting
VTCR_EL2.T0SZ to 24.

To solve the situation with different levels of page tables for Stage-2
translation than the host kernel page tables, we allocate a dummy PGD
with pointers to our actual initial level Stage-2 page table, so that we
can reuse the kernel pgtable manipulation primitives. Reproducing all
these levels in KVM does not look pretty and would unnecessarily
complicate the 32-bit side.

Systems with a PARange < 40bits are not yet supported.

  [ I have reworked this patch from its original form submitted by
    Jungseok to take the architecture constraints into consideration.
    There were too many changes from the original patch for me to
    preserve the authorship. Thanks to Catalin Marinas for his help in
    figuring out a good solution to this challenge. I have also fixed
    various bugs and missing error code handling from the original
    patch. - Christoffer ]

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Jungseok Lee <jungseoklee85@gmail.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
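To make the level arithmetic concrete, here is a small stand-alone sketch
(illustrative only, not part of the patch or the kernel tree) that mirrors
the PTRS_PER_S2_PGD and KVM_PREALLOC_LEVEL computations from the kvm_mmu.h
hunks below, for four common arm64 host configurations. The PGDIR_SHIFT
and page-table-level values are the standard arm64 ones; the fixed 40-bit
IPA width follows from the patch always setting VTCR_EL2.T0SZ to 24.

/* Illustrative userspace mirror of the kvm_mmu.h level arithmetic. */
#include <stdio.h>

static void show(const char *cfg, int pgtable_levels, int pgdir_shift)
{
	const int kvm_phys_shift = 40;	/* VTCR_EL2.T0SZ == 24 => 40-bit IPA */
	int s2_pgd_shift, ptrs_per_s2_pgd, prealloc_level;

	/* #if PGDIR_SHIFT > KVM_PHYS_SHIFT: one pgd entry covers the IPA range */
	s2_pgd_shift = pgdir_shift > kvm_phys_shift ?
		       0 : kvm_phys_shift - pgdir_shift;
	ptrs_per_s2_pgd = 1 << s2_pgd_shift;

	/*
	 * <= 16 entries means the first real Stage-2 level is concatenated,
	 * so the fake PGD (and possibly PUD) stand in for the host's extra
	 * level(s); otherwise nothing needs pre-allocating.
	 */
	prealloc_level = ptrs_per_s2_pgd <= 16 ? 4 - pgtable_levels + 1 : 0;

	printf("%-20s PTRS_PER_S2_PGD=%-5d KVM_PREALLOC_LEVEL=%d\n",
	       cfg, ptrs_per_s2_pgd, prealloc_level);
}

int main(void)
{
	show("4K pages, 39-bit VA", 3, 30);	/* 1024 entries, level 0 */
	show("4K pages, 48-bit VA", 4, 39);	/*    2 entries, 2 PUD pages */
	show("64K pages, 42-bit VA", 2, 29);	/* 2048 entries, level 0 */
	show("64K pages, 48-bit VA", 3, 42);	/*    1 entry, 1 PMD page */
	return 0;
}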
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/kvm_mmu.h	125
1 file changed, 116 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index e36171974d6a..0caf7a59f6a1 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -41,6 +41,18 @@
  */
 #define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)
 
+/*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
+ * levels in addition to the PGD and potentially the PUD which are
+ * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2
+ * tables use one level of tables less than the kernel).
+ */
+#ifdef CONFIG_ARM64_64K_PAGES
+#define KVM_MMU_CACHE_MIN_PAGES	1
+#else
+#define KVM_MMU_CACHE_MIN_PAGES	2
+#endif
+
 #ifdef __ASSEMBLY__
 
 /*
@@ -53,6 +65,7 @@
 
 #else
 
+#include <asm/pgalloc.h>
 #include <asm/cachetype.h>
 #include <asm/cacheflush.h>
 
@@ -65,10 +78,6 @@
 #define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
 #define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
 
-/* Make sure we get the right size, and thus the right alignment */
-#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
-#define S2_PGD_ORDER	get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
-
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_boot_hyp_pgd(void);
@@ -93,6 +102,7 @@ void kvm_clear_hyp_idmap(void);
 #define	kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)
 
 static inline void kvm_clean_pgd(pgd_t *pgd) {}
+static inline void kvm_clean_pmd(pmd_t *pmd) {}
 static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
 static inline void kvm_clean_pte(pte_t *pte) {}
 static inline void kvm_clean_pte_entry(pte_t *pte) {}
@@ -111,19 +121,116 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
 #define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
 #define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)
 
+/*
+ * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, we can address
+ * the entire IPA input range with a single pgd entry, and we would only need
+ * one pgd entry.  Note that in this case, the pgd is actually not used by
+ * the MMU for Stage-2 translations, but is merely a fake pgd used as a data
+ * structure for the kernel pgtable macros to work.
+ */
+#if PGDIR_SHIFT > KVM_PHYS_SHIFT
+#define PTRS_PER_S2_PGD_SHIFT	0
+#else
+#define PTRS_PER_S2_PGD_SHIFT	(KVM_PHYS_SHIFT - PGDIR_SHIFT)
+#endif
+#define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)
+#define S2_PGD_ORDER		get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+
+/*
+ * If we are concatenating first level stage-2 page tables, we would have less
+ * than or equal to 16 pointers in the fake PGD, because that's what the
+ * architecture allows.  In this case, (4 - CONFIG_ARM64_PGTABLE_LEVELS)
+ * represents the first level for the host, and we add 1 to go to the next
+ * level (which uses concatenation) for the stage-2 tables.
+ */
+#if PTRS_PER_S2_PGD <= 16
+#define KVM_PREALLOC_LEVEL	(4 - CONFIG_ARM64_PGTABLE_LEVELS + 1)
+#else
+#define KVM_PREALLOC_LEVEL	(0)
+#endif
+
+/**
+ * kvm_prealloc_hwpgd - allocate initial table for VTTBR
+ * @kvm:	The KVM struct pointer for the VM.
+ * @pgd:	The kernel pseudo pgd
+ *
+ * When the kernel uses more levels of page tables than the guest, we allocate
+ * a fake PGD and pre-populate it to point to the next-level page table, which
+ * will be the real initial page table pointed to by the VTTBR.
+ *
+ * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and
+ * the kernel will use a folded pud.  When KVM_PREALLOC_LEVEL==1, we
+ * allocate 2 consecutive PUD pages.
+ */
+static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
+{
+	unsigned int i;
+	unsigned long hwpgd;
+
+	if (KVM_PREALLOC_LEVEL == 0)
+		return 0;
+
+	hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT);
+	if (!hwpgd)
+		return -ENOMEM;
+
+	for (i = 0; i < PTRS_PER_S2_PGD; i++) {
+		if (KVM_PREALLOC_LEVEL == 1)
+			pgd_populate(NULL, pgd + i,
+				     (pud_t *)hwpgd + i * PTRS_PER_PUD);
+		else if (KVM_PREALLOC_LEVEL == 2)
+			pud_populate(NULL, pud_offset(pgd, 0) + i,
+				     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
+	}
+
+	return 0;
+}
+
+static inline void *kvm_get_hwpgd(struct kvm *kvm)
+{
+	pgd_t *pgd = kvm->arch.pgd;
+	pud_t *pud;
+
+	if (KVM_PREALLOC_LEVEL == 0)
+		return pgd;
+
+	pud = pud_offset(pgd, 0);
+	if (KVM_PREALLOC_LEVEL == 1)
+		return pud;
+
+	BUG_ON(KVM_PREALLOC_LEVEL != 2);
+	return pmd_offset(pud, 0);
+}
+
+static inline void kvm_free_hwpgd(struct kvm *kvm)
+{
+	if (KVM_PREALLOC_LEVEL > 0) {
+		unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm);
+		free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT);
+	}
+}
+
 static inline bool kvm_page_empty(void *ptr)
 {
 	struct page *ptr_page = virt_to_page(ptr);
 	return page_count(ptr_page) == 1;
 }
 
-#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
-#ifndef CONFIG_ARM64_64K_PAGES
-#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
+#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
+
+#ifdef __PAGETABLE_PMD_FOLDED
+#define kvm_pmd_table_empty(kvm, pmdp) (0)
+#else
+#define kvm_pmd_table_empty(kvm, pmdp) \
+	(kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
+#endif
+
+#ifdef __PAGETABLE_PUD_FOLDED
+#define kvm_pud_table_empty(kvm, pudp) (0)
 #else
-#define kvm_pmd_table_empty(pmdp) (0)
+#define kvm_pud_table_empty(kvm, pudp) \
+	(kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
 #endif
-#define kvm_pud_table_empty(pudp) (0)
 
 
 struct kvm;
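For context on how these helpers are consumed, the following abridged
sketch follows the pattern of the Stage-2 allocation path in
arch/arm/kvm/mmu.c (shared by arm64). The function name and error
handling here are simplified illustrations, not the literal kernel code.

/* Abridged, illustrative caller pattern (cf. kvm_alloc_stage2_pgd()). */
static int alloc_stage2_pgd_sketch(struct kvm *kvm)
{
	pgd_t *pgd;
	int ret;

	/* Allocate the (possibly fake) PGD the kernel walkers will use. */
	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
	if (!pgd)
		return -ENOMEM;

	/* Pre-populate the dummy level(s) when the host has more levels. */
	ret = kvm_prealloc_hwpgd(kvm, pgd);
	if (ret) {
		free_pages((unsigned long)pgd, S2_PGD_ORDER);
		return ret;
	}

	kvm->arch.pgd = pgd;
	return 0;
}

/*
 * When programming VTTBR_EL2, the hardware base address comes from
 * kvm_get_hwpgd(), which skips past the fake level(s) if present:
 *
 *	pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm));
 *
 * Teardown mirrors allocation: kvm_free_hwpgd() releases the
 * pre-allocated hardware tables before the fake PGD itself is freed.
 */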