Diffstat (limited to 'arch/arm/kvm/mmu.c')
-rw-r--r--	arch/arm/kvm/mmu.c	455
1 file changed, 257 insertions(+), 198 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 2f12e4056408..965706578f13 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -32,8 +32,15 @@
 
 extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
 
+static pgd_t *boot_hyp_pgd;
+static pgd_t *hyp_pgd;
 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
 
+static void *init_bounce_page;
+static unsigned long hyp_idmap_start;
+static unsigned long hyp_idmap_end;
+static phys_addr_t hyp_idmap_vector;
+
 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
 	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
@@ -71,172 +78,224 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 	return p;
 }
 
-static void free_ptes(pmd_t *pmd, unsigned long addr)
+static void clear_pud_entry(pud_t *pud)
 {
-	pte_t *pte;
-	unsigned int i;
+	pmd_t *pmd_table = pmd_offset(pud, 0);
+	pud_clear(pud);
+	pmd_free(NULL, pmd_table);
+	put_page(virt_to_page(pud));
+}
 
-	for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) {
-		if (!pmd_none(*pmd) && pmd_table(*pmd)) {
-			pte = pte_offset_kernel(pmd, addr);
-			pte_free_kernel(NULL, pte);
-		}
-		pmd++;
+static void clear_pmd_entry(pmd_t *pmd)
+{
+	pte_t *pte_table = pte_offset_kernel(pmd, 0);
+	pmd_clear(pmd);
+	pte_free_kernel(NULL, pte_table);
+	put_page(virt_to_page(pmd));
+}
+
+static bool pmd_empty(pmd_t *pmd)
+{
+	struct page *pmd_page = virt_to_page(pmd);
+	return page_count(pmd_page) == 1;
+}
+
+static void clear_pte_entry(pte_t *pte)
+{
+	if (pte_present(*pte)) {
+		kvm_set_pte(pte, __pte(0));
+		put_page(virt_to_page(pte));
 	}
 }
 
-static void free_hyp_pgd_entry(unsigned long addr)
+static bool pte_empty(pte_t *pte)
+{
+	struct page *pte_page = virt_to_page(pte);
+	return page_count(pte_page) == 1;
+}
+
+static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	unsigned long hyp_addr = KERN_TO_HYP(addr);
+	pte_t *pte;
+	unsigned long long addr = start, end = start + size;
+	u64 range;
+
+	while (addr < end) {
+		pgd = pgdp + pgd_index(addr);
+		pud = pud_offset(pgd, addr);
+		if (pud_none(*pud)) {
+			addr += PUD_SIZE;
+			continue;
+		}
 
-	pgd = hyp_pgd + pgd_index(hyp_addr);
-	pud = pud_offset(pgd, hyp_addr);
+		pmd = pmd_offset(pud, addr);
+		if (pmd_none(*pmd)) {
+			addr += PMD_SIZE;
+			continue;
+		}
 
-	if (pud_none(*pud))
-		return;
-	BUG_ON(pud_bad(*pud));
+		pte = pte_offset_kernel(pmd, addr);
+		clear_pte_entry(pte);
+		range = PAGE_SIZE;
 
-	pmd = pmd_offset(pud, hyp_addr);
-	free_ptes(pmd, addr);
-	pmd_free(NULL, pmd);
-	pud_clear(pud);
+		/* If we emptied the pte, walk back up the ladder */
+		if (pte_empty(pte)) {
+			clear_pmd_entry(pmd);
+			range = PMD_SIZE;
+			if (pmd_empty(pmd)) {
+				clear_pud_entry(pud);
+				range = PUD_SIZE;
+			}
+		}
+
+		addr += range;
+	}
 }
 
 /**
- * free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables
+ * free_boot_hyp_pgd - free HYP boot page tables
  *
- * Assumes this is a page table used strictly in Hyp-mode and therefore contains
- * either mappings in the kernel memory area (above PAGE_OFFSET), or
- * device mappings in the vmalloc range (from VMALLOC_START to VMALLOC_END).
+ * Free the HYP boot page tables. The bounce page is also freed.
  */
-void free_hyp_pmds(void)
+void free_boot_hyp_pgd(void)
 {
-	unsigned long addr;
-
 	mutex_lock(&kvm_hyp_pgd_mutex);
-	for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-		free_hyp_pgd_entry(addr);
-	for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-		free_hyp_pgd_entry(addr);
+
+	if (boot_hyp_pgd) {
+		unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
+		unmap_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+		kfree(boot_hyp_pgd);
+		boot_hyp_pgd = NULL;
+	}
+
+	if (hyp_pgd)
+		unmap_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+
+	kfree(init_bounce_page);
+	init_bounce_page = NULL;
+
 	mutex_unlock(&kvm_hyp_pgd_mutex);
 }
 
-static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
-				    unsigned long end)
+/**
+ * free_hyp_pgds - free Hyp-mode page tables
+ *
+ * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
+ * therefore contains either mappings in the kernel memory area (above
+ * PAGE_OFFSET), or device mappings in the vmalloc range (from
+ * VMALLOC_START to VMALLOC_END).
+ *
+ * boot_hyp_pgd should only map two pages for the init code.
+ */
+void free_hyp_pgds(void)
 {
-	pte_t *pte;
 	unsigned long addr;
-	struct page *page;
 
-	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
-		unsigned long hyp_addr = KERN_TO_HYP(addr);
+	free_boot_hyp_pgd();
+
+	mutex_lock(&kvm_hyp_pgd_mutex);
 
-		pte = pte_offset_kernel(pmd, hyp_addr);
-		BUG_ON(!virt_addr_valid(addr));
-		page = virt_to_page(addr);
-		kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
+	if (hyp_pgd) {
+		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
+			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
+			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+		kfree(hyp_pgd);
+		hyp_pgd = NULL;
 	}
+
+	mutex_unlock(&kvm_hyp_pgd_mutex);
 }
 
-static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
-				       unsigned long end,
-				       unsigned long *pfn_base)
+static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
+				    unsigned long end, unsigned long pfn,
+				    pgprot_t prot)
 {
 	pte_t *pte;
 	unsigned long addr;
 
-	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
-		unsigned long hyp_addr = KERN_TO_HYP(addr);
-
-		pte = pte_offset_kernel(pmd, hyp_addr);
-		BUG_ON(pfn_valid(*pfn_base));
-		kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
-		(*pfn_base)++;
-	}
+	addr = start;
+	do {
+		pte = pte_offset_kernel(pmd, addr);
+		kvm_set_pte(pte, pfn_pte(pfn, prot));
+		get_page(virt_to_page(pte));
+		kvm_flush_dcache_to_poc(pte, sizeof(*pte));
+		pfn++;
+	} while (addr += PAGE_SIZE, addr != end);
 }
 
 static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
-				   unsigned long end, unsigned long *pfn_base)
+				   unsigned long end, unsigned long pfn,
+				   pgprot_t prot)
 {
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned long addr, next;
 
-	for (addr = start; addr < end; addr = next) {
-		unsigned long hyp_addr = KERN_TO_HYP(addr);
-		pmd = pmd_offset(pud, hyp_addr);
+	addr = start;
+	do {
+		pmd = pmd_offset(pud, addr);
 
 		BUG_ON(pmd_sect(*pmd));
 
 		if (pmd_none(*pmd)) {
-			pte = pte_alloc_one_kernel(NULL, hyp_addr);
+			pte = pte_alloc_one_kernel(NULL, addr);
 			if (!pte) {
 				kvm_err("Cannot allocate Hyp pte\n");
 				return -ENOMEM;
 			}
 			pmd_populate_kernel(NULL, pmd, pte);
+			get_page(virt_to_page(pmd));
+			kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
 		}
 
 		next = pmd_addr_end(addr, end);
 
-		/*
-		 * If pfn_base is NULL, we map kernel pages into HYP with the
-		 * virtual address. Otherwise, this is considered an I/O
-		 * mapping and we map the physical region starting at
-		 * *pfn_base to [start, end[.
-		 */
-		if (!pfn_base)
-			create_hyp_pte_mappings(pmd, addr, next);
-		else
-			create_hyp_io_pte_mappings(pmd, addr, next, pfn_base);
-	}
+		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
+		pfn += (next - addr) >> PAGE_SHIFT;
+	} while (addr = next, addr != end);
 
 	return 0;
 }
 
-static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
+static int __create_hyp_mappings(pgd_t *pgdp,
+				 unsigned long start, unsigned long end,
+				 unsigned long pfn, pgprot_t prot)
 {
-	unsigned long start = (unsigned long)from;
-	unsigned long end = (unsigned long)to;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	unsigned long addr, next;
 	int err = 0;
 
-	if (start >= end)
-		return -EINVAL;
-	/* Check for a valid kernel memory mapping */
-	if (!pfn_base && (!virt_addr_valid(from) || !virt_addr_valid(to - 1)))
-		return -EINVAL;
-	/* Check for a valid kernel IO mapping */
-	if (pfn_base && (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)))
-		return -EINVAL;
-
 	mutex_lock(&kvm_hyp_pgd_mutex);
-	for (addr = start; addr < end; addr = next) {
-		unsigned long hyp_addr = KERN_TO_HYP(addr);
-		pgd = hyp_pgd + pgd_index(hyp_addr);
-		pud = pud_offset(pgd, hyp_addr);
+	addr = start & PAGE_MASK;
+	end = PAGE_ALIGN(end);
+	do {
+		pgd = pgdp + pgd_index(addr);
+		pud = pud_offset(pgd, addr);
 
 		if (pud_none_or_clear_bad(pud)) {
-			pmd = pmd_alloc_one(NULL, hyp_addr);
+			pmd = pmd_alloc_one(NULL, addr);
 			if (!pmd) {
 				kvm_err("Cannot allocate Hyp pmd\n");
 				err = -ENOMEM;
 				goto out;
 			}
 			pud_populate(NULL, pud, pmd);
+			get_page(virt_to_page(pud));
+			kvm_flush_dcache_to_poc(pud, sizeof(*pud));
 		}
 
 		next = pgd_addr_end(addr, end);
-		err = create_hyp_pmd_mappings(pud, addr, next, pfn_base);
+		err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
 		if (err)
 			goto out;
-	}
+		pfn += (next - addr) >> PAGE_SHIFT;
+	} while (addr = next, addr != end);
 out:
 	mutex_unlock(&kvm_hyp_pgd_mutex);
 	return err;
@@ -250,27 +309,41 @@ out:
  * The same virtual address as the kernel virtual address is also used
  * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
  * physical pages.
- *
- * Note: Wrapping around zero in the "to" address is not supported.
  */
 int create_hyp_mappings(void *from, void *to)
 {
-	return __create_hyp_mappings(from, to, NULL);
+	unsigned long phys_addr = virt_to_phys(from);
+	unsigned long start = KERN_TO_HYP((unsigned long)from);
+	unsigned long end = KERN_TO_HYP((unsigned long)to);
+
+	/* Check for a valid kernel memory mapping */
+	if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
+		return -EINVAL;
+
+	return __create_hyp_mappings(hyp_pgd, start, end,
+				     __phys_to_pfn(phys_addr), PAGE_HYP);
 }
 
 /**
  * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
  * @from: The kernel start VA of the range
  * @to: The kernel end VA of the range (exclusive)
- * @addr: The physical start address which gets mapped
+ * @phys_addr: The physical start address which gets mapped
  *
  * The resulting HYP VA is the same as the kernel VA, modulo
  * HYP_PAGE_OFFSET.
  */
-int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
+int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
 {
-	unsigned long pfn = __phys_to_pfn(addr);
-	return __create_hyp_mappings(from, to, &pfn);
+	unsigned long start = KERN_TO_HYP((unsigned long)from);
+	unsigned long end = KERN_TO_HYP((unsigned long)to);
+
+	/* Check for a valid kernel IO mapping */
+	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
+		return -EINVAL;
+
+	return __create_hyp_mappings(hyp_pgd, start, end,
+				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
 }
 
 /**
@@ -307,42 +380,6 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	return 0;
 }
 
-static void clear_pud_entry(pud_t *pud)
-{
-	pmd_t *pmd_table = pmd_offset(pud, 0);
-	pud_clear(pud);
-	pmd_free(NULL, pmd_table);
-	put_page(virt_to_page(pud));
-}
-
-static void clear_pmd_entry(pmd_t *pmd)
-{
-	pte_t *pte_table = pte_offset_kernel(pmd, 0);
-	pmd_clear(pmd);
-	pte_free_kernel(NULL, pte_table);
-	put_page(virt_to_page(pmd));
-}
-
-static bool pmd_empty(pmd_t *pmd)
-{
-	struct page *pmd_page = virt_to_page(pmd);
-	return page_count(pmd_page) == 1;
-}
-
-static void clear_pte_entry(pte_t *pte)
-{
-	if (pte_present(*pte)) {
-		kvm_set_pte(pte, __pte(0));
-		put_page(virt_to_page(pte));
-	}
-}
-
-static bool pte_empty(pte_t *pte)
-{
-	struct page *pte_page = virt_to_page(pte);
-	return page_count(pte_page) == 1;
-}
-
 /**
  * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
  * @kvm: The VM pointer
@@ -356,43 +393,7 @@ static bool pte_empty(pte_t *pte)
  */
 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	phys_addr_t addr = start, end = start + size;
-	u64 range;
-
-	while (addr < end) {
-		pgd = kvm->arch.pgd + pgd_index(addr);
-		pud = pud_offset(pgd, addr);
-		if (pud_none(*pud)) {
-			addr += PUD_SIZE;
-			continue;
-		}
-
-		pmd = pmd_offset(pud, addr);
-		if (pmd_none(*pmd)) {
-			addr += PMD_SIZE;
-			continue;
-		}
-
-		pte = pte_offset_kernel(pmd, addr);
-		clear_pte_entry(pte);
-		range = PAGE_SIZE;
-
-		/* If we emptied the pte, walk back up the ladder */
-		if (pte_empty(pte)) {
-			clear_pmd_entry(pmd);
-			range = PMD_SIZE;
-			if (pmd_empty(pmd)) {
-				clear_pud_entry(pud);
-				range = PUD_SIZE;
-			}
-		}
-
-		addr += range;
-	}
+	unmap_range(kvm->arch.pgd, start, size);
 }
 
 /**
@@ -728,47 +729,105 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 
 phys_addr_t kvm_mmu_get_httbr(void)
 {
-	VM_BUG_ON(!virt_addr_valid(hyp_pgd));
 	return virt_to_phys(hyp_pgd);
 }
 
+phys_addr_t kvm_mmu_get_boot_httbr(void)
+{
+	return virt_to_phys(boot_hyp_pgd);
+}
+
+phys_addr_t kvm_get_idmap_vector(void)
+{
+	return hyp_idmap_vector;
+}
+
 int kvm_mmu_init(void)
 {
-	if (!hyp_pgd) {
+	int err;
+
+	hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start);
+	hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end);
+	hyp_idmap_vector = virt_to_phys(__kvm_hyp_init);
+
+	if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
+		/*
+		 * Our init code is crossing a page boundary. Allocate
+		 * a bounce page, copy the code over and use that.
+		 */
+		size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
+		phys_addr_t phys_base;
+
+		init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+		if (!init_bounce_page) {
+			kvm_err("Couldn't allocate HYP init bounce page\n");
+			err = -ENOMEM;
+			goto out;
+		}
+
+		memcpy(init_bounce_page, __hyp_idmap_text_start, len);
+		/*
+		 * Warning: the code we just copied to the bounce page
+		 * must be flushed to the point of coherency.
+		 * Otherwise, the data may be sitting in L2, and HYP
+		 * mode won't be able to observe it as it runs with
+		 * caches off at that point.
+		 */
+		kvm_flush_dcache_to_poc(init_bounce_page, len);
+
+		phys_base = virt_to_phys(init_bounce_page);
+		hyp_idmap_vector += phys_base - hyp_idmap_start;
+		hyp_idmap_start = phys_base;
+		hyp_idmap_end = phys_base + len;
+
+		kvm_info("Using HYP init bounce page @%lx\n",
+			 (unsigned long)phys_base);
+	}
+
+	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+	boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+	if (!hyp_pgd || !boot_hyp_pgd) {
 		kvm_err("Hyp mode PGD not allocated\n");
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto out;
 	}
 
-	return 0;
-}
+	/* Create the idmap in the boot page tables */
+	err = __create_hyp_mappings(boot_hyp_pgd,
+				    hyp_idmap_start, hyp_idmap_end,
+				    __phys_to_pfn(hyp_idmap_start),
+				    PAGE_HYP);
 
-/**
- * kvm_clear_idmap - remove all idmaps from the hyp pgd
- *
- * Free the underlying pmds for all pgds in range and clear the pgds (but
- * don't free them) afterwards.
- */
-void kvm_clear_hyp_idmap(void)
-{
-	unsigned long addr, end;
-	unsigned long next;
-	pgd_t *pgd = hyp_pgd;
-	pud_t *pud;
-	pmd_t *pmd;
+	if (err) {
+		kvm_err("Failed to idmap %lx-%lx\n",
+			hyp_idmap_start, hyp_idmap_end);
+		goto out;
+	}
 
-	addr = virt_to_phys(__hyp_idmap_text_start);
-	end = virt_to_phys(__hyp_idmap_text_end);
+	/* Map the very same page at the trampoline VA */
+	err = __create_hyp_mappings(boot_hyp_pgd,
+				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
+				    __phys_to_pfn(hyp_idmap_start),
+				    PAGE_HYP);
+	if (err) {
+		kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
+			TRAMPOLINE_VA);
+		goto out;
+	}
 
-	pgd += pgd_index(addr);
-	do {
-		next = pgd_addr_end(addr, end);
-		if (pgd_none_or_clear_bad(pgd))
-			continue;
-		pud = pud_offset(pgd, addr);
-		pmd = pmd_offset(pud, addr);
+	/* Map the same page again into the runtime page tables */
+	err = __create_hyp_mappings(hyp_pgd,
+				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
+				    __phys_to_pfn(hyp_idmap_start),
+				    PAGE_HYP);
+	if (err) {
+		kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
+			TRAMPOLINE_VA);
+		goto out;
+	}
 
-		pud_clear(pud);
-		kvm_clean_pmd_entry(pmd);
-		pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
-	} while (pgd++, addr = next, addr < end);
+	return 0;
+out:
+	free_hyp_pgds();
+	return err;
 }
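
Note on the scheme used throughout the diff above: when a HYP entry is populated, the code takes a reference on the page backing that table with get_page(), and drops it with put_page() when the entry is cleared; pte_empty()/pmd_empty() then treat a backing page whose count has fallen back to 1 (the allocation's own reference) as an empty table that the level above may clear and free. The following is a minimal userspace sketch of that counting idea only, assuming nothing beyond the diff itself; the struct and function names are invented for illustration and this is not kernel code.

/*
 * Toy model of the reference-counting scheme above (plain userspace C).
 * One base reference represents the allocated table page; every mapped
 * entry adds a reference; the table counts as empty again at count == 1.
 */
#include <stdio.h>

struct toy_table {
	int page_count;		/* stands in for page_count(virt_to_page(table)) */
};

static void toy_map_entry(struct toy_table *t)
{
	t->page_count++;	/* mirrors get_page() when an entry is populated */
}

static void toy_unmap_entry(struct toy_table *t)
{
	t->page_count--;	/* mirrors put_page() when an entry is cleared */
}

static int toy_table_empty(const struct toy_table *t)
{
	return t->page_count == 1;	/* mirrors pte_empty()/pmd_empty() */
}

int main(void)
{
	struct toy_table pte_table = { .page_count = 1 };	/* freshly allocated */
	int i;

	for (i = 0; i < 3; i++)
		toy_map_entry(&pte_table);
	for (i = 0; i < 3; i++)
		toy_unmap_entry(&pte_table);

	/* Once empty, the level above would clear its entry and free the table. */
	printf("table empty: %s\n", toy_table_empty(&pte_table) ? "yes" : "no");
	return 0;
}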