Diffstat (limited to 'arch/arm/kvm/mmu.c')
 -rw-r--r--  arch/arm/kvm/mmu.c | 370 +++++++++++++++++++++++++++++++++++-
 1 file changed, 369 insertions(+), 1 deletion(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 4decdb618019..4347d68f052f 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -23,12 +23,21 @@
 #include <asm/pgalloc.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/kvm_asm.h>
 #include <asm/mach/map.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
 
 extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
 
 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
 
+static void kvm_tlb_flush_vmid(struct kvm *kvm)
+{
+        kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
+}
+
 static void kvm_set_pte(pte_t *pte, pte_t new_pte)
 {
         pte_val(*pte) = new_pte;
@@ -39,6 +48,38 @@ static void kvm_set_pte(pte_t *pte, pte_t new_pte)
         flush_pmd_entry(pte);
 }
 
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+                                  int min, int max)
+{
+        void *page;
+
+        BUG_ON(max > KVM_NR_MEM_OBJS);
+        if (cache->nobjs >= min)
+                return 0;
+        while (cache->nobjs < max) {
+                page = (void *)__get_free_page(PGALLOC_GFP);
+                if (!page)
+                        return -ENOMEM;
+                cache->objects[cache->nobjs++] = page;
+        }
+        return 0;
+}
+
+static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+{
+        while (mc->nobjs)
+                free_page((unsigned long)mc->objects[--mc->nobjs]);
+}
+
+static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
+{
+        void *p;
+
+        BUG_ON(!mc || !mc->nobjs);
+        p = mc->objects[--mc->nobjs];
+        return p;
+}
+
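These three helpers implement the usual KVM pattern for building page tables from atomic context: fill a small per-caller cache with GFP_KERNEL pages while no spinlock is held, then pop pages from it under mmu_lock. The cache is topped up to two objects because a single mapping may need both a missing second-level and a missing third-level table. A minimal sketch of the pattern, mirroring kvm_phys_addr_ioremap() further down (the function name here is illustrative only):

/* Sketch only: pre-fill the cache, then install one mapping under mmu_lock. */
static int example_map_one_page(struct kvm *kvm, phys_addr_t ipa, pte_t pte)
{
        struct kvm_mmu_memory_cache cache = { 0, };
        int ret;

        /* May sleep: done before taking any spinlock. */
        ret = mmu_topup_memory_cache(&cache, 2, 2);
        if (ret)
                return ret;

        /* Atomic section: stage2_set_pte() only pops pre-allocated pages. */
        spin_lock(&kvm->mmu_lock);
        ret = stage2_set_pte(kvm, &cache, ipa, &pte, false);
        spin_unlock(&kvm->mmu_lock);

        mmu_free_memory_cache(&cache);
        return ret;
}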
 static void free_ptes(pmd_t *pmd, unsigned long addr)
 {
         pte_t *pte;
@@ -217,11 +258,333 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
         return __create_hyp_mappings(from, to, &pfn);
 }
 
+/**
+ * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
+ * @kvm: The KVM struct pointer for the VM.
+ *
+ * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
+ * support either full 40-bit input addresses or limited to 32-bit input
+ * addresses). Clears the allocated pages.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * created, which can only be done once.
+ */
+int kvm_alloc_stage2_pgd(struct kvm *kvm)
+{
+        pgd_t *pgd;
+
+        if (kvm->arch.pgd != NULL) {
+                kvm_err("kvm_arch already initialized?\n");
+                return -EINVAL;
+        }
+
+        pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
+        if (!pgd)
+                return -ENOMEM;
+
+        /* stage-2 pgd must be aligned to its size */
+        VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));
+
+        memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
+        clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+        kvm->arch.pgd = pgd;
+
+        return 0;
+}
+
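The size of that level-1 table follows from the supported IPA range. The real macros live in the kvm_mmu/kvm_arm headers; the arithmetic below is only a sketch, assuming the full 40-bit case with 8-byte LPAE descriptors:

/*
 * Sketch only, hypothetical names: with a 40-bit IPA space the first level
 * resolves IPA bits [39:30], i.e. one entry per 1 GiB of guest physical space.
 */
#define EXAMPLE_KVM_PHYS_SHIFT  40
#define EXAMPLE_PTRS_PER_S2_PGD (1UL << (EXAMPLE_KVM_PHYS_SHIFT - 30))  /* 1024 */
#define EXAMPLE_S2_PGD_SIZE     (EXAMPLE_PTRS_PER_S2_PGD * sizeof(u64)) /* 8 KiB */
/*
 * get_order(8 KiB) == 1, so __get_free_pages() returns a naturally aligned
 * block, which is what lets the VM_BUG_ON above use (S2_PGD_SIZE - 1) as an
 * alignment mask.
 */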
+static void clear_pud_entry(pud_t *pud)
+{
+        pmd_t *pmd_table = pmd_offset(pud, 0);
+        pud_clear(pud);
+        pmd_free(NULL, pmd_table);
+        put_page(virt_to_page(pud));
+}
+
+static void clear_pmd_entry(pmd_t *pmd)
+{
+        pte_t *pte_table = pte_offset_kernel(pmd, 0);
+        pmd_clear(pmd);
+        pte_free_kernel(NULL, pte_table);
+        put_page(virt_to_page(pmd));
+}
+
+static bool pmd_empty(pmd_t *pmd)
+{
+        struct page *pmd_page = virt_to_page(pmd);
+        return page_count(pmd_page) == 1;
+}
+
+static void clear_pte_entry(pte_t *pte)
+{
+        if (pte_present(*pte)) {
+                kvm_set_pte(pte, __pte(0));
+                put_page(virt_to_page(pte));
+        }
+}
+
+static bool pte_empty(pte_t *pte)
+{
+        struct page *pte_page = virt_to_page(pte);
+        return page_count(pte_page) == 1;
+}
+
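The *_empty() helpers work because the struct page backing each table doubles as an entry counter: stage2_set_pte() below takes one get_page() on the table page for every entry it installs, and every clear_*_entry() drops one, so a count of 1 means only the original allocation reference remains. The pairing, reduced to a sketch (these functions are illustrative, not part of the patch):

/* Sketch of the reference discipline used by the stage-2 code. */
static void example_install_entry(pte_t *pte, pte_t new_pte)
{
        kvm_set_pte(pte, new_pte);
        get_page(virt_to_page(pte));    /* +1 reference per live entry */
}

static void example_remove_entry(pte_t *pte)
{
        kvm_set_pte(pte, __pte(0));
        put_page(virt_to_page(pte));    /* back to 1 means the table is empty */
}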
+/**
+ * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
+ * @kvm:   The VM pointer
+ * @start: The intermediate physical base address of the range to unmap
+ * @size:  The size of the area to unmap
+ *
+ * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
+ * be called while holding mmu_lock (unless for freeing the stage2 pgd before
+ * destroying the VM), otherwise another faulting VCPU may come in and mess
+ * with things behind our backs.
+ */
+static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+{
+        pgd_t *pgd;
+        pud_t *pud;
+        pmd_t *pmd;
+        pte_t *pte;
+        phys_addr_t addr = start, end = start + size;
+        u64 range;
+
+        while (addr < end) {
+                pgd = kvm->arch.pgd + pgd_index(addr);
+                pud = pud_offset(pgd, addr);
+                if (pud_none(*pud)) {
+                        addr += PUD_SIZE;
+                        continue;
+                }
+
+                pmd = pmd_offset(pud, addr);
+                if (pmd_none(*pmd)) {
+                        addr += PMD_SIZE;
+                        continue;
+                }
+
+                pte = pte_offset_kernel(pmd, addr);
+                clear_pte_entry(pte);
+                range = PAGE_SIZE;
+
+                /* If we emptied the pte, walk back up the ladder */
+                if (pte_empty(pte)) {
+                        clear_pmd_entry(pmd);
+                        range = PMD_SIZE;
+                        if (pmd_empty(pmd)) {
+                                clear_pud_entry(pud);
+                                range = PUD_SIZE;
+                        }
+                }
+
+                addr += range;
+        }
+}
+
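Within this patch the function is only used for a single page (the MMU-notifier path below) and for the whole IPA space (VM teardown). A hedged sketch of what a later memslot-teardown caller might look like, taking mmu_lock as the comment above requires; the function and the flush at the end are assumptions, not part of this change:

/* Hypothetical caller, for illustration only. */
static void example_unmap_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        phys_addr_t base = (phys_addr_t)slot->base_gfn << PAGE_SHIFT;
        u64 size = (u64)slot->npages << PAGE_SHIFT;

        spin_lock(&kvm->mmu_lock);
        unmap_stage2_range(kvm, base, size);
        spin_unlock(&kvm->mmu_lock);
        kvm_tlb_flush_vmid(kvm);        /* drop stale stage-2 translations */
}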
+/**
+ * kvm_free_stage2_pgd - free all stage-2 tables
+ * @kvm: The KVM struct pointer for the VM.
+ *
+ * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
+ * underlying level-2 and level-3 tables before freeing the actual level-1 table
+ * and setting the struct pointer to NULL.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * destroyed, which can only be done once.
+ */
+void kvm_free_stage2_pgd(struct kvm *kvm)
+{
+        if (kvm->arch.pgd == NULL)
+                return;
+
+        unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+        free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
+        kvm->arch.pgd = NULL;
+}
+
+
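Nothing in this file calls kvm_free_stage2_pgd(); per the comment above it is expected to run exactly once from the arch-specific VM-destruction path. A hypothetical call site, shown only for orientation:

/* Hypothetical VM-teardown hook; the real caller lives outside this file. */
static void example_destroy_vm_mmu(struct kvm *kvm)
{
        kvm_free_stage2_pgd(kvm);       /* unmaps everything, then frees the pgd */
}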
+static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+                          phys_addr_t addr, const pte_t *new_pte, bool iomap)
+{
+        pgd_t *pgd;
+        pud_t *pud;
+        pmd_t *pmd;
+        pte_t *pte, old_pte;
+
+        /* Create 2nd stage page table mapping - Level 1 */
+        pgd = kvm->arch.pgd + pgd_index(addr);
+        pud = pud_offset(pgd, addr);
+        if (pud_none(*pud)) {
+                if (!cache)
+                        return 0; /* ignore calls from kvm_set_spte_hva */
+                pmd = mmu_memory_cache_alloc(cache);
+                pud_populate(NULL, pud, pmd);
+                pmd += pmd_index(addr);
+                get_page(virt_to_page(pud));
+        } else
+                pmd = pmd_offset(pud, addr);
+
+        /* Create 2nd stage page table mapping - Level 2 */
+        if (pmd_none(*pmd)) {
+                if (!cache)
+                        return 0; /* ignore calls from kvm_set_spte_hva */
+                pte = mmu_memory_cache_alloc(cache);
+                clean_pte_table(pte);
+                pmd_populate_kernel(NULL, pmd, pte);
+                pte += pte_index(addr);
+                get_page(virt_to_page(pmd));
+        } else
+                pte = pte_offset_kernel(pmd, addr);
+
+        if (iomap && pte_present(*pte))
+                return -EFAULT;
+
+        /* Create 2nd stage page table mapping - Level 3 */
+        old_pte = *pte;
+        kvm_set_pte(pte, *new_pte);
+        if (pte_present(old_pte))
+                kvm_tlb_flush_vmid(kvm);
+        else
+                get_page(virt_to_page(pte));
+
+        return 0;
+}
+
+/**
+ * kvm_phys_addr_ioremap - map a device range to guest IPA
+ *
+ * @kvm:       The KVM pointer
+ * @guest_ipa: The IPA at which to insert the mapping
+ * @pa:        The physical address of the device
+ * @size:      The size of the mapping
+ */
+int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+                          phys_addr_t pa, unsigned long size)
+{
+        phys_addr_t addr, end;
+        int ret = 0;
+        unsigned long pfn;
+        struct kvm_mmu_memory_cache cache = { 0, };
+
+        end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
+        pfn = __phys_to_pfn(pa);
+
+        for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
+                pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR);
+
+                ret = mmu_topup_memory_cache(&cache, 2, 2);
+                if (ret)
+                        goto out;
+                spin_lock(&kvm->mmu_lock);
+                ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
+                spin_unlock(&kvm->mmu_lock);
+                if (ret)
+                        goto out;
+
+                pfn++;
+        }
+
+out:
+        mmu_free_memory_cache(&cache);
+        return ret;
+}
+
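A typical user of this is device emulation code that needs to expose a host MMIO page to the guest at a fixed IPA. A minimal sketch of such a caller; the addresses are made up and the function is not part of this patch:

/* Hypothetical example: map one 4 KiB device page into the guest. */
static int example_map_device_page(struct kvm *kvm)
{
        phys_addr_t guest_ipa = 0x2c002000;     /* assumed guest-visible address */
        phys_addr_t host_pa   = 0x1c002000;     /* assumed host physical address */

        return kvm_phys_addr_ioremap(kvm, guest_ipa, host_pa, PAGE_SIZE);
}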
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
         return -EINVAL;
 }
 
+static void handle_hva_to_gpa(struct kvm *kvm,
+                              unsigned long start,
+                              unsigned long end,
+                              void (*handler)(struct kvm *kvm,
+                                              gpa_t gpa, void *data),
+                              void *data)
+{
+        struct kvm_memslots *slots;
+        struct kvm_memory_slot *memslot;
+
+        slots = kvm_memslots(kvm);
+
+        /* we only care about the pages that the guest sees */
+        kvm_for_each_memslot(memslot, slots) {
+                unsigned long hva_start, hva_end;
+                gfn_t gfn, gfn_end;
+
+                hva_start = max(start, memslot->userspace_addr);
+                hva_end = min(end, memslot->userspace_addr +
+                                   (memslot->npages << PAGE_SHIFT));
+                if (hva_start >= hva_end)
+                        continue;
+
+                /*
+                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
+                 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
+                 */
+                gfn = hva_to_gfn_memslot(hva_start, memslot);
+                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+
+                for (; gfn < gfn_end; ++gfn) {
+                        gpa_t gpa = gfn << PAGE_SHIFT;
+                        handler(kvm, gpa, data);
+                }
+        }
+}
+
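handle_hva_to_gpa() relies on hva_to_gfn_memslot() from the generic KVM code to turn a host virtual address into a guest frame number; conceptually that is just an offset into the memslot. A sketch of the translation (not the exact generic helper):

/* Sketch of the per-memslot hva -> gpa translation used above. */
static gpa_t example_hva_to_gpa(struct kvm_memory_slot *slot, unsigned long hva)
{
        gfn_t gfn = slot->base_gfn +
                    ((hva - slot->userspace_addr) >> PAGE_SHIFT);

        return (gpa_t)gfn << PAGE_SHIFT;
}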
+static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+        unmap_stage2_range(kvm, gpa, PAGE_SIZE);
+        kvm_tlb_flush_vmid(kvm);
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+        unsigned long end = hva + PAGE_SIZE;
+
+        if (!kvm->arch.pgd)
+                return 0;
+
+        trace_kvm_unmap_hva(hva);
+        handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
+        return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm,
+                        unsigned long start, unsigned long end)
+{
+        if (!kvm->arch.pgd)
+                return 0;
+
+        trace_kvm_unmap_hva_range(start, end);
+        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+        return 0;
+}
+
+static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+        pte_t *pte = (pte_t *)data;
+
+        stage2_set_pte(kvm, NULL, gpa, pte, false);
+}
+
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+        unsigned long end = hva + PAGE_SIZE;
+        pte_t stage2_pte;
+
+        if (!kvm->arch.pgd)
+                return;
+
+        trace_kvm_set_spte_hva(hva);
+        stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
+        handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
+}
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+{
+        mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+}
+
 phys_addr_t kvm_mmu_get_httbr(void)
 {
         VM_BUG_ON(!virt_addr_valid(hyp_pgd));
@@ -230,7 +593,12 @@ phys_addr_t kvm_mmu_get_httbr(void)
 
 int kvm_mmu_init(void)
 {
-        return hyp_pgd ? 0 : -ENOMEM;
+        if (!hyp_pgd) {
+                kvm_err("Hyp mode PGD not allocated\n");
+                return -ENOMEM;
+        }
+
+        return 0;
 }
 
 /**