diff options
author | Suzuki K Poulose <suzuki.poulose@arm.com> | 2016-03-22 14:56:21 -0400 |
---|---|---|
committer | Christoffer Dall <christoffer.dall@linaro.org> | 2016-04-21 08:58:16 -0400 |
commit | 64f324979210d4064adf64f19da40c125c9dd137 (patch) | |
tree | 7cdbfd6440ca7439bea9aaa3cf59035992ef7a55 /arch/arm/kvm | |
parent | 70fd19068573e449d47eb2daa69cf5db541ef4f5 (diff) |
kvm-arm: Add explicit hyp page table modifiers
We have common routines to modify hyp and stage2 page tables
based on the 'kvm' parameter. For a smoother transition to
using separate routines for each, duplicate the routines
and modify the copy to work on hyp.
Marks the forked routines with _hyp_ and gets rid of the
kvm parameter which is no longer needed and is NULL for hyp.
Also, gets rid of the kvm_tlb_flush_by_vmid_ipa() calls
from the hyp versions. Uses explicit host page table accessors
instead of the kvm_* page table helpers.
Suggested-by: Christoffer Dall <christoffer.dall@linaro.org>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r-- | arch/arm/kvm/mmu.c | 104 |
1 file changed, 99 insertions, 5 deletions
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index f93f717b5d8b..af526f67022c 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -388,6 +388,100 @@ static void stage2_flush_vm(struct kvm *kvm) | |||
388 | srcu_read_unlock(&kvm->srcu, idx); | 388 | srcu_read_unlock(&kvm->srcu, idx); |
389 | } | 389 | } |
390 | 390 | ||
391 | static void clear_hyp_pgd_entry(pgd_t *pgd) | ||
392 | { | ||
393 | pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL); | ||
394 | pgd_clear(pgd); | ||
395 | pud_free(NULL, pud_table); | ||
396 | put_page(virt_to_page(pgd)); | ||
397 | } | ||
398 | |||
399 | static void clear_hyp_pud_entry(pud_t *pud) | ||
400 | { | ||
401 | pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0); | ||
402 | VM_BUG_ON(pud_huge(*pud)); | ||
403 | pud_clear(pud); | ||
404 | pmd_free(NULL, pmd_table); | ||
405 | put_page(virt_to_page(pud)); | ||
406 | } | ||
407 | |||
408 | static void clear_hyp_pmd_entry(pmd_t *pmd) | ||
409 | { | ||
410 | pte_t *pte_table = pte_offset_kernel(pmd, 0); | ||
411 | VM_BUG_ON(pmd_thp_or_huge(*pmd)); | ||
412 | pmd_clear(pmd); | ||
413 | pte_free_kernel(NULL, pte_table); | ||
414 | put_page(virt_to_page(pmd)); | ||
415 | } | ||
416 | |||
417 | static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end) | ||
418 | { | ||
419 | pte_t *pte, *start_pte; | ||
420 | |||
421 | start_pte = pte = pte_offset_kernel(pmd, addr); | ||
422 | do { | ||
423 | if (!pte_none(*pte)) { | ||
424 | kvm_set_pte(pte, __pte(0)); | ||
425 | put_page(virt_to_page(pte)); | ||
426 | } | ||
427 | } while (pte++, addr += PAGE_SIZE, addr != end); | ||
428 | |||
429 | if (hyp_pte_table_empty(start_pte)) | ||
430 | clear_hyp_pmd_entry(pmd); | ||
431 | } | ||
432 | |||
433 | static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end) | ||
434 | { | ||
435 | phys_addr_t next; | ||
436 | pmd_t *pmd, *start_pmd; | ||
437 | |||
438 | start_pmd = pmd = pmd_offset(pud, addr); | ||
439 | do { | ||
440 | next = pmd_addr_end(addr, end); | ||
441 | /* Hyp doesn't use huge pmds */ | ||
442 | if (!pmd_none(*pmd)) | ||
443 | unmap_hyp_ptes(pmd, addr, next); | ||
444 | } while (pmd++, addr = next, addr != end); | ||
445 | |||
446 | if (hyp_pmd_table_empty(start_pmd)) | ||
447 | clear_hyp_pud_entry(pud); | ||
448 | } | ||
449 | |||
450 | static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end) | ||
451 | { | ||
452 | phys_addr_t next; | ||
453 | pud_t *pud, *start_pud; | ||
454 | |||
455 | start_pud = pud = pud_offset(pgd, addr); | ||
456 | do { | ||
457 | next = pud_addr_end(addr, end); | ||
458 | /* Hyp doesn't use huge puds */ | ||
459 | if (!pud_none(*pud)) | ||
460 | unmap_hyp_pmds(pud, addr, next); | ||
461 | } while (pud++, addr = next, addr != end); | ||
462 | |||
463 | if (hyp_pud_table_empty(start_pud)) | ||
464 | clear_hyp_pgd_entry(pgd); | ||
465 | } | ||
466 | |||
467 | static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size) | ||
468 | { | ||
469 | pgd_t *pgd; | ||
470 | phys_addr_t addr = start, end = start + size; | ||
471 | phys_addr_t next; | ||
472 | |||
473 | /* | ||
474 | * We don't unmap anything from HYP, except at the hyp tear down. | ||
475 | * Hence, we don't have to invalidate the TLBs here. | ||
476 | */ | ||
477 | pgd = pgdp + pgd_index(addr); | ||
478 | do { | ||
479 | next = pgd_addr_end(addr, end); | ||
480 | if (!pgd_none(*pgd)) | ||
481 | unmap_hyp_puds(pgd, addr, next); | ||
482 | } while (pgd++, addr = next, addr != end); | ||
483 | } | ||
484 | |||
391 | /** | 485 | /** |
392 | * free_boot_hyp_pgd - free HYP boot page tables | 486 | * free_boot_hyp_pgd - free HYP boot page tables |
393 | * | 487 | * |
@@ -398,14 +492,14 @@ void free_boot_hyp_pgd(void) | |||
398 | mutex_lock(&kvm_hyp_pgd_mutex); | 492 | mutex_lock(&kvm_hyp_pgd_mutex); |
399 | 493 | ||
400 | if (boot_hyp_pgd) { | 494 | if (boot_hyp_pgd) { |
401 | unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); | 495 | unmap_hyp_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); |
402 | unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); | 496 | unmap_hyp_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); |
403 | free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order); | 497 | free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order); |
404 | boot_hyp_pgd = NULL; | 498 | boot_hyp_pgd = NULL; |
405 | } | 499 | } |
406 | 500 | ||
407 | if (hyp_pgd) | 501 | if (hyp_pgd) |
408 | unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); | 502 | unmap_hyp_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); |
409 | 503 | ||
410 | mutex_unlock(&kvm_hyp_pgd_mutex); | 504 | mutex_unlock(&kvm_hyp_pgd_mutex); |
411 | } | 505 | } |
@@ -430,9 +524,9 @@ void free_hyp_pgds(void) | |||
430 | 524 | ||
431 | if (hyp_pgd) { | 525 | if (hyp_pgd) { |
432 | for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) | 526 | for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) |
433 | unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); | 527 | unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); |
434 | for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) | 528 | for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) |
435 | unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); | 529 | unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); |
436 | 530 | ||
437 | free_pages((unsigned long)hyp_pgd, hyp_pgd_order); | 531 | free_pages((unsigned long)hyp_pgd, hyp_pgd_order); |
438 | hyp_pgd = NULL; | 532 | hyp_pgd = NULL; |