diff options
-rw-r--r-- | arch/arm/include/asm/kvm_mmu.h | 1 | ||||
-rw-r--r-- | arch/arm/kvm/arm.c | 7 | ||||
-rw-r--r-- | arch/arm/kvm/mmu.c | 65 | ||||
-rw-r--r-- | arch/arm64/include/asm/kvm_mmu.h | 1 |
4 files changed, 74 insertions, 0 deletions
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index f867060035ec..63e0ecc04901 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h | |||
@@ -52,6 +52,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t); | |||
52 | void free_boot_hyp_pgd(void); | 52 | void free_boot_hyp_pgd(void); |
53 | void free_hyp_pgds(void); | 53 | void free_hyp_pgds(void); |
54 | 54 | ||
55 | void stage2_unmap_vm(struct kvm *kvm); | ||
55 | int kvm_alloc_stage2_pgd(struct kvm *kvm); | 56 | int kvm_alloc_stage2_pgd(struct kvm *kvm); |
56 | void kvm_free_stage2_pgd(struct kvm *kvm); | 57 | void kvm_free_stage2_pgd(struct kvm *kvm); |
57 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, | 58 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, |
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 4043769583e7..da87c07d8577 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c | |||
@@ -701,6 +701,13 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, | |||
701 | if (ret) | 701 | if (ret) |
702 | return ret; | 702 | return ret; |
703 | 703 | ||
704 | /* | ||
705 | * Ensure a rebooted VM will fault in RAM pages and detect if the | ||
706 | * guest MMU is turned off and flush the caches as needed. | ||
707 | */ | ||
708 | if (vcpu->arch.has_run_once) | ||
709 | stage2_unmap_vm(vcpu->kvm); | ||
710 | |||
704 | vcpu_reset_hcr(vcpu); | 711 | vcpu_reset_hcr(vcpu); |
705 | 712 | ||
706 | /* | 713 | /* |
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index f2a9874ff5cb..3756dd3e85c2 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -611,6 +611,71 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) | |||
611 | unmap_range(kvm, kvm->arch.pgd, start, size); | 611 | unmap_range(kvm, kvm->arch.pgd, start, size); |
612 | } | 612 | } |
613 | 613 | ||
614 | static void stage2_unmap_memslot(struct kvm *kvm, | ||
615 | struct kvm_memory_slot *memslot) | ||
616 | { | ||
617 | hva_t hva = memslot->userspace_addr; | ||
618 | phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; | ||
619 | phys_addr_t size = PAGE_SIZE * memslot->npages; | ||
620 | hva_t reg_end = hva + size; | ||
621 | |||
622 | /* | ||
623 | * A memory region could potentially cover multiple VMAs, and any holes | ||
624 | * between them, so iterate over all of them to find out if we should | ||
625 | * unmap any of them. | ||
626 | * | ||
627 | * +--------------------------------------------+ | ||
628 | * +---------------+----------------+ +----------------+ | ||
629 | * | : VMA 1 | VMA 2 | | VMA 3 : | | ||
630 | * +---------------+----------------+ +----------------+ | ||
631 | * | memory region | | ||
632 | * +--------------------------------------------+ | ||
633 | */ | ||
634 | do { | ||
635 | struct vm_area_struct *vma = find_vma(current->mm, hva); | ||
636 | hva_t vm_start, vm_end; | ||
637 | |||
638 | if (!vma || vma->vm_start >= reg_end) | ||
639 | break; | ||
640 | |||
641 | /* | ||
642 | * Take the intersection of this VMA with the memory region | ||
643 | */ | ||
644 | vm_start = max(hva, vma->vm_start); | ||
645 | vm_end = min(reg_end, vma->vm_end); | ||
646 | |||
647 | if (!(vma->vm_flags & VM_PFNMAP)) { | ||
648 | gpa_t gpa = addr + (vm_start - memslot->userspace_addr); | ||
649 | unmap_stage2_range(kvm, gpa, vm_end - vm_start); | ||
650 | } | ||
651 | hva = vm_end; | ||
652 | } while (hva < reg_end); | ||
653 | } | ||
654 | |||
655 | /** | ||
656 | * stage2_unmap_vm - Unmap Stage-2 RAM mappings | ||
657 | * @kvm: The struct kvm pointer | ||
658 | * | ||
659 | * Go through the memregions and unmap any regular RAM | ||
660 | * backing memory already mapped to the VM. | ||
661 | */ | ||
662 | void stage2_unmap_vm(struct kvm *kvm) | ||
663 | { | ||
664 | struct kvm_memslots *slots; | ||
665 | struct kvm_memory_slot *memslot; | ||
666 | int idx; | ||
667 | |||
668 | idx = srcu_read_lock(&kvm->srcu); | ||
669 | spin_lock(&kvm->mmu_lock); | ||
670 | |||
671 | slots = kvm_memslots(kvm); | ||
672 | kvm_for_each_memslot(memslot, slots) | ||
673 | stage2_unmap_memslot(kvm, memslot); | ||
674 | |||
675 | spin_unlock(&kvm->mmu_lock); | ||
676 | srcu_read_unlock(&kvm->srcu, idx); | ||
677 | } | ||
678 | |||
614 | /** | 679 | /** |
615 | * kvm_free_stage2_pgd - free all stage-2 tables | 680 | * kvm_free_stage2_pgd - free all stage-2 tables |
616 | * @kvm: The KVM struct pointer for the VM. | 681 | * @kvm: The KVM struct pointer for the VM. |
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 123b521a9908..14a74f136272 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h | |||
@@ -83,6 +83,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t); | |||
83 | void free_boot_hyp_pgd(void); | 83 | void free_boot_hyp_pgd(void); |
84 | void free_hyp_pgds(void); | 84 | void free_hyp_pgds(void); |
85 | 85 | ||
86 | void stage2_unmap_vm(struct kvm *kvm); | ||
86 | int kvm_alloc_stage2_pgd(struct kvm *kvm); | 87 | int kvm_alloc_stage2_pgd(struct kvm *kvm); |
87 | void kvm_free_stage2_pgd(struct kvm *kvm); | 88 | void kvm_free_stage2_pgd(struct kvm *kvm); |
88 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, | 89 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, |