Diffstat (limited to 'arch/arm/kvm/mmu.c')
 arch/arm/kvm/mmu.c | 92 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 86 insertions(+), 6 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 8664ff17cbbe..1dc9778a00af 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -612,6 +612,71 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	unmap_range(kvm, kvm->arch.pgd, start, size);
 }
 
+static void stage2_unmap_memslot(struct kvm *kvm,
+				 struct kvm_memory_slot *memslot)
+{
+	hva_t hva = memslot->userspace_addr;
+	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
+	phys_addr_t size = PAGE_SIZE * memslot->npages;
+	hva_t reg_end = hva + size;
+
+	/*
+	 * A memory region could potentially cover multiple VMAs, and any holes
+	 * between them, so iterate over all of them to find out if we should
+	 * unmap any of them.
+	 *
+	 *     +--------------------------------------------+
+	 * +---------------+----------------+   +----------------+
+	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
+	 * +---------------+----------------+   +----------------+
+	 * |               memory region                |
+	 * +--------------------------------------------+
+	 */
+	do {
+		struct vm_area_struct *vma = find_vma(current->mm, hva);
+		hva_t vm_start, vm_end;
+
+		if (!vma || vma->vm_start >= reg_end)
+			break;
+
+		/*
+		 * Take the intersection of this VMA with the memory region
+		 */
+		vm_start = max(hva, vma->vm_start);
+		vm_end = min(reg_end, vma->vm_end);
+
+		if (!(vma->vm_flags & VM_PFNMAP)) {
+			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
+			unmap_stage2_range(kvm, gpa, vm_end - vm_start);
+		}
+		hva = vm_end;
+	} while (hva < reg_end);
+}
+
+/**
+ * stage2_unmap_vm - Unmap Stage-2 RAM mappings
+ * @kvm: The struct kvm pointer
+ *
+ * Go through the memregions and unmap any regular RAM
+ * backing memory already mapped to the VM.
+ */
+void stage2_unmap_vm(struct kvm *kvm)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	int idx;
+
+	idx = srcu_read_lock(&kvm->srcu);
+	spin_lock(&kvm->mmu_lock);
+
+	slots = kvm_memslots(kvm);
+	kvm_for_each_memslot(memslot, slots)
+		stage2_unmap_memslot(kvm, memslot);
+
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+}
+
 /**
  * kvm_free_stage2_pgd - free all stage-2 tables
  * @kvm:	The KVM struct pointer for the VM.
@@ -853,6 +918,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	struct vm_area_struct *vma;
 	pfn_t pfn;
 	pgprot_t mem_type = PAGE_S2;
+	bool fault_ipa_uncached;
 
 	write_fault = kvm_is_write_fault(vcpu);
 	if (fault_status == FSC_PERM && !write_fault) {
@@ -919,6 +985,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (!hugetlb && !force_pte)
 		hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
 
+	fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;
+
 	if (hugetlb) {
 		pmd_t new_pmd = pfn_pmd(pfn, mem_type);
 		new_pmd = pmd_mkhuge(new_pmd);
@@ -926,7 +994,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pmd_writable(&new_pmd);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
+		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
+					  fault_ipa_uncached);
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
 		pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -934,7 +1003,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pte_writable(&new_pte);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
+		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
+					  fault_ipa_uncached);
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
 			pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
 	}
@@ -1294,11 +1364,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		hva = vm_end;
 	} while (hva < reg_end);
 
-	if (ret) {
-		spin_lock(&kvm->mmu_lock);
-		unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
-		spin_unlock(&kvm->mmu_lock);
-	}
+	spin_lock(&kvm->mmu_lock);
+	if (ret)
+		unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
+	else
+		stage2_flush_memslot(kvm, memslot);
+	spin_unlock(&kvm->mmu_lock);
 	return ret;
 }
 
@@ -1310,6 +1381,15 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 			    unsigned long npages)
 {
+	/*
+	 * Readonly memslots are not incoherent with the caches by definition,
+	 * but in practice, they are used mostly to emulate ROMs or NOR flashes
+	 * that the guest may consider devices and hence map as uncached.
+	 * To prevent incoherency issues in these cases, tag all readonly
+	 * regions as incoherent.
+	 */
+	if (slot->flags & KVM_MEM_READONLY)
+		slot->flags |= KVM_MEMSLOT_INCOHERENT;
 	return 0;
 }
 
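For illustration only, and not part of the patch above: a minimal, self-contained C sketch of the interval walk that stage2_unmap_memslot() performs. A memslot's userspace address range can be covered by several VMAs with holes between them, and only the overlapping, non-PFNMAP pieces are unmapped at stage-2. The fake_vma type, the simplified find_vma() helper, and the example addresses are stand-ins for the kernel structures, not kernel code.

/*
 * Illustrative stand-in types; the real code uses struct vm_area_struct,
 * find_vma(), and unmap_stage2_range() from the kernel.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_vma {
	uint64_t vm_start;	/* inclusive */
	uint64_t vm_end;	/* exclusive */
	int pfnmap;		/* stand-in for VM_PFNMAP */
};

/* Simplified find_vma(): first VMA with vm_end > addr, or NULL. */
static const struct fake_vma *find_vma(const struct fake_vma *vmas, int n,
				       uint64_t addr)
{
	for (int i = 0; i < n; i++)
		if (vmas[i].vm_end > addr)
			return &vmas[i];
	return NULL;
}

int main(void)
{
	/* Three VMAs, with a hole between the second and the third. */
	const struct fake_vma vmas[] = {
		{ 0x1000, 0x3000, 0 },
		{ 0x3000, 0x6000, 0 },
		{ 0x8000, 0xa000, 1 },	/* PFNMAP: left alone */
	};
	const uint64_t slot_hva = 0x2000;	/* memslot->userspace_addr */
	const uint64_t gpa_base = 0x40000000;	/* base_gfn << PAGE_SHIFT */
	const uint64_t reg_end = 0x9000;	/* slot_hva + size */
	uint64_t hva = slot_hva;

	do {
		const struct fake_vma *vma = find_vma(vmas, 3, hva);
		uint64_t vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/* Intersection of this VMA with the memory region. */
		vm_start = hva > vma->vm_start ? hva : vma->vm_start;
		vm_end = reg_end < vma->vm_end ? reg_end : vma->vm_end;

		if (!vma->pfnmap)
			printf("unmap_stage2_range(gpa=0x%llx, size=0x%llx)\n",
			       (unsigned long long)(gpa_base + (vm_start - slot_hva)),
			       (unsigned long long)(vm_end - vm_start));

		hva = vm_end;
	} while (hva < reg_end);

	return 0;
}

Built with any C99 compiler, the sketch prints one unmap call per chunk of the region that overlaps a normal VMA, skips the hole, and leaves the PFNMAP VMA untouched, mirroring the loop added by this patch.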