author		Ard Biesheuvel <ard.biesheuvel@linaro.org>	2014-10-10 11:00:32 -0400
committer	Christoffer Dall <christoffer.dall@linaro.org>	2014-10-13 06:36:53 -0400
commit		8eef91239e57d2e932e7470879c9a504d5494ebb
tree		6ae20eb63e0544a5166fdfd32643d9c1dbecb55b	/arch/arm/kvm/mmu.c
parent		4a513fb009b96cf3d86491e00565367ceec29073
arm/arm64: KVM: map MMIO regions at creation time
There is really no point in faulting in memory regions page by page
if they are not backed by demand paged system RAM but by a linear
passthrough mapping of a host MMIO region. So instead, detect such
regions at setup time and install the mappings for the backing all
at once.

Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
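For context (not part of the commit), the sketch below shows roughly how a VMM ends up with such a region: a device MMIO window is mmap()ed into the VMM's address space, which yields a VM_PFNMAP VMA, and is then registered as a KVM memory slot. The file descriptor, slot number, size and guest physical address here are hypothetical.

/*
 * Hypothetical illustration only: register an MMIO passthrough region
 * with KVM. "region_fd" is assumed to be a file descriptor whose mmap()
 * produces a VM_PFNMAP mapping (e.g. a device region); values are made up.
 */
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

int register_mmio_slot(int vm_fd, int region_fd, uint64_t size)
{
	void *hva = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, region_fd, 0);
	if (hva == MAP_FAILED)
		return -1;

	struct kvm_userspace_memory_region region = {
		.slot            = 1,            /* hypothetical slot number */
		.flags           = 0,
		.guest_phys_addr = 0x10000000,   /* hypothetical guest PA */
		.memory_size     = size,
		.userspace_addr  = (uintptr_t)hva,
	};

	/*
	 * After this commit, kvm_arch_prepare_memory_region() detects the
	 * VM_PFNMAP backing and maps the whole range into stage 2 up front.
	 */
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}

With this change, the stage-2 mapping for such a slot is installed during the KVM_SET_USER_MEMORY_REGION call rather than lazily on guest faults.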
Diffstat (limited to 'arch/arm/kvm/mmu.c')
-rw-r--r--	arch/arm/kvm/mmu.c	77
1 file changed, 69 insertions(+), 8 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index a7eabd1c4287..6038027ab1d6 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1134,13 +1134,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   const struct kvm_memory_slot *old,
 				   enum kvm_mr_change change)
 {
-	gpa_t gpa = old->base_gfn << PAGE_SHIFT;
-	phys_addr_t size = old->npages << PAGE_SHIFT;
-	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
-		spin_lock(&kvm->mmu_lock);
-		unmap_stage2_range(kvm, gpa, size);
-		spin_unlock(&kvm->mmu_lock);
-	}
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -1148,7 +1141,69 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_userspace_memory_region *mem,
 				   enum kvm_mr_change change)
 {
-	return 0;
+	hva_t hva = mem->userspace_addr;
+	hva_t reg_end = hva + mem->memory_size;
+	bool writable = !(mem->flags & KVM_MEM_READONLY);
+	int ret = 0;
+
+	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE)
+		return 0;
+
+	/*
+	 * A memory region could potentially cover multiple VMAs, and any holes
+	 * between them, so iterate over all of them to find out if we can map
+	 * any of them right now.
+	 *
+	 *     +--------------------------------------------+
+	 * +---------------+----------------+   +----------------+
+	 * |   : VMA 1     |     VMA 2      |   |    VMA 3  :    |
+	 * +---------------+----------------+   +----------------+
+	 *     |                memory region               |
+	 *     +--------------------------------------------+
+	 */
+	do {
+		struct vm_area_struct *vma = find_vma(current->mm, hva);
+		hva_t vm_start, vm_end;
+
+		if (!vma || vma->vm_start >= reg_end)
+			break;
+
+		/*
+		 * Mapping a read-only VMA is only allowed if the
+		 * memory region is configured as read-only.
+		 */
+		if (writable && !(vma->vm_flags & VM_WRITE)) {
+			ret = -EPERM;
+			break;
+		}
+
+		/*
+		 * Take the intersection of this VMA with the memory region
+		 */
+		vm_start = max(hva, vma->vm_start);
+		vm_end = min(reg_end, vma->vm_end);
+
+		if (vma->vm_flags & VM_PFNMAP) {
+			gpa_t gpa = mem->guest_phys_addr +
+				    (vm_start - mem->userspace_addr);
+			phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
+					 vm_start - vma->vm_start;
+
+			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
+						    vm_end - vm_start,
+						    writable);
+			if (ret)
+				break;
+		}
+		hva = vm_end;
+	} while (hva < reg_end);
+
+	if (ret) {
+		spin_lock(&kvm->mmu_lock);
+		unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
+		spin_unlock(&kvm->mmu_lock);
+	}
+	return ret;
 }
 
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
@@ -1173,4 +1228,10 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 				   struct kvm_memory_slot *slot)
 {
+	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
+	phys_addr_t size = slot->npages << PAGE_SHIFT;
+
+	spin_lock(&kvm->mmu_lock);
+	unmap_stage2_range(kvm, gpa, size);
+	spin_unlock(&kvm->mmu_lock);
 }
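A side note on the address arithmetic in the VM_PFNMAP branch of kvm_arch_prepare_memory_region() above: the guest physical address is offset by how far the VMA intersection starts into the memory region, and the host physical address is derived from the VMA's vm_pgoff (the first PFN of a linear PFN mapping) plus the offset of the intersection into the VMA. The standalone sketch below plugs in made-up numbers (4 KiB pages, hypothetical addresses); it is an illustration, not kernel code.

/*
 * Standalone illustration of the gpa/pa computation above, with
 * hypothetical addresses and 4 KiB pages assumed.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* Memory region as registered by the VMM (hypothetical values). */
	uint64_t guest_phys_addr = 0x10000000;     /* slot base GPA */
	uint64_t userspace_addr  = 0x7f0000000000; /* slot base HVA */

	/* One VM_PFNMAP VMA that starts two pages into the slot. */
	uint64_t vma_start = 0x7f0000002000;
	uint64_t vma_pgoff = 0x8000;               /* first PFN of the mapping */

	/* Intersection start, as computed by max(hva, vma->vm_start). */
	uint64_t vm_start = vma_start;

	uint64_t gpa = guest_phys_addr + (vm_start - userspace_addr);
	uint64_t pa  = (vma_pgoff << PAGE_SHIFT) + (vm_start - vma_start);

	/* Prints gpa=0x10002000 pa=0x8000000 for these inputs. */
	printf("gpa=%#llx pa=%#llx\n",
	       (unsigned long long)gpa, (unsigned long long)pa);
	return 0;
}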