aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorArd Biesheuvel <ard.biesheuvel@linaro.org>2014-11-17 09:58:53 -0500
committerMarc Zyngier <marc.zyngier@arm.com>2014-11-25 08:57:27 -0500
commit849260c72c6b8bd53850cb00b80027db3a273c2c (patch)
treece059d25111bf862c2da787adecf2d765ec0af01 /arch
parent840f4bfbe03f1ce94ade8fdf84e8cd925ef15a48 (diff)
arm, arm64: KVM: handle potential incoherency of readonly memslots
Readonly memslots are often used to implement emulation of ROMs and NOR flashes, in which case the guest may legally map these regions as uncached. To deal with the incoherency associated with uncached guest mappings, treat all readonly memslots as incoherent, and ensure that pages that belong to regions tagged as such are flushed to DRAM before being passed to the guest. Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'arch')
-rw-r--r-- arch/arm/kvm/mmu.c | 20 +++++++++++++-----
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index cb924c6d56a6..f2a9874ff5cb 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -919,7 +919,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (!hugetlb && !force_pte)
 		hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
 
-	fault_ipa_uncached = false;
+	fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;
 
 	if (hugetlb) {
 		pmd_t new_pmd = pfn_pmd(pfn, mem_type);
@@ -1298,11 +1298,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		hva = vm_end;
 	} while (hva < reg_end);
 
-	if (ret) {
-		spin_lock(&kvm->mmu_lock);
+	spin_lock(&kvm->mmu_lock);
+	if (ret)
 		unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
-		spin_unlock(&kvm->mmu_lock);
-	}
+	else
+		stage2_flush_memslot(kvm, memslot);
+	spin_unlock(&kvm->mmu_lock);
 	return ret;
 }
 
@@ -1314,6 +1315,15 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 			    unsigned long npages)
 {
+	/*
+	 * Readonly memslots are not incoherent with the caches by definition,
+	 * but in practice, they are used mostly to emulate ROMs or NOR flashes
+	 * that the guest may consider devices and hence map as uncached.
+	 * To prevent incoherency issues in these cases, tag all readonly
+	 * regions as incoherent.
+	 */
+	if (slot->flags & KVM_MEM_READONLY)
+		slot->flags |= KVM_MEMSLOT_INCOHERENT;
 	return 0;
 }
 