summaryrefslogtreecommitdiffstats
path: root/virt
diff options
context:
space:
mode:
authorMarc Zyngier <marc.zyngier@arm.com>2017-10-23 12:11:19 -0400
committerChristoffer Dall <christoffer.dall@linaro.org>2018-01-08 09:20:45 -0500
commitd0e22b4ac3ba23c611739f554392bf5e217df49f (patch)
tree33dd9a41741e6d8b653de9d715b6b277a2424c63 /virt
parentfefb876b9b96fa7e4ed3d906979ea45b4cf07349 (diff)
KVM: arm/arm64: Limit icache invalidation to prefetch aborts
We've so far eagerly invalidated the icache, no matter how the page was faulted in (data or prefetch abort). But we can easily track execution by setting the XN bits in the S2 page tables, get the prefetch abort at HYP and perform the icache invalidation at that time only. As, for most VMs, the instruction working set is pretty small compared to the data set, this is likely to save some traffic (especially as the invalidation is broadcast). Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com> Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Diffstat (limited to 'virt')
-rw-r--r--virt/kvm/arm/mmu.c19
1 file changed, 15 insertions, 4 deletions
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 2174244f6317..0417c8e2a81c 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1292,7 +1292,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1292 unsigned long fault_status) 1292 unsigned long fault_status)
1293{ 1293{
1294 int ret; 1294 int ret;
1295 bool write_fault, writable, hugetlb = false, force_pte = false; 1295 bool write_fault, exec_fault, writable, hugetlb = false, force_pte = false;
1296 unsigned long mmu_seq; 1296 unsigned long mmu_seq;
1297 gfn_t gfn = fault_ipa >> PAGE_SHIFT; 1297 gfn_t gfn = fault_ipa >> PAGE_SHIFT;
1298 struct kvm *kvm = vcpu->kvm; 1298 struct kvm *kvm = vcpu->kvm;
@@ -1304,7 +1304,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1304 unsigned long flags = 0; 1304 unsigned long flags = 0;
1305 1305
1306 write_fault = kvm_is_write_fault(vcpu); 1306 write_fault = kvm_is_write_fault(vcpu);
1307 if (fault_status == FSC_PERM && !write_fault) { 1307 exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
1308 VM_BUG_ON(write_fault && exec_fault);
1309
1310 if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
1308 kvm_err("Unexpected L2 read permission error\n"); 1311 kvm_err("Unexpected L2 read permission error\n");
1309 return -EFAULT; 1312 return -EFAULT;
1310 } 1313 }
@@ -1398,7 +1401,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1398 kvm_set_pfn_dirty(pfn); 1401 kvm_set_pfn_dirty(pfn);
1399 } 1402 }
1400 clean_dcache_guest_page(vcpu, pfn, PMD_SIZE); 1403 clean_dcache_guest_page(vcpu, pfn, PMD_SIZE);
1401 invalidate_icache_guest_page(vcpu, pfn, PMD_SIZE); 1404
1405 if (exec_fault) {
1406 new_pmd = kvm_s2pmd_mkexec(new_pmd);
1407 invalidate_icache_guest_page(vcpu, pfn, PMD_SIZE);
1408 }
1402 1409
1403 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); 1410 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
1404 } else { 1411 } else {
@@ -1410,7 +1417,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1410 mark_page_dirty(kvm, gfn); 1417 mark_page_dirty(kvm, gfn);
1411 } 1418 }
1412 clean_dcache_guest_page(vcpu, pfn, PAGE_SIZE); 1419 clean_dcache_guest_page(vcpu, pfn, PAGE_SIZE);
1413 invalidate_icache_guest_page(vcpu, pfn, PAGE_SIZE); 1420
1421 if (exec_fault) {
1422 new_pte = kvm_s2pte_mkexec(new_pte);
1423 invalidate_icache_guest_page(vcpu, pfn, PAGE_SIZE);
1424 }
1414 1425
1415 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags); 1426 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
1416 } 1427 }