author     Marc Zyngier <marc.zyngier@arm.com>             2015-01-05 16:13:24 -0500
committer  Christoffer Dall <christoffer.dall@linaro.org>  2015-01-29 17:24:57 -0500
commit     0d3e4d4fade6b04e933b11e69e80044f35e9cd60 (patch)
tree       57b219954fcb587cd0bbf8b3a4abbe40c269a2da /arch/arm/kvm/mmu.c
parent     363ef89f8e9bcedc28b976d0fe2d858fe139c122 (diff)
arm/arm64: KVM: Use kernel mapping to perform invalidation on page fault
When handling a fault in stage-2, we need to resync I$ and D$, just
to be sure we don't leave any old cache line behind.

That's very good, except that we do so using the *user* address.
Under heavy load (swapping like crazy), we may end up in a situation
where the page gets mapped in stage-2 while being unmapped from
userspace by another CPU.

At that point, the DC/IC instructions can generate a fault, which we
handle with kvm->mmu_lock held. The box quickly deadlocks, user is
unhappy.

Instead, perform this invalidation through the kernel mapping, which
is guaranteed to be present. The box is much happier, and so am I.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
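For illustration only: this patch changes just the mmu.c callers, while the
per-architecture helper it relies on lives in arch/arm/include/asm/kvm_mmu.h
and is outside this diffstat. Below is a minimal sketch of what "invalidate
through the kernel mapping" can look like, keyed by pfn rather than the
userspace hva. flush_dcache_to_poc_sketch() is a hypothetical stand-in for
the architecture's clean/invalidate-to-PoC routine, not a real kernel API.

    #include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic() */
    #include <linux/mm.h>		/* pfn_to_page(), page_address() */

    /* Hypothetical stand-in for the arch clean/invalidate-to-PoC primitive. */
    static void flush_dcache_to_poc_sketch(void *va, size_t len);

    /*
     * Walk the range one page at a time and perform the cache maintenance
     * on a kernel alias of each page. Unlike the hva, this mapping cannot
     * be torn down underneath us, so the cache maintenance instructions
     * cannot fault while kvm->mmu_lock is held.
     */
    static void sketch_coherent_cache_guest_page(pfn_t pfn, unsigned long size)
    {
    	while (size) {
    		struct page *page = pfn_to_page(pfn);
    		void *va;

    		if (PageHighMem(page))
    			va = kmap_atomic(page);		/* short-lived kernel alias */
    		else
    			va = page_address(page);	/* lowmem: linear mapping */

    		flush_dcache_to_poc_sketch(va, PAGE_SIZE);

    		if (PageHighMem(page))
    			kunmap_atomic(va);

    		size -= PAGE_SIZE;
    		pfn++;
    	}
    }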
Diffstat (limited to 'arch/arm/kvm/mmu.c')
-rw-r--r--   arch/arm/kvm/mmu.c   12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 78e68abcb01f..136662547ca6 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -957,6 +957,12 @@ static bool kvm_is_device_pfn(unsigned long pfn)
 	return !pfn_valid(pfn);
 }
 
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+				      unsigned long size, bool uncached)
+{
+	__coherent_cache_guest_page(vcpu, pfn, size, uncached);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)
@@ -1046,8 +1052,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pmd_writable(&new_pmd);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
-					  fault_ipa_uncached);
+		coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
 		pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -1055,8 +1060,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pte_writable(&new_pte);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
-					  fault_ipa_uncached);
+		coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
 			pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
 	}