author		Marc Zyngier <marc.zyngier@arm.com>	2015-01-05 16:13:24 -0500
committer	Christoffer Dall <christoffer.dall@linaro.org>	2015-01-29 17:24:57 -0500
commit		0d3e4d4fade6b04e933b11e69e80044f35e9cd60 (patch)
tree		57b219954fcb587cd0bbf8b3a4abbe40c269a2da /arch/arm/include
parent		363ef89f8e9bcedc28b976d0fe2d858fe139c122 (diff)
arm/arm64: KVM: Use kernel mapping to perform invalidation on page fault
When handling a fault in stage-2, we need to resync I$ and D$, just to be
sure we don't leave any old cache line behind.

That's very good, except that we do so using the *user* address. Under heavy
load (swapping like crazy), we may end up in a situation where the page gets
mapped in stage-2 while being unmapped from userspace by another CPU.

At that point, the DC/IC instructions can generate a fault, which we handle
with kvm->mmu_lock held. The box quickly deadlocks, user is unhappy.

Instead, perform this invalidation through the kernel mapping, which is
guaranteed to be present. The box is much happier, and so am I.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
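The approach described above amounts to walking the guest page range one pfn
at a time through a short-lived kernel mapping, instead of touching the
(possibly already unmapped) user address. A minimal sketch of that pattern,
assuming a hypothetical helper name flush_guest_pages_by_pfn(); the real
helper introduced by this patch is __coherent_cache_guest_page() in the diff
below:

/*
 * Sketch only: clean a physically contiguous guest range to PoC by
 * kmapping each page, so the cache maintenance can never fault on a
 * user mapping torn down by another CPU. Helper name is hypothetical.
 */
static void flush_guest_pages_by_pfn(pfn_t pfn, unsigned long size)
{
	while (size) {
		void *va = kmap_atomic_pfn(pfn);	/* kernel mapping, always present */

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);	/* clean D-cache to PoC */

		kunmap_atomic(va);
		pfn++;
		size -= PAGE_SIZE;
	}
}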
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h | 43 ++++++++++++++++++++++++++++++++++---------
1 file changed, 34 insertions(+), 9 deletions(-)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 552c31f5a3f7..1bca8f8af442 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -162,13 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
 }
 
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
-					     unsigned long size,
-					     bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+					       unsigned long size,
+					       bool ipa_uncached)
 {
-	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-		kvm_flush_dcache_to_poc((void *)hva, size);
-
 	/*
 	 * If we are going to insert an instruction page and the icache is
 	 * either VIPT or PIPT, there is a potential problem where the host
@@ -180,10 +177,38 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
 	 *
 	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
 	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+	 *
+	 * We need to do this through a kernel mapping (using the
+	 * user-space mapping has proved to be the wrong
+	 * solution). For that, we need to kmap one page at a time,
+	 * and iterate over the range.
 	 */
-	if (icache_is_pipt()) {
-		__cpuc_coherent_user_range(hva, hva + size);
-	} else if (!icache_is_vivt_asid_tagged()) {
+
+	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
+
+	VM_BUG_ON(size & PAGE_MASK);
+
+	if (!need_flush && !icache_is_pipt())
+		goto vipt_cache;
+
+	while (size) {
+		void *va = kmap_atomic_pfn(pfn);
+
+		if (need_flush)
+			kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+		if (icache_is_pipt())
+			__cpuc_coherent_user_range((unsigned long)va,
+						   (unsigned long)va + PAGE_SIZE);
+
+		size -= PAGE_SIZE;
+		pfn++;
+
+		kunmap_atomic(va);
+	}
+
+vipt_cache:
+	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
 		/* any kind of VIPT cache */
 		__flush_icache_all();
 	}
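For context, a stage-2 fault handler would now hand this helper the pfn it is
about to map rather than the faulting userspace address; taking a pfn keeps
the cache maintenance independent of the user mapping, so it can run with
kvm->mmu_lock held even if another CPU unmaps the hva concurrently. A minimal,
hedged sketch of such a call site (the variable names pfn and
fault_ipa_uncached are illustrative, and the real call sites live outside
this diffstat):

	/* Illustrative call site only, not part of this hunk. */
	__coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);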