author    Marc Zyngier <marc.zyngier@arm.com>  2017-01-25 07:29:59 -0500
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-03-12 00:41:47 -0500
commit    ac4c8fcf5ebceb0a2b6342707e4835a13f5bcf22 (patch)
tree      aa65000ba16b48e61de808708aa30de7fabd7304 /arch/arm/include
parent    bfb55d4087cfc4346b1ec5a04cbac04e9df10ab4 (diff)
arm/arm64: KVM: Enforce unconditional flush to PoC when mapping to stage-2
commit 8f36ebaf21fdae99c091c67e8b6fab33969f2667 upstream.

When we fault in a page, we flush it to the PoC (Point of Coherency)
if the faulting vcpu has its own caches off, so that it can observe
the page we just brought in.

But if the vcpu has its caches on, we skip that step. Bad things
happen when *another* vcpu tries to access that page with its own
caches disabled. At that point, there is no guarantee that the data
has made it to the PoC, and we access stale data.

The obvious fix is to always flush to PoC when a page is faulted in,
no matter what the state of the vcpu is.

Fixes: 2d58b733c876 ("arm64: KVM: force cache clean on page fault when caches are off")
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
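As a quick orientation before the diff, here is a condensed before/after
sketch of the flush logic in __coherent_cache_guest_page() (simplified
from the hunks below, not a verbatim excerpt; the elided loop body and
surrounding code are unchanged):

	/* Before: the flush to PoC was gated on the faulting vcpu's
	 * cache state, so a page faulted in while its caches were on
	 * was never pushed out to the Point of Coherency.
	 */
	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
	...
	if (need_flush)
		kvm_flush_dcache_to_poc(va, PAGE_SIZE);

	/* After: flush unconditionally, so the data is guaranteed to
	 * reach the PoC and stays visible to another vcpu that later
	 * accesses the page with its own caches disabled.
	 */
	kvm_flush_dcache_to_poc(va, PAGE_SIZE);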
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h | 9 +--------
1 file changed, 1 insertion(+), 8 deletions(-)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 74a44727f8e1..a58bbaa3ec60 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -150,18 +150,12 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
 	 * and iterate over the range.
 	 */
 
-	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
-
 	VM_BUG_ON(size & ~PAGE_MASK);
 
-	if (!need_flush && !icache_is_pipt())
-		goto vipt_cache;
-
 	while (size) {
 		void *va = kmap_atomic_pfn(pfn);
 
-		if (need_flush)
-			kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
 
 		if (icache_is_pipt())
 			__cpuc_coherent_user_range((unsigned long)va,
@@ -173,7 +167,6 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
 		kunmap_atomic(va);
 	}
 
-vipt_cache:
 	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
 		/* any kind of VIPT cache */
 		__flush_icache_all();