aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/include/asm
diff options
context:
space:
mode:
authorMarc Zyngier <marc.zyngier@arm.com>2014-12-19 11:48:06 -0500
committerChristoffer Dall <christoffer.dall@linaro.org>2015-01-29 17:24:56 -0500
commit363ef89f8e9bcedc28b976d0fe2d858fe139c122 (patch)
tree55a6c32ba12f3716892b78c5ac769b6ed2d488d5 /arch/arm/include/asm
parent3c1e716508335eb132c9349cb1a1716c8f7e3d2e (diff)
arm/arm64: KVM: Invalidate data cache on unmap
Let's assume a guest has created an uncached mapping, and written to that page. Let's also assume that the host uses a cache-coherent IO subsystem. Let's finally assume that the host is under memory pressure and starts to swap things out. Before this "uncached" page is evicted, we need to make sure we invalidate potential speculated, clean cache lines that are sitting there, or the IO subsystem is going to swap out the cached view, losing the data that has been written directly into memory. Signed-off-by: Marc Zyngier <marc.zyngier@arm.com> Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--arch/arm/include/asm/kvm_mmu.h31
1 file changed, 31 insertions, 0 deletions
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 286644c729ba..552c31f5a3f7 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -44,6 +44,7 @@
44 44
45#ifndef __ASSEMBLY__ 45#ifndef __ASSEMBLY__
46 46
47#include <linux/highmem.h>
47#include <asm/cacheflush.h> 48#include <asm/cacheflush.h>
48#include <asm/pgalloc.h> 49#include <asm/pgalloc.h>
49 50
@@ -188,6 +189,36 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
188 } 189 }
189} 190}
190 191
192static inline void __kvm_flush_dcache_pte(pte_t pte)
193{
194 void *va = kmap_atomic(pte_page(pte));
195
196 kvm_flush_dcache_to_poc(va, PAGE_SIZE);
197
198 kunmap_atomic(va);
199}
200
201static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
202{
203 unsigned long size = PMD_SIZE;
204 pfn_t pfn = pmd_pfn(pmd);
205
206 while (size) {
207 void *va = kmap_atomic_pfn(pfn);
208
209 kvm_flush_dcache_to_poc(va, PAGE_SIZE);
210
211 pfn++;
212 size -= PAGE_SIZE;
213
214 kunmap_atomic(va);
215 }
216}
217
218static inline void __kvm_flush_dcache_pud(pud_t pud)
219{
220}
221
191#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x)) 222#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))
192 223
193void kvm_set_way_flush(struct kvm_vcpu *vcpu); 224void kvm_set_way_flush(struct kvm_vcpu *vcpu);