author    Marc Zyngier <marc.zyngier@arm.com>    2014-01-14 14:13:10 -0500
committer Marc Zyngier <marc.zyngier@arm.com>    2014-03-02 20:15:20 -0500
commit    2d58b733c87689d3d5144e4ac94ea861cc729145 (patch)
tree      edd6af8c4b9f314684b372712babfb8abb323450 /arch/arm64
parent    1b385cbdd74aa803e966e01e5fe49490d6044e30 (diff)
arm64: KVM: force cache clean on page fault when caches are off
In order for a guest running with caches off to observe data written to a given page, we need to make sure that page is committed to memory, and not just hanging in the cache (guest accesses bypass the cache completely until the guest decides to enable it).

For this purpose, hook into the coherent_icache_guest_page function and flush the region if the guest SCTLR_EL1 register doesn't show the MMU and caches as being enabled. The function also gets renamed to coherent_cache_guest_page.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
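For reference, the 0b101 mask tested by vcpu_has_cache_enabled() in the hunk below covers two architectural SCTLR_EL1 bits: bit 0 (M, stage 1 MMU enable) and bit 2 (C, data cacheability enable). Here is a minimal standalone sketch of the same check; SCTLR_EL1_M, SCTLR_EL1_C and guest_caches_enabled are illustrative names, not identifiers introduced by this patch:

	#define SCTLR_EL1_M	(1UL << 0)	/* MMU enable */
	#define SCTLR_EL1_C	(1UL << 2)	/* data cache enable */

	/* Equivalent to the patch's (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101 */
	static inline bool guest_caches_enabled(unsigned long sctlr_el1)
	{
		return (sctlr_el1 & (SCTLR_EL1_M | SCTLR_EL1_C)) ==
		       (SCTLR_EL1_M | SCTLR_EL1_C);
	}

The guest only counts as running with caches on when both bits are set; otherwise its loads and stores go straight to memory, so anything left dirty in the data cache must first be cleaned to the Point of Coherency for the guest to see it.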
Diffstat (limited to 'arch/arm64')
-rw-r--r--    arch/arm64/include/asm/kvm_mmu.h    16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 7f1f9408ff66..6eaf69b5e42c 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -106,7 +106,6 @@ static inline bool kvm_is_write_fault(unsigned long esr)
 	return true;
 }
 
-static inline void kvm_clean_dcache_area(void *addr, size_t size) {}
 static inline void kvm_clean_pgd(pgd_t *pgd) {}
 static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
 static inline void kvm_clean_pte(pte_t *pte) {}
@@ -124,9 +123,19 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
 
 struct kvm;
 
-static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
-					      unsigned long size)
+#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))
+
+static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
+{
+	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
+}
+
+static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+					     unsigned long size)
 {
+	if (!vcpu_has_cache_enabled(vcpu))
+		kvm_flush_dcache_to_poc((void *)hva, size);
+
 	if (!icache_is_aliasing()) {		/* PIPT */
 		flush_icache_range(hva, hva + size);
 	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
@@ -135,7 +144,6 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
 	}
 }
 
-#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))
 #define kvm_virt_to_phys(x)	__virt_to_phys((unsigned long)(x))
 
 #endif /* __ASSEMBLY__ */
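Call-site note: as the subject line says, the clean happens on the stage-2 page fault path. A heavily hedged sketch of how the renamed helper is invoked there (the surrounding fault-handling code is assumed, and PAGE_SIZE stands in for the size of the region just mapped):

	/* After mapping a page for the guest, make its contents visible
	 * to a guest that may be running with caches off. */
	coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);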