author		Marc Zyngier <marc.zyngier@arm.com>	2014-01-15 07:50:23 -0500
committer	Marc Zyngier <marc.zyngier@arm.com>	2014-03-02 20:15:22 -0500
commit		9d218a1fcf4c6b759d442ef702842fae92e1ea61 (patch)
tree		1d94bc1753a84945ef1d31c4e162c660829c2875
parent		a3c8bd31af260a17d626514f636849ee1cd1f63e (diff)
arm64: KVM: flush VM pages before letting the guest enable caches
When the guest runs with caches disabled (like in an early boot
sequence, for example), all the writes are directly going to RAM,
bypassing the caches altogether.

Once the MMU and caches are enabled, whatever sits in the cache
becomes suddenly visible, which isn't what the guest expects.

A way to avoid this potential disaster is to invalidate the cache
when the MMU is being turned on. For this, we hook into the SCTLR_EL1
trapping code, and scan the stage-2 page tables, invalidating the
pages/sections that have already been mapped in.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
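For orientation, the control flow this patch introduces is roughly the
following (a condensed sketch of the code in the diff below; the SRCU
read-side and mmu_lock handling are elided):

	guest writes SCTLR_EL1 to enable the MMU/caches (trapped because HCR_EL2.TVM is set)
	  -> access_sctlr()                        [arch/arm64/kvm/sys_regs.c]
	       -> access_vm_reg()                  emulate the register write
	       -> vcpu_has_cache_enabled()?        MMU + caches are now on
	            -> clear HCR_TVM and call stage2_flush_vm()
	                 -> stage2_flush_memslot() for each memslot
	                      -> stage2_flush_puds() -> stage2_flush_pmds() -> stage2_flush_ptes()
	                           -> kvm_flush_dcache_to_poc() on gfn_to_hva(gfn),
	                              for PUD_SIZE/PMD_SIZE/PAGE_SIZE depending on the mapping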
-rw-r--r--	arch/arm/include/asm/kvm_mmu.h		2
-rw-r--r--	arch/arm/kvm/mmu.c			93
-rw-r--r--	arch/arm64/include/asm/kvm_mmu.h	2
-rw-r--r--	arch/arm64/kvm/sys_regs.c		5
4 files changed, 101 insertions(+), 1 deletion(-)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 891afe78311a..eb85b81eea6f 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -155,6 +155,8 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
 #define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
 #define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))
 
+void stage2_flush_vm(struct kvm *kvm);
+
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index c1c08b240f35..d7e998c6a08f 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -187,6 +187,99 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 	}
 }
 
+static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
+			      phys_addr_t addr, phys_addr_t end)
+{
+	pte_t *pte;
+
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		if (!pte_none(*pte)) {
+			hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
+			kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
+		}
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
+			      phys_addr_t addr, phys_addr_t end)
+{
+	pmd_t *pmd;
+	phys_addr_t next;
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = kvm_pmd_addr_end(addr, end);
+		if (!pmd_none(*pmd)) {
+			if (kvm_pmd_huge(*pmd)) {
+				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
+				kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
+			} else {
+				stage2_flush_ptes(kvm, pmd, addr, next);
+			}
+		}
+	} while (pmd++, addr = next, addr != end);
+}
+
+static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
+			      phys_addr_t addr, phys_addr_t end)
+{
+	pud_t *pud;
+	phys_addr_t next;
+
+	pud = pud_offset(pgd, addr);
+	do {
+		next = kvm_pud_addr_end(addr, end);
+		if (!pud_none(*pud)) {
+			if (pud_huge(*pud)) {
+				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
+				kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
+			} else {
+				stage2_flush_pmds(kvm, pud, addr, next);
+			}
+		}
+	} while (pud++, addr = next, addr != end);
+}
+
+static void stage2_flush_memslot(struct kvm *kvm,
+				 struct kvm_memory_slot *memslot)
+{
+	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
+	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
+	phys_addr_t next;
+	pgd_t *pgd;
+
+	pgd = kvm->arch.pgd + pgd_index(addr);
+	do {
+		next = kvm_pgd_addr_end(addr, end);
+		stage2_flush_puds(kvm, pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
+}
+
+/**
+ * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
+ * @kvm: The struct kvm pointer
+ *
+ * Go through the stage 2 page tables and invalidate any cache lines
+ * backing memory already mapped to the VM.
+ */
+void stage2_flush_vm(struct kvm *kvm)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	int idx;
+
+	idx = srcu_read_lock(&kvm->srcu);
+	spin_lock(&kvm->mmu_lock);
+
+	slots = kvm_memslots(kvm);
+	kvm_for_each_memslot(memslot, slots)
+		stage2_flush_memslot(kvm, memslot);
+
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+}
+
 /**
  * free_boot_hyp_pgd - free HYP boot page tables
  *
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 00c0cc8b8045..7d29847a893b 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -150,5 +150,7 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
 
 #define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))
 
+void stage2_flush_vm(struct kvm *kvm);
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2097e5ecba42..03244582bc55 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -27,6 +27,7 @@
 #include <asm/kvm_host.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
 #include <trace/events/kvm.h>
@@ -154,8 +155,10 @@ static bool access_sctlr(struct kvm_vcpu *vcpu,
 {
 	access_vm_reg(vcpu, p, r);
 
-	if (vcpu_has_cache_enabled(vcpu))	/* MMU+Caches enabled? */
+	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
 		vcpu->arch.hcr_el2 &= ~HCR_TVM;
+		stage2_flush_vm(vcpu->kvm);
+	}
 
 	return true;
 }