path: root/arch/arm64
author	Marc Zyngier <marc.zyngier@arm.com>	2017-10-23 12:11:16 -0400
committer	Christoffer Dall <christoffer.dall@linaro.org>	2018-01-08 09:20:43 -0500
commit	4fee94736603cd6fd83c1ea1ee0388d1d2dbe11b (patch)
tree	733ee370f17b3840a30d9f11e162eb4a4786cf69 /arch/arm64
parent	a15f693935a9f1fec8241cafaca27be4483d4464 (diff)
arm64: KVM: Add invalidate_icache_range helper
We currently tightly couple dcache clean with icache invalidation, but
KVM could do without the initial flush to PoU, as we've already flushed
things to PoC.

Let's introduce invalidate_icache_range which is limited to invalidating
the icache from the linear mapping (and thus has none of the userspace
fault handling complexity), and wire it in KVM instead of
flush_icache_range.

Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
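In effect, flush_icache_range() is a D-cache clean to PoU followed by an I-cache invalidate, and the new helper keeps only the second half. A rough pseudo-C sketch of that split, assuming nothing beyond the commit message; the real routines are assembly loops in arch/arm64/mm/cache.S, and every helper name below is made up for illustration:

	extern unsigned long dcache_line_size, icache_line_size; /* illustrative */
	extern void clean_dcache_line_pou(unsigned long va);	/* stands in for "dc cvau" */
	extern void inval_icache_line(unsigned long va);	/* stands in for "ic ivau" */

	static void flush_icache_range_sketch(unsigned long start, unsigned long end)
	{
		unsigned long p;

		/* Clean D-side to PoU: redundant when the caller, as KVM
		 * does here, has already cleaned the data to PoC. */
		for (p = start & ~(dcache_line_size - 1); p < end; p += dcache_line_size)
			clean_dcache_line_pou(p);

		/* Invalidate I-side: the only part KVM actually needs. */
		for (p = start & ~(icache_line_size - 1); p < end; p += icache_line_size)
			inval_icache_line(p);
		/* (followed by barriers: dsb ish; isb) */
	}

	static void invalidate_icache_range_sketch(unsigned long start, unsigned long end)
	{
		unsigned long p;

		/* Align down to line size, as the "bic" in the macro does. */
		for (p = start & ~(icache_line_size - 1); p < end; p += icache_line_size)
			inval_icache_line(p);
		/* (followed by barriers: dsb ish; isb) */
	}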
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/assembler.h	21
-rw-r--r--	arch/arm64/include/asm/cacheflush.h	7
-rw-r--r--	arch/arm64/include/asm/kvm_mmu.h	4
-rw-r--r--	arch/arm64/mm/cache.S	32
4 files changed, 52 insertions(+), 12 deletions(-)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index aef72d886677..0884e1fdfd30 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -388,6 +388,27 @@ alternative_endif
 	.endm
 
 /*
+ * Macro to perform an instruction cache maintenance for the interval
+ * [start, end)
+ *
+ * 	start, end:	virtual addresses describing the region
+ *	label:		A label to branch to on user fault.
+ * 	Corrupts:	tmp1, tmp2
+ */
+	.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
+	icache_line_size \tmp1, \tmp2
+	sub	\tmp2, \tmp1, #1
+	bic	\tmp2, \start, \tmp2
+9997:
+USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
+	add	\tmp2, \tmp2, \tmp1
+	cmp	\tmp2, \end
+	b.lo	9997b
+	dsb	ish
+	isb
+	.endm
+
+/*
  * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
  */
 	.macro	reset_pmuserenr_el0, tmpreg
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 955130762a3c..bef9f418f089 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -52,6 +52,12 @@
  *		- start  - virtual start address
  *		- end    - virtual end address
  *
+ *	invalidate_icache_range(start, end)
+ *
+ *		Invalidate the I-cache in the region described by start, end.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
  *	__flush_cache_user_range(start, end)
  *
  *		Ensure coherency between the I-cache and the D-cache in the
@@ -66,6 +72,7 @@
  *		- size   - region size
  */
 extern void flush_icache_range(unsigned long start, unsigned long end);
+extern int  invalidate_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
 extern void __inval_dcache_area(void *addr, size_t len);
 extern void __clean_dcache_area_poc(void *addr, size_t len);
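Unlike void flush_icache_range(), the new prototype returns an int so that a fault taken on a user mapping during the maintenance can be reported. A hedged usage sketch; the wrapper and its caller are hypothetical, and only invalidate_icache_range() itself comes from this patch:

	#include <asm/cacheflush.h>

	/* Hypothetical caller: instructions were written through the kernel
	 * linear map and the D-side is already clean to PoC, so only the
	 * I-cache needs invalidating. Returns 0 on success, or -EFAULT if
	 * a user fault was taken (see the 2f path in cache.S below). */
	static int sync_new_insns(void *va, unsigned long size)
	{
		return invalidate_icache_range((unsigned long)va,
					       (unsigned long)va + size);
	}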
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 8034b96fb3a4..56b3e03c85e7 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -250,8 +250,8 @@ static inline void __invalidate_icache_guest_page(struct kvm_vcpu *vcpu,
 		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
 		void *va = page_address(pfn_to_page(pfn));
 
-		flush_icache_range((unsigned long)va,
-				   (unsigned long)va + size);
+		invalidate_icache_range((unsigned long)va,
+					(unsigned long)va + size);
 	}
 }
 
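The parent commit split guest-page maintenance into a D-side clean to PoC and the I-side invalidate shown above, which is what makes the extra clean to PoU safe to drop here. A minimal sketch of that two-step pattern; __clean_dcache_guest_page is assumed from that split (only __invalidate_icache_guest_page appears in this hunk), and the wrapper itself is illustrative:

	#include <linux/kvm_host.h>
	#include <asm/kvm_mmu.h>

	static void coherent_guest_page_sketch(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size)
	{
		/* Step 1: clean guest data to PoC (D-side). */
		__clean_dcache_guest_page(vcpu, pfn, size);
		/* Step 2: invalidate stale instructions (I-side only --
		 * after this patch, no redundant clean to PoU). */
		__invalidate_icache_guest_page(vcpu, pfn, size);
	}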
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 7f1dbe962cf5..bedd23da83f4 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -60,16 +60,7 @@ user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
 	b.lo	1b
 	dsb	ish
 
-	icache_line_size x2, x3
-	sub	x3, x2, #1
-	bic	x4, x0, x3
-1:
-USER(9f, ic	ivau, x4 )			// invalidate I line PoU
-	add	x4, x4, x2
-	cmp	x4, x1
-	b.lo	1b
-	dsb	ish
-	isb
+	invalidate_icache_by_line x0, x1, x2, x3, 9f
 	mov	x0, #0
 1:
 	uaccess_ttbr0_disable x1
@@ -81,6 +72,27 @@ ENDPROC(flush_icache_range)
 ENDPROC(__flush_cache_user_range)
 
 /*
+ * invalidate_icache_range(start,end)
+ *
+ * Ensure that the I cache is invalid within specified region.
+ *
+ * - start   - virtual start address of region
+ * - end     - virtual end address of region
+ */
+ENTRY(invalidate_icache_range)
+	uaccess_ttbr0_enable x2, x3
+
+	invalidate_icache_by_line x0, x1, x2, x3, 2f
+	mov	x0, xzr
+1:
+	uaccess_ttbr0_disable x1
+	ret
+2:
+	mov	x0, #-EFAULT
+	b	1b
+ENDPROC(invalidate_icache_range)
+
+/*
  * __flush_dcache_area(kaddr, size)
  *
  * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)