cgit: about · summary · refs · log · tree · commit · diff · stats
diff options
  context: (default)
  space:   (default)
  mode:    (default)

 arch/arm64/include/asm/cacheflush.h |  1 +
 arch/arm64/kernel/head.S            | 18 +++++++++---------
 arch/arm64/mm/cache.S               | 23 ++++++++++++---------
 3 files changed, 24 insertions(+), 18 deletions(-)
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 4d4f650c290e..b4b43a94dffd 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -67,6 +67,7 @@
  */
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
+extern void __inval_dcache_area(void *addr, size_t len);
 extern void __clean_dcache_area_poc(void *addr, size_t len);
 extern void __clean_dcache_area_pou(void *addr, size_t len);
 extern long __flush_cache_user_range(unsigned long start, unsigned long end);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 973df7de7bf8..73a0531e0187 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -143,8 +143,8 @@ preserve_boot_args:
 	dmb	sy				// needed before dc ivac with
 						// MMU off
 
-	add	x1, x0, #0x20			// 4 x 8 bytes
-	b	__inval_cache_range		// tail call
+	mov	x1, #0x20			// 4 x 8 bytes
+	b	__inval_dcache_area		// tail call
 ENDPROC(preserve_boot_args)
 
 /*
@@ -221,20 +221,20 @@ __create_page_tables:
 	 * dirty cache lines being evicted.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
-	bl	__inval_cache_range
+	ldr	x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+	bl	__inval_dcache_area
 
 	/*
 	 * Clear the idmap and swapper page tables.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x6, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
+	ldr	x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
 1:	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
-	cmp	x0, x6
-	b.lo	1b
+	subs	x1, x1, #64
+	b.ne	1b
 
 	mov	x7, SWAPPER_MM_MMUFLAGS
 
@@ -307,9 +307,9 @@ __create_page_tables:
 	 * tables again to remove any speculatively loaded cache lines.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
+	ldr	x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
 	dmb	sy
-	bl	__inval_cache_range
+	bl	__inval_dcache_area
 
 	ret	x28
 ENDPROC(__create_page_tables)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 83c27b6e6dca..ed47fbbb4b05 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -109,20 +109,25 @@ ENTRY(__clean_dcache_area_pou)
 ENDPROC(__clean_dcache_area_pou)
 
 /*
- * __dma_inv_area(start, size)
- * - start   - virtual start address of region
+ * __inval_dcache_area(kaddr, size)
+ *
+ * 	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * 	are invalidated. Any partial lines at the ends of the interval are
+ * 	also cleaned to PoC to prevent data loss.
+ *
+ * 	- kaddr   - kernel address
  * - size    - size in question
  */
-__dma_inv_area:
-	add	x1, x1, x0
+ENTRY(__inval_dcache_area)
 	/* FALLTHROUGH */
 
 /*
- * __inval_cache_range(start, end)
- * - start   - start address of region
- * - end     - end address of region
+ * __dma_inv_area(start, size)
+ * - start   - virtual start address of region
+ * - size    - size in question
  */
-ENTRY(__inval_cache_range)
+__dma_inv_area:
+	add	x1, x1, x0
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	tst	x1, x3				// end cache line aligned?
@@ -140,7 +145,7 @@ ENTRY(__inval_cache_range)
 	b.lo	2b
 	dsb	sy
 	ret
-ENDPIPROC(__inval_cache_range)
+ENDPIPROC(__inval_dcache_area)
 ENDPROC(__dma_inv_area)
 
 /*