-rw-r--r--  Documentation/arm64/booting.txt | 10
-rw-r--r--  arch/arm64/kernel/head.S        | 30
-rw-r--r--  arch/arm64/mm/cache.S           |  9
3 files changed, 45 insertions(+), 4 deletions(-)
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index a9691cc48fe3..beb754e87c65 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -111,8 +111,14 @@ Before jumping into the kernel, the following conditions must be met:
 - Caches, MMUs
   The MMU must be off.
   Instruction cache may be on or off.
-  Data cache must be off and invalidated.
-  External caches (if present) must be configured and disabled.
+  The address range corresponding to the loaded kernel image must be
+  cleaned to the PoC. In the presence of a system cache or other
+  coherent masters with caches enabled, this will typically require
+  cache maintenance by VA rather than set/way operations.
+  System caches which respect the architected cache maintenance by VA
+  operations must be configured and may be enabled.
+  System caches which do not respect architected cache maintenance by VA
+  operations (not recommended) must be configured and disabled.
 
 - Architected timers
   CNTFRQ must be programmed with the timer frequency and CNTVOFF must
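As an illustration of the loader-side requirement documented above (not part of this patch), cleaning the loaded image to the PoC by VA can be done with a DC CVAC loop sized from the DminLine field of CTR_EL0; the label and register choices below are hypothetical:

	/* clean [x0, x1) to the PoC by VA; x0 = image base, x1 = image end */
clean_image_to_poc:
	mrs	x2, ctr_el0
	ubfx	x2, x2, #16, #4			// DminLine: log2(words) of smallest D-cache line
	mov	x3, #4
	lsl	x3, x3, x2			// minimum D-cache line size in bytes
	sub	x4, x3, #1
	bic	x0, x0, x4			// align base down to the line size
1:	dc	cvac, x0			// clean data cache line by VA to the PoC
	add	x0, x0, x3
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret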
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 61035d6814cb..26109682d2fa 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -26,6 +26,7 @@
 #include <asm/assembler.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
+#include <asm/cache.h>
 #include <asm/cputype.h>
 #include <asm/memory.h>
 #include <asm/thread_info.h>
@@ -229,7 +230,11 @@ ENTRY(set_cpu_boot_mode_flag)
 	cmp	w20, #BOOT_CPU_MODE_EL2
 	b.ne	1f
 	add	x1, x1, #4
-1:	str	w20, [x1]			// This CPU has booted in EL1
+1:	dc	cvac, x1			// Clean potentially dirty cache line
+	dsb	sy
+	str	w20, [x1]			// This CPU has booted in EL1
+	dc	civac, x1			// Clean&invalidate potentially stale cache line
+	dsb	sy
 	ret
 ENDPROC(set_cpu_boot_mode_flag)
 
@@ -240,8 +245,9 @@ ENDPROC(set_cpu_boot_mode_flag)
  * This is not in .bss, because we set it sufficiently early that the boot-time
  * zeroing of .bss would clobber it.
  */
-	.pushsection	.data
+	.pushsection	.data..cacheline_aligned
 ENTRY(__boot_cpu_mode)
+	.align	L1_CACHE_SHIFT
 	.long	BOOT_CPU_MODE_EL2
 	.long	0
 	.popsection
@@ -408,6 +414,15 @@ ENDPROC(__calc_phys_offset)
  */
 __create_page_tables:
 	pgtbl	x25, x26, x24			// idmap_pg_dir and swapper_pg_dir addresses
+	mov	x27, lr
+
+	/*
+	 * Invalidate the idmap and swapper page tables to avoid potential
+	 * dirty cache lines being evicted.
+	 */
+	mov	x0, x25
+	add	x1, x26, #SWAPPER_DIR_SIZE
+	bl	__inval_cache_range
 
 	/*
 	 * Clear the idmap and swapper page tables.
@@ -470,6 +485,17 @@ __create_page_tables:
 	add	x0, x26, #2 * PAGE_SIZE		// section table address
 	create_pgd_entry x26, x0, x5, x6, x7
 #endif
+
+	/*
+	 * Since the page tables have been populated with non-cacheable
+	 * accesses (MMU disabled), invalidate the idmap and swapper page
+	 * tables again to remove any speculatively loaded cache lines.
+	 */
+	mov	x0, x25
+	add	x1, x26, #SWAPPER_DIR_SIZE
+	bl	__inval_cache_range
+
+	mov	lr, x27
 	ret
 ENDPROC(__create_page_tables)
 	.ltorg
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index c46f48b33c14..e803a62e0e45 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -168,6 +168,14 @@ ENTRY(__flush_dcache_area)
 ENDPROC(__flush_dcache_area)
 
 /*
+ *	__inval_cache_range(start, end)
+ *	- start   - start address of region
+ *	- end     - end address of region
+ */
+ENTRY(__inval_cache_range)
+	/* FALLTHROUGH */
+
+/*
  *	__dma_inv_range(start, end)
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
@@ -183,6 +191,7 @@ __dma_inv_range:
 	b.lo	1b
 	dsb	sy
 	ret
+ENDPROC(__inval_cache_range)
 ENDPROC(__dma_inv_range)
 
 /*
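For reference, the new __inval_cache_range entry point simply falls through into __dma_inv_range, whose body is elided from the hunk context above. A simplified sketch of such an invalidate-by-VA loop is shown below (using the dcache_line_size macro already used by cache.S); it is illustrative only and omits the clean-and-invalidate handling a real routine may need for cache lines partially covered at the start and end of the range:

	/* sketch: invalidate [x0, x1) by VA to the PoC */
	dcache_line_size x2, x3			// x2 = minimum D-cache line size
	sub	x3, x2, #1
	bic	x0, x0, x3			// align start down to the line size
1:	dc	ivac, x0			// invalidate D / U line to the PoC
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret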