aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm64
diff options
context:
space:
mode:
authorMark Rutland <mark.rutland@arm.com>2015-03-24 09:50:27 -0400
committerWill Deacon <will.deacon@arm.com>2015-03-24 10:36:35 -0400
commit91d57155dc5ab4b311624b7ee570339b6af19ad5 (patch)
tree66e5df4b229063a7b00a7ccc09be5216f1b9b6eb /arch/arm64
parente4c5a6851058386c9e109ad529717a23173918bc (diff)
arm64: head.S: ensure visibility of page tables
After writing the page tables, we use __inval_cache_range to invalidate any stale cache entries. Strongly Ordered memory accesses are not ordered w.r.t. cache maintenance instructions, and hence explicit memory barriers are required to provide this ordering. However, __inval_cache_range was written to be used on Normal Cacheable memory once the MMU and caches are on, and does not have any barriers prior to the DC instructions. This patch adds a DMB between the page tables being written and the corresponding cachelines being invalidated, ensuring that the invalidation makes the new data visible to subsequent cacheable accesses. A barrier is not required before the prior invalidate as we do not access the page table memory area prior to this, and earlier barriers in preserve_boot_args and set_cpu_boot_mode_flag ensures ordering w.r.t. any stores performed prior to entering Linux. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Will Deacon <will.deacon@arm.com> Fixes: c218bca74eeafa2f ("arm64: Relax the kernel cache requirements for boot") Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm64')
-rw-r--r--arch/arm64/kernel/head.S1
1 file changed, 1 insertion, 0 deletions
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 51c9811e683c..bbc474cd0ca8 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -458,6 +458,7 @@ __create_page_tables:
 	 */
 	mov	x0, x25
 	add	x1, x26, #SWAPPER_DIR_SIZE
+	dmb	sy
 	bl	__inval_cache_range
 
 	mov	lr, x27