about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r-- arch/arm64/kernel/head.S  |  1 -
-rw-r--r-- arch/arm64/kernel/kaslr.c | 12 +++++++++---
2 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 973df7de7bf8..adb0910b88f5 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -354,7 +354,6 @@ __primary_switched:
 	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
 	b.ne	0f
 	mov	x0, x21				// pass FDT address in x0
-	mov	x1, x23				// pass modulo offset in x1
 	bl	kaslr_early_init		// parse FDT for KASLR options
 	cbz	x0, 0f				// KASLR disabled? just proceed
 	orr	x23, x23, x0			// record KASLR offset
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index a9710efb8c01..1d95c204186b 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -75,7 +75,7 @@ extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
  * containing function pointers) to be reinitialized, and zero-initialized
  * .bss variables will be reset to 0.
  */
-u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
+u64 __init kaslr_early_init(u64 dt_phys)
 {
 	void *fdt;
 	u64 seed, offset, mask, module_range;
@@ -133,9 +133,15 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
 	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
 	 * happens, increase the KASLR offset by the size of the kernel image
 	 * rounded up by SWAPPER_BLOCK_SIZE.
+	 *
+	 * NOTE: The references to _text and _end below will already take the
+	 *       modulo offset (the physical displacement modulo 2 MB) into
+	 *       account, given that the physical placement is controlled by
+	 *       the loader, and will not change as a result of the virtual
+	 *       mapping we choose.
 	 */
-	if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
-	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
+	if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
+	    (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT)) {
 		u64 kimg_sz = _end - _text;
 		offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
 			 & mask;