author		Ard Biesheuvel <ard.biesheuvel@linaro.org>	2016-03-30 09:18:42 -0400
committer	Will Deacon <will.deacon@arm.com>	2016-04-14 11:20:45 -0400
commit		177e15f0c1444cd392374ec7175c4787fd911369 (patch)
tree		9259c8fc1afb276bc606efb72aa34f5ac9b2ff72 /arch/arm64/mm/init.c
parent		3bab79edc67152cb6cadb12773b7c7d05228eb77 (diff)
arm64: add the initrd region to the linear mapping explicitly
Instead of going out of our way to relocate the initrd if it turns out
to occupy memory that is not covered by the linear mapping, just add
the initrd to the linear mapping. This puts the burden on the bootloader
to pass initrd= and mem= options that are mutually consistent.

Note that, since the placement of the linear region in the PA space is
also dependent on the placement of the kernel Image, which may reside
anywhere in memory, we may still end up with a situation where the
initrd and the kernel Image are simply too far apart to be covered by
the linear region.

Since we now leave it up to the bootloader to pass the initrd in memory
that is guaranteed to be accessible by the kernel, add a mention of this
to the arm64 boot protocol specification as well.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
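The check the patch adds boils down to interval arithmetic: the page-aligned
initrd region must lie entirely within [memblock_start_of_DRAM(),
memblock_start_of_DRAM() + linear_region_size). The standalone C sketch below
mirrors that logic outside the kernel; the dram_start and linear_region_size
values (and the 4 KiB page size) are hypothetical stand-ins for illustration,
not the kernel's actual memblock state.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Hypothetical stand-ins for the kernel's memblock state. */
static const uint64_t dram_start         = 0x80000000ULL;	/* memblock_start_of_DRAM() */
static const uint64_t linear_region_size = 32ULL << 30;		/* worst case: 32 GB */

/* Returns 1 if the page-aligned initrd region is covered by the linear mapping. */
static int initrd_accessible(uint64_t initrd_start, uint64_t initrd_end)
{
	uint64_t base = initrd_start & PAGE_MASK;
	uint64_t size = PAGE_ALIGN(initrd_end) - base;

	return base >= dram_start &&
	       base + size <= dram_start + linear_region_size;
}

int main(void)
{
	/* An initrd placed just past the end of the linear region fails the check. */
	uint64_t bad = dram_start + linear_region_size;

	printf("in range: %d\n", initrd_accessible(dram_start + PAGE_SIZE,
						   dram_start + 2 * PAGE_SIZE));
	printf("too far:  %d\n", initrd_accessible(bad, bad + PAGE_SIZE));
	return 0;
}

An initrd that fails this check is simply ignored (initrd_start is cleared)
rather than relocated, which is the behavioural change the commit describes.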
Diffstat (limited to 'arch/arm64/mm/init.c')
-rw-r--r--	arch/arm64/mm/init.c	| 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 83ae8b5e5666..82ced5fa1e66 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -207,6 +207,35 @@ void __init arm64_memblock_init(void)
 		memblock_add(__pa(_text), (u64)(_end - _text));
 	}
 
+	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
+		/*
+		 * Add back the memory we just removed if it results in the
+		 * initrd becoming inaccessible via the linear mapping.
+		 * Otherwise, this is a no-op.
+		 */
+		u64 base = initrd_start & PAGE_MASK;
+		u64 size = PAGE_ALIGN(initrd_end) - base;
+
+		/*
+		 * We can only add back the initrd memory if we don't end up
+		 * with more memory than we can address via the linear mapping.
+		 * It is up to the bootloader to position the kernel and the
+		 * initrd reasonably close to each other (i.e., within 32 GB of
+		 * each other) so that all granule/#levels combinations can
+		 * always access both.
+		 */
+		if (WARN(base < memblock_start_of_DRAM() ||
+			 base + size > memblock_start_of_DRAM() +
+				       linear_region_size,
+			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
+			initrd_start = 0;
+		} else {
+			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
+			memblock_add(base, size);
+			memblock_reserve(base, size);
+		}
+	}
+
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
 		extern u16 memstart_offset_seed;
 		u64 range = linear_region_size -
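
A note on the 32 GB figure in the comment above: in this era of the kernel the
linear region covers half of the kernel VA space, i.e. linear_region_size =
1 << (VA_BITS - 1), so the smallest supported configuration (VA_BITS=36, the
16 KB granule with two translation levels) yields a 32 GB linear region. A
quick standalone sketch of that arithmetic, under that assumption:

#include <stdint.h>
#include <stdio.h>

/*
 * linear_region_size = 1 << (VA_BITS - 1): the linear map occupies
 * half of the kernel VA space in this era of the kernel.
 */
static uint64_t linear_region_size(unsigned int va_bits)
{
	return 1ULL << (va_bits - 1);
}

int main(void)
{
	/* VA_BITS values selectable in the arm64 Kconfig at the time. */
	static const unsigned int va_bits[] = { 36, 39, 42, 47, 48 };

	for (unsigned int i = 0; i < sizeof(va_bits) / sizeof(*va_bits); i++)
		printf("VA_BITS=%u -> linear region = %llu GB\n",
		       va_bits[i],
		       (unsigned long long)(linear_region_size(va_bits[i]) >> 30));
	return 0;
}

Only VA_BITS=36 produces a region as small as 32 GB, which is why the
bootloader requirement is phrased in terms of that worst case.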