author     Will Deacon <will.deacon@arm.com>  2018-10-11 06:29:14 -0400
committer  Will Deacon <will.deacon@arm.com>  2018-10-12 10:25:16 -0400
commit     d91680e687f47984ffd3200c8e5d587903e7bd11 (patch)
tree       1d76fe99328e0a6439b8163e508fcca1df44cb97
parent     0238df646e6224016a45505d2c111a24669ebe21 (diff)
arm64: Fix /proc/iomem for reserved but not memory regions
We describe ranges of 'reserved' memory to userspace via /proc/iomem.
Commit 50d7ba36b916 ("arm64: export memblock_reserve()d regions via
/proc/iomem") updated the logic to export regions that were reserved
because their contents should be preserved. This allowed kexec-tools to
tell the difference between 'reserved' memory that must be preserved
and not overwritten (e.g. the ACPI tables), and 'nomap' memory that
must not be touched without knowing the memory-attributes (e.g. RAS
CPER regions).

The above commit wrongly assumed that memblock_reserve() would not be
used to reserve regions that aren't memory. It turns out this is
exactly what early_init_dt_reserve_memory_arch() will do if it finds a
DT reserved-memory region that was also carved out of the memory node,
which results in a WARN_ON_ONCE() and the region being reserved instead
of ignored. The ramoops descriptions on hikey and dragonboard-410c both
do this, so we can't simply write this configuration off as "buggy
firmware".

Avoid this issue by rewriting reserve_memblock_reserved_regions() so
that only the portions of reserved regions which overlap with mapped
memory are actually reserved.

Fixes: 50d7ba36b916 ("arm64: export memblock_reserve()d regions via /proc/iomem")
Reported-by: John Stultz <john.stultz@linaro.org>
Reported-by: Paolo Pisati <p.pisati@gmail.com>
CC: Akashi Takahiro <takahiro.akashi@linaro.org>
CC: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: James Morse <james.morse@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
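As a rough illustration of the clipping the rewritten loop performs, the sketch
below (ordinary userspace C, not kernel code) rounds a hypothetical
memblock-reserved range out to page boundaries and clamps it to one "System
RAM" resource, mirroring the max/min arithmetic in
reserve_memblock_reserved_regions(). The example addresses, the 4K PAGE_SHIFT
and the pfn_* / max_u64 / min_u64 helpers are stand-ins for the kernel's
PFN_DOWN/PFN_UP/PFN_PHYS and max/min macros, chosen only for the example.

/*
 * Userspace sketch of the clipping done by the rewritten
 * reserve_memblock_reserved_regions(): round a reserved range out to
 * page boundaries, then clamp it to the enclosing memory resource so
 * that only the overlapping portion is reported as "reserved".
 * PAGE_SHIFT, the helpers and the addresses are illustrative stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)

static uint64_t pfn_down(uint64_t addr) { return addr >> PAGE_SHIFT; }
static uint64_t pfn_up(uint64_t addr)   { return (addr + PAGE_SIZE - 1) >> PAGE_SHIFT; }
static uint64_t pfn_phys(uint64_t pfn)  { return pfn << PAGE_SHIFT; }

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	/* One standard resource covering a memblock.memory region (inclusive end). */
	uint64_t mem_start = 0x80000000ULL, mem_end = 0xbfffffffULL;

	/* A memblock_reserve()d range that only partly overlaps it. */
	uint64_t r_start = 0xbff00000ULL, r_end = 0xc0100000ULL;

	/* Round out to page boundaries, then clamp to the resource. */
	uint64_t start = max_u64(pfn_phys(pfn_down(r_start)), mem_start);
	uint64_t end   = min_u64(pfn_phys(pfn_up(r_end)) - 1, mem_end);

	if (start > mem_end || end < mem_start) {
		puts("no overlap: nothing to mark as reserved");
		return 0;
	}

	/* Only this clipped window would become a "reserved" child resource. */
	printf("reserved: %#llx-%#llx\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}

With these example values the reserved window is clipped to
0xbff00000-0xbfffffff, i.e. it stops at the end of the RAM resource instead of
spilling into the non-memory range above it.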
-rw-r--r--  arch/arm64/kernel/setup.c | 56
1 file changed, 27 insertions(+), 29 deletions(-)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 5b4fac434c84..b3354ff94e79 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -64,6 +64,9 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/mmu_context.h>
 
+static int num_standard_resources;
+static struct resource *standard_resources;
+
 phys_addr_t __fdt_pointer __initdata;
 
 /*
@@ -206,14 +209,19 @@ static void __init request_standard_resources(void)
 {
 	struct memblock_region *region;
 	struct resource *res;
+	unsigned long i = 0;
 
 	kernel_code.start = __pa_symbol(_text);
 	kernel_code.end = __pa_symbol(__init_begin - 1);
 	kernel_data.start = __pa_symbol(_sdata);
 	kernel_data.end = __pa_symbol(_end - 1);
 
+	num_standard_resources = memblock.memory.cnt;
+	standard_resources = alloc_bootmem_low(num_standard_resources *
+					       sizeof(*standard_resources));
+
 	for_each_memblock(memory, region) {
-		res = alloc_bootmem_low(sizeof(*res));
+		res = &standard_resources[i++];
 		if (memblock_is_nomap(region)) {
 			res->name = "reserved";
 			res->flags = IORESOURCE_MEM;
@@ -243,36 +251,26 @@ static void __init request_standard_resources(void)
 
 static int __init reserve_memblock_reserved_regions(void)
 {
-	phys_addr_t start, end, roundup_end = 0;
-	struct resource *mem, *res;
-	u64 i;
-
-	for_each_reserved_mem_region(i, &start, &end) {
-		if (end <= roundup_end)
-			continue; /* done already */
-
-		start = __pfn_to_phys(PFN_DOWN(start));
-		end = __pfn_to_phys(PFN_UP(end)) - 1;
-		roundup_end = end;
-
-		res = kzalloc(sizeof(*res), GFP_ATOMIC);
-		if (WARN_ON(!res))
-			return -ENOMEM;
-		res->start = start;
-		res->end = end;
-		res->name = "reserved";
-		res->flags = IORESOURCE_MEM;
-
-		mem = request_resource_conflict(&iomem_resource, res);
-		/*
-		 * We expected memblock_reserve() regions to conflict with
-		 * memory created by request_standard_resources().
-		 */
-		if (WARN_ON_ONCE(!mem))
+	u64 i, j;
+
+	for (i = 0; i < num_standard_resources; ++i) {
+		struct resource *mem = &standard_resources[i];
+		phys_addr_t r_start, r_end, mem_size = resource_size(mem);
+
+		if (!memblock_is_region_reserved(mem->start, mem_size))
 			continue;
-		kfree(res);
 
-		reserve_region_with_split(mem, start, end, "reserved");
+		for_each_reserved_mem_region(j, &r_start, &r_end) {
+			resource_size_t start, end;
+
+			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
+			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
+
+			if (start > mem->end || end < mem->start)
+				continue;
+
+			reserve_region_with_split(mem, start, end, "reserved");
+		}
 	}
 
 	return 0;