diff options
author | Laura Abbott <laura@labbott.name> | 2015-11-05 21:48:46 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-11-05 22:34:48 -0500 |
commit | a1c34a3bf00af2cede839879502e12dc68491ad5 (patch) | |
tree | e8c19a0ccec4e78d9e8ecf5038a1a7ca80693e14 | |
parent | c2d42c16ad83006a706d83e51a7268db04af733a (diff) |
mm: Don't offset memmap for flatmem
Srinivas Kandagatla reported bad page messages when trying to remove the
bottom 2MB on an ARM based IFC6410 board:
BUG: Bad page state in process swapper pfn:fffa8
page:ef7fb500 count:0 mapcount:0 mapping: (null) index:0x0
flags: 0x96640253(locked|error|dirty|active|arch_1|reclaim|mlocked)
page dumped because: PAGE_FLAGS_CHECK_AT_FREE flag(s) set
bad because of flags:
flags: 0x200041(locked|active|mlocked)
Modules linked in:
CPU: 0 PID: 0 Comm: swapper Not tainted 3.19.0-rc3-00007-g412f9ba-dirty #816
Hardware name: Qualcomm (Flattened Device Tree)
unwind_backtrace
show_stack
dump_stack
bad_page
free_pages_prepare
free_hot_cold_page
__free_pages
free_highmem_page
mem_init
start_kernel
Disabling lock debugging due to kernel taint
Removing the lower 2MB caused the start of the lowmem zone to no longer be
page block aligned. IFC6410 uses CONFIG_FLATMEM where alloc_node_mem_map
allocates memory for the mem_map. alloc_node_mem_map will offset for
unaligned nodes with the assumption the pfn/page translation functions
will account for the offset. The functions for CONFIG_FLATMEM do not
offset however, resulting in overrunning the memmap array. Just use the
allocated memmap without any offset when running with CONFIG_FLATMEM to
avoid the overrun.
Signed-off-by: Laura Abbott <laura@labbott.name>
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Reported-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Tested-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Bjorn Andersson <bjorn.andersson@sonymobile.com>
Cc: Santosh Shilimkar <ssantosh@kernel.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Kevin Hilman <khilman@linaro.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Stephen Boyd <sboyd@codeaurora.org>
Cc: Andy Gross <agross@codeaurora.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | mm/page_alloc.c | 9 |
1 files changed, 6 insertions, 3 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4aed338fc535..86f7d952e2cc 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -5421,6 +5421,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat) | |||
5421 | 5421 | ||
5422 | static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) | 5422 | static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) |
5423 | { | 5423 | { |
5424 | unsigned long __maybe_unused offset = 0; | ||
5425 | |||
5424 | /* Skip empty nodes */ | 5426 | /* Skip empty nodes */ |
5425 | if (!pgdat->node_spanned_pages) | 5427 | if (!pgdat->node_spanned_pages) |
5426 | return; | 5428 | return; |
@@ -5437,6 +5439,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) | |||
5437 | * for the buddy allocator to function correctly. | 5439 | * for the buddy allocator to function correctly. |
5438 | */ | 5440 | */ |
5439 | start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); | 5441 | start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); |
5442 | offset = pgdat->node_start_pfn - start; | ||
5440 | end = pgdat_end_pfn(pgdat); | 5443 | end = pgdat_end_pfn(pgdat); |
5441 | end = ALIGN(end, MAX_ORDER_NR_PAGES); | 5444 | end = ALIGN(end, MAX_ORDER_NR_PAGES); |
5442 | size = (end - start) * sizeof(struct page); | 5445 | size = (end - start) * sizeof(struct page); |
@@ -5444,7 +5447,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) | |||
5444 | if (!map) | 5447 | if (!map) |
5445 | map = memblock_virt_alloc_node_nopanic(size, | 5448 | map = memblock_virt_alloc_node_nopanic(size, |
5446 | pgdat->node_id); | 5449 | pgdat->node_id); |
5447 | pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); | 5450 | pgdat->node_mem_map = map + offset; |
5448 | } | 5451 | } |
5449 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 5452 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
5450 | /* | 5453 | /* |
@@ -5452,9 +5455,9 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) | |||
5452 | */ | 5455 | */ |
5453 | if (pgdat == NODE_DATA(0)) { | 5456 | if (pgdat == NODE_DATA(0)) { |
5454 | mem_map = NODE_DATA(0)->node_mem_map; | 5457 | mem_map = NODE_DATA(0)->node_mem_map; |
5455 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP | 5458 | #if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM) |
5456 | if (page_to_pfn(mem_map) != pgdat->node_start_pfn) | 5459 | if (page_to_pfn(mem_map) != pgdat->node_start_pfn) |
5457 | mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET); | 5460 | mem_map -= offset; |
5458 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ | 5461 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
5459 | } | 5462 | } |
5460 | #endif | 5463 | #endif |