path: root/mm/page_alloc.c
author	Michal Hocko <mhocko@suse.com>	2019-01-25 13:08:58 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-01-28 13:35:22 -0500
commit	4aa9fc2a435abe95a1e8d7f8c7b3d6356514b37a (patch)
tree	18fdac93ceb6768bfb1a380e586a5e7342ce40b7 /mm/page_alloc.c
parent	f17b5f06cb92ef2250513a1e154c47b78df07d40 (diff)
Revert "mm, memory_hotplug: initialize struct pages for the full memory section"
This reverts commit 2830bf6f05fb3e05bc4743274b806c821807a684.

The underlying assumption that one sparse section belongs to a single NUMA node doesn't really hold. Robert Shteynfeld has reported a boot failure. The boot log was not captured, but his memory layout is as follows:

Early memory node ranges
  node   1: [mem 0x0000000000001000-0x0000000000090fff]
  node   1: [mem 0x0000000000100000-0x00000000dbdf8fff]
  node   1: [mem 0x0000000100000000-0x0000001423ffffff]
  node   0: [mem 0x0000001424000000-0x0000002023ffffff]

This means that node 0 starts in the middle of a memory section which is also in node 1. memmap_init_zone tries to initialize the padding of a section even when it is outside of the given pfn range, because there are code paths (e.g. memory hotplug) which assume that a full memory section is always initialized.

In this particular case, though, such a range is already initialized and most likely already managed by the page allocator. Scribbling over those pages corrupts the internal state and likely blows up when any of those pages gets used.

Reported-by: Robert Shteynfeld <robert.shteynfeld@gmail.com>
Fixes: 2830bf6f05fb ("mm, memory_hotplug: initialize struct pages for the full memory section")
Cc: stable@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
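To make the reported failure mode concrete, here is a minimal stand-alone sketch (an illustration for this page, not kernel code) that redoes the arithmetic from the layout above. It assumes the common x86_64 configuration of 4 KiB pages and 128 MiB sparse sections, i.e. PAGES_PER_SECTION == 32768; the macro names mirror the kernel ones but are redefined locally here. Under that assumption, node 1's end_pfn lands 0x4000 pages into a section, so the reverted padding loop would have walked into node 0's already-initialized range.

/*
 * Stand-alone user-space sketch, not kernel code. It only reproduces
 * the section-alignment arithmetic for the reported memory layout,
 * assuming x86_64 defaults (4 KiB pages, 128 MiB sparse sections).
 */
#include <stdio.h>

#define PAGE_SHIFT		12
#define SECTION_SIZE_BITS	27
#define PAGES_PER_SECTION	(1UL << (SECTION_SIZE_BITS - PAGE_SHIFT))

int main(void)
{
	/* node 1 ends at 0x1423ffffff; node 0 begins at 0x1424000000 */
	unsigned long node1_end_pfn   = (0x1423ffffffUL + 1) >> PAGE_SHIFT;
	unsigned long node0_start_pfn = 0x1424000000UL >> PAGE_SHIFT;
	unsigned long pad_end = node1_end_pfn;

	printf("node1 end_pfn      = %#lx\n", node1_end_pfn);
	printf("node0 start_pfn    = %#lx\n", node0_start_pfn);
	printf("end_pfn %% section  = %#lx\n",
	       node1_end_pfn % PAGES_PER_SECTION);

	/*
	 * The reverted padding loop advanced end_pfn up to the next
	 * section boundary; every pfn it visits here belongs to node 0
	 * and has already been initialized there.
	 */
	while (pad_end % PAGES_PER_SECTION)
		pad_end++;
	printf("pages re-initialized = %lu\n", pad_end - node1_end_pfn);
	return 0;
}

With these assumed constants, node 1's end_pfn (0x1424000) equals node 0's start_pfn and is 0x4000 pages past a section boundary, so the removed loop would have re-run __init_single_page() on 16384 struct pages that node 0 had already initialized and possibly handed to the page allocator, which is exactly the corruption the revert avoids.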
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	12
1 file changed, 0 insertions, 12 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d295c9bc01a8..35fdde041f5c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5701,18 +5701,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 			cond_resched();
 		}
 	}
-#ifdef CONFIG_SPARSEMEM
-	/*
-	 * If the zone does not span the rest of the section then
-	 * we should at least initialize those pages. Otherwise we
-	 * could blow up on a poisoned page in some paths which depend
-	 * on full sections being initialized (e.g. memory hotplug).
-	 */
-	while (end_pfn % PAGES_PER_SECTION) {
-		__init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid);
-		end_pfn++;
-	}
-#endif
 }
 
 #ifdef CONFIG_ZONE_DEVICE