path: root/mm/page_alloc.c
author		Pavel Tatashin <pasha.tatashin@oracle.com>	2018-07-14 09:15:07 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-07-14 14:02:20 -0400
commit		e181ae0c5db9544de9c53239eb22bc012ce75033 (patch)
tree		4af6ac8786ab6cc14c292b8dbf8aa8b75de62b30 /mm/page_alloc.c
parent		2db39a2f491a48ec740e0214a7dd584eefc2137d (diff)
mm: zero unavailable pages before memmap init
We must zero struct pages for memory that is not backed by physical
memory, or that the kernel does not have access to.

Recently, there was a change which zeroed all memmap for all holes in
e820.  Unfortunately, it introduced a bug that is discussed here:

  https://www.spinics.net/lists/linux-mm/msg156764.html

Linus also saw this bug on his machine and confirmed that reverting
commit 124049decbb1 ("x86/e820: put !E820_TYPE_RAM regions into
memblock.reserved") fixes the issue.

The problem is that we incorrectly zero some struct pages after they
have been set up.  The fix is to zero the unavailable struct pages
before initializing struct pages.

A more detailed fix should come later to avoid the double-zeroing
cases: one in __init_single_page(), the other in zero_resv_unavail().

Fixes: 124049decbb1 ("x86/e820: put !E820_TYPE_RAM regions into memblock.reserved")
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
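To make the ordering problem concrete, here is a minimal userspace sketch.
It is not the kernel implementation: fake_page, init_single_page() and
zero_unavailable() are hypothetical, simplified stand-ins for struct page,
__init_single_page() and zero_resv_unavail().

/*
 * Illustrative sketch only -- not kernel code.  Shows why zeroing the
 * struct pages of unavailable memory must happen before, not after,
 * they are initialized.
 */
#include <stdio.h>
#include <string.h>

struct fake_page {
	unsigned long flags;	/* stand-in for page->flags */
	int node;		/* stand-in for the node/zone links */
};

/* Stand-in for __init_single_page(): fills in a struct page's fields. */
static void init_single_page(struct fake_page *page, int node)
{
	page->flags = 1;	/* e.g. reserved bit, zone/node encoding */
	page->node = node;
}

/* Stand-in for zero_resv_unavail(): zeroes pages with no backing memory. */
static void zero_unavailable(struct fake_page *pages, int n)
{
	memset(pages, 0, n * sizeof(*pages));
}

int main(void)
{
	struct fake_page hole[1];

	/* Broken order (before the fix): zeroing wipes the finished setup. */
	init_single_page(&hole[0], 0);
	zero_unavailable(hole, 1);
	printf("zero after init:  flags=%lu (setup lost)\n", hole[0].flags);

	/* Fixed order (this commit): zero first, initialize afterwards. */
	zero_unavailable(hole, 1);
	init_single_page(&hole[0], 0);
	printf("zero before init: flags=%lu (setup preserved)\n", hole[0].flags);

	return 0;
}

Zeroing first and initializing afterwards, which the hunks below achieve by
moving the zero_resv_unavail() calls ahead of the memmap initialization,
keeps whatever __init_single_page() writes into the struct page intact.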
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	4
1 file changed, 2 insertions, 2 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1521100f1e63..5d800d61ddb7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6847,6 +6847,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 	/* Initialise every node */
 	mminit_verify_pageflags_layout();
 	setup_nr_node_ids();
+	zero_resv_unavail();
 	for_each_online_node(nid) {
 		pg_data_t *pgdat = NODE_DATA(nid);
 		free_area_init_node(nid, NULL,
@@ -6857,7 +6858,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 			node_set_state(nid, N_MEMORY);
 		check_for_memory(pgdat, nid);
 	}
-	zero_resv_unavail();
 }
 
 static int __init cmdline_parse_core(char *p, unsigned long *core,
@@ -7033,9 +7033,9 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 
 void __init free_area_init(unsigned long *zones_size)
 {
+	zero_resv_unavail();
 	free_area_init_node(0, zones_size,
 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
-	zero_resv_unavail();
 }
 
 static int page_alloc_cpu_dead(unsigned int cpu)