diff options
| author | Pavel Tatashin <pasha.tatashin@oracle.com> | 2018-07-16 11:16:30 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-07-16 12:41:57 -0400 |
| commit | d1b47a7c9efcf3c3384b70f6e3c8f1423b44d8c7 (patch) | |
| tree | 1d2fefe2d803db41242df44c15dc8fe7b770edd7 /mm | |
| parent | 9d3cce1e8b8561fed5f383d22a4d6949db4eadbe (diff) | |
mm: don't do zero_resv_unavail if memmap is not allocated
Moving zero_resv_unavail() before memmap_init_zone() caused a regression on
x86-32.
The cause is that we access struct pages before they are allocated when
CONFIG_FLAT_NODE_MEM_MAP is used.
free_area_init_nodes()
zero_resv_unavail()
mm_zero_struct_page(pfn_to_page(pfn)); <- struct page is not allocated
free_area_init_node()
if CONFIG_FLAT_NODE_MEM_MAP
alloc_node_mem_map()
memblock_virt_alloc_node_nopanic() <- struct page alloced here
On the other hand memblock_virt_alloc_node_nopanic() zeroes all the memory
that it returns, so we do not need to do zero_resv_unavail() here.
Fixes: e181ae0c5db9 ("mm: zero unavailable pages before memmap init")
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Tested-by: Matt Hart <matt@mattface.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/page_alloc.c | 4 |
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5d800d61ddb7..a790ef4be74e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -6383,7 +6383,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, | |||
| 6383 | free_area_init_core(pgdat); | 6383 | free_area_init_core(pgdat); |
| 6384 | } | 6384 | } |
| 6385 | 6385 | ||
| 6386 | #ifdef CONFIG_HAVE_MEMBLOCK | 6386 | #if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP) |
| 6387 | /* | 6387 | /* |
| 6388 | * Only struct pages that are backed by physical memory are zeroed and | 6388 | * Only struct pages that are backed by physical memory are zeroed and |
| 6389 | * initialized by going through __init_single_page(). But, there are some | 6389 | * initialized by going through __init_single_page(). But, there are some |
| @@ -6421,7 +6421,7 @@ void __paginginit zero_resv_unavail(void) | |||
| 6421 | if (pgcnt) | 6421 | if (pgcnt) |
| 6422 | pr_info("Reserved but unavailable: %lld pages", pgcnt); | 6422 | pr_info("Reserved but unavailable: %lld pages", pgcnt); |
| 6423 | } | 6423 | } |
| 6424 | #endif /* CONFIG_HAVE_MEMBLOCK */ | 6424 | #endif /* CONFIG_HAVE_MEMBLOCK && !CONFIG_FLAT_NODE_MEM_MAP */ |
| 6425 | 6425 | ||
| 6426 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP | 6426 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
| 6427 | 6427 | ||
