author		Michal Hocko <mhocko@suse.com>	2018-01-31 19:21:14 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-01-31 20:18:40 -0500
commit		9bb5a391f9a5707e04763cf14298fc4cc29bfecd (patch)
tree		17485036aee72b0d81de1cd2fdd5e93e5d90ec65 /mm/page_alloc.c
parent		da391d640c528bc5bb227ea5b39c882b75ac3167 (diff)
mm, memory_hotplug: fix memmap initialization
Bharata has noticed that onlining newly added memory doesn't increase the total memory, pointing to commit f7f99100d8d9 ("mm: stop zeroing memory during allocation in vmemmap") as the culprit. That commit changed the way the memory for memmaps is initialized, moving the zeroing from allocation time to initialization time. This works properly for the early memmap init path.

It doesn't work for memory hotplug, though, because there we need to mark pages as reserved when the sparsemem section is created and only later initialize them completely during onlining. memmap_init_zone is called in the early stage of onlining. With the current code it calls __init_single_page, which zeroes out the whole struct page, including the reserved marking, and therefore online_pages_range skips those pages. Fix this by skipping mm_zero_struct_page in __init_single_page for the memory hotplug path. This is quite ugly, but unifying both the early init and memory hotplug init paths is a large project. Make sure we plug the regression at least.

Link: http://lkml.kernel.org/r/20180130101141.GW21609@dhcp22.suse.cz
Fixes: f7f99100d8d9 ("mm: stop zeroing memory during allocation in vmemmap")
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reported-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
Tested-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
Reviewed-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Cc: Steven Sistare <steven.sistare@oracle.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Bob Picco <bob.picco@oracle.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
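For illustration only (not part of the commit): a minimal userspace C sketch of the failure mode described above, under the assumption that onlining counts only pages still marked reserved. struct page, PG_RESERVED, mm_zero_struct_page(), init_single_page() and online_page() here are simplified stand-ins for the kernel's counterparts, not the real definitions.

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the kernel's struct page. */
struct page {
	unsigned long flags;
};

#define PG_RESERVED	(1UL << 0)	/* stand-in for PG_reserved */

/* Stand-in for mm_zero_struct_page(): wipes everything, flags included. */
static void mm_zero_struct_page(struct page *page)
{
	memset(page, 0, sizeof(*page));
}

/* Post-fix init: zero only when the caller asks for it (early init). */
static void init_single_page(struct page *page, int zero)
{
	if (zero)
		mm_zero_struct_page(page);
	/* ...set_page_links(), init_page_count(), etc. would follow... */
}

/* Stand-in for the online_pages_range() check: a hotplugged page is
 * onlined only if it is still marked reserved. */
static int online_page(struct page *page)
{
	return (page->flags & PG_RESERVED) != 0;
}

int main(void)
{
	/* Marked reserved when the sparsemem section was created. */
	struct page page = { .flags = PG_RESERVED };

	/* Pre-fix path: unconditional zeroing during onlining wipes
	 * PG_RESERVED, so the page is skipped and total memory
	 * does not grow. */
	struct page broken = page;
	init_single_page(&broken, 1);
	printf("pre-fix:  onlined=%d\n", online_page(&broken));

	/* Post-fix path: the hotplug caller passes zero=false, the
	 * reserved marking survives and the page is onlined. */
	struct page fixed = page;
	init_single_page(&fixed, 0);
	printf("post-fix: onlined=%d\n", online_page(&fixed));

	return 0;
}

Compiled with any C compiler, this prints onlined=0 for the pre-fix path and onlined=1 for the post-fix path, mirroring the regression and the fix.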
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a6972750e7c5..c7dd9c86e353 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1177,9 +1177,10 @@ static void free_one_page(struct zone *zone,
 }
 
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
-				unsigned long zone, int nid)
+				unsigned long zone, int nid, bool zero)
 {
-	mm_zero_struct_page(page);
+	if (zero)
+		mm_zero_struct_page(page);
 	set_page_links(page, zone, nid, pfn);
 	init_page_count(page);
 	page_mapcount_reset(page);
@@ -1194,9 +1195,9 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
 }
 
 static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
-					int nid)
+					int nid, bool zero)
 {
-	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
+	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid, zero);
 }
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
@@ -1217,7 +1218,7 @@ static void __meminit init_reserved_page(unsigned long pfn)
 		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
 			break;
 	}
-	__init_single_pfn(pfn, zid, nid);
+	__init_single_pfn(pfn, zid, nid, true);
 }
 #else
 static inline void init_reserved_page(unsigned long pfn)
@@ -1534,7 +1535,7 @@ static unsigned long __init deferred_init_pages(int nid, int zid,
 		} else {
 			page++;
 		}
-		__init_single_page(page, pfn, zid, nid);
+		__init_single_page(page, pfn, zid, nid, true);
 		nr_pages++;
 	}
 	return (nr_pages);
@@ -5399,15 +5400,20 @@ not_early:
 		 * can be created for invalid pages (for alignment)
 		 * check here not to call set_pageblock_migratetype() against
 		 * pfn out of zone.
+		 *
+		 * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
+		 * because this is done early in sparse_add_one_section
 		 */
 		if (!(pfn & (pageblock_nr_pages - 1))) {
 			struct page *page = pfn_to_page(pfn);
 
-			__init_single_page(page, pfn, zone, nid);
+			__init_single_page(page, pfn, zone, nid,
+					context != MEMMAP_HOTPLUG);
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 			cond_resched();
 		} else {
-			__init_single_pfn(pfn, zone, nid);
+			__init_single_pfn(pfn, zone, nid,
+					context != MEMMAP_HOTPLUG);
 		}
 	}
 }