author	Baoquan He <bhe@redhat.com>	2018-08-17 18:48:42 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-08-17 19:20:31 -0400
commit	07a34a8c36521c37119259d937d1389c3f5f6db9 (patch)
tree	c1378da8a9aef210acfa79f54d8c3258960f69bf
parent	f2fc10e0b3fe7d1aecbd2cab6bf0007b6771e16d (diff)
mm/sparsemem.c: defer the ms->section_mem_map clearing
In sparse_init(), when CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y, the system allocates one contiguous memory chunk for the mem maps on a node and populates the relevant page tables to map the memory sections one by one.  If populating a certain mem section fails, a warning is printed and its ->section_mem_map is cleared to cancel its "present" marking.  As a result, the number of mem sections marked as present can shrink during sparse_init() execution.

Defer the ms->section_mem_map clearing for sections whose page tables failed to populate until the last for_each_present_section_nr() loop.  This is in preparation for a later optimization of the mem map allocation.

[akpm@linux-foundation.org: remove now-unused local `ms', per Oscar]
Link: http://lkml.kernel.org/r/20180228032657.32385-3-bhe@redhat.com
Signed-off-by: Baoquan He <bhe@redhat.com>
Acked-by: Dave Hansen <dave.hansen@intel.com>
Reviewed-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Pasha Tatashin <Pavel.Tatashin@microsoft.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Pankaj Gupta <pagupta@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
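[Editor's illustration, not part of the commit: a minimal user-space sketch of the deferred-clearing pattern the patch moves to.  All names here (toy_section, NR_SECTIONS, populate()) are invented stand-ins, not kernel APIs.]

/*
 * Toy model of "defer the marker clearing": failures are only reported
 * in the populate loop; the "present" marker is cleared in a final pass.
 */
#include <stdio.h>

#define NR_SECTIONS 8

struct toy_section {
	unsigned long section_mem_map;	/* nonzero => marked present */
	void *map;			/* populated mem map, or NULL */
};

static struct toy_section sections[NR_SECTIONS];

/* Pretend that populating section 3 fails. */
static void *populate(int nr)
{
	static char backing[NR_SECTIONS];

	return nr == 3 ? NULL : &backing[nr];
}

int main(void)
{
	int nr;

	for (nr = 0; nr < NR_SECTIONS; nr++)
		sections[nr].section_mem_map = 1;	/* all present */

	/* Populate loop: warn on failure, but keep the "present" marking. */
	for (nr = 0; nr < NR_SECTIONS; nr++) {
		sections[nr].map = populate(nr);
		if (!sections[nr].map)
			fprintf(stderr, "section %d: populate failed\n", nr);
	}

	/* Final pass: clear the marker only here, for failed sections. */
	for (nr = 0; nr < NR_SECTIONS; nr++) {
		if (!sections[nr].section_mem_map)
			continue;
		if (!sections[nr].map)
			sections[nr].section_mem_map = 0;
	}

	for (nr = 0; nr < NR_SECTIONS; nr++)
		printf("section %d present=%lu\n",
		       nr, sections[nr].section_mem_map);
	return 0;
}

[Running the sketch shows section 3 still counted as present throughout the populate loop and only dropped in the final pass, which mirrors why the set of present sections stays stable until the end of sparse_init().]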
-rw-r--r--	mm/sparse-vmemmap.c	4
-rw-r--r--	mm/sparse.c	12
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index bd0276d5f66b..68bb65b2d34d 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -292,18 +292,14 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 	}
 
 	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
-		struct mem_section *ms;
-
 		if (!present_section_nr(pnum))
 			continue;
 
 		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
 		if (map_map[pnum])
 			continue;
-		ms = __nr_to_section(pnum);
 		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
 		       __func__);
-		ms->section_mem_map = 0;
 	}
 
 	if (vmemmap_buf_start) {
diff --git a/mm/sparse.c b/mm/sparse.c
index 99a6383e98bc..eb31274aae8b 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -446,7 +446,6 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 		ms = __nr_to_section(pnum);
 		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
 		       __func__);
-		ms->section_mem_map = 0;
 	}
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
@@ -474,7 +473,6 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 
 	pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
 	       __func__);
-	ms->section_mem_map = 0;
 	return NULL;
 }
 #endif
@@ -578,17 +576,23 @@ void __init sparse_init(void)
 #endif
 
 	for_each_present_section_nr(0, pnum) {
+		struct mem_section *ms;
+		ms = __nr_to_section(pnum);
 		usemap = usemap_map[pnum];
-		if (!usemap)
+		if (!usemap) {
+			ms->section_mem_map = 0;
 			continue;
+		}
 
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
 		map = map_map[pnum];
 #else
 		map = sparse_early_mem_map_alloc(pnum);
 #endif
-		if (!map)
+		if (!map) {
+			ms->section_mem_map = 0;
 			continue;
+		}
 
 		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
 							usemap);