author	Pavel Tatashin <pasha.tatashin@oracle.com>	2018-08-17 18:49:30 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-08-17 19:20:32 -0400
commit	afda57bc13410459fc957e93341ade7bebca36e2 (patch)
tree	c74fe86943c151a0d9430189404d8a9493c0410a
parent	e131c06b14b8601e2b1dbc7ec9cc6418c293a067 (diff)
mm/sparse: move buffer init/fini to the common place
Now that both variants of sparse memory use the same buffers to populate the
memory map, we can move sparse_buffer_init()/sparse_buffer_fini() to the
common place.

Link: http://lkml.kernel.org/r/20180712203730.8703-4-pasha.tatashin@oracle.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Tested-by: Michael Ellerman <mpe@ellerman.id.au>	[powerpc]
Tested-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Pasha Tatashin <Pavel.Tatashin@microsoft.com>
Cc: Abdul Haleem <abdhalee@linux.vnet.ibm.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Steven Sistare <steven.sistare@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
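In effect, the buffer lifetime now brackets the populate call once, at the
common call site, instead of being repeated inside each
sparse_mem_maps_populate_node() variant. Stripped of diff markup, the
resulting sparse_early_mem_maps_alloc_node() in mm/sparse.c should read
roughly as below (a sketch assembled from the last hunk of this patch;
parameter names beyond those visible in the hunk are inferred from the call
it contains):

	static void __init sparse_early_mem_maps_alloc_node(void *data,
					 unsigned long pnum_begin,
					 unsigned long pnum_end,
					 unsigned long map_count, int nodeid)
	{
		struct page **map_map = (struct page **)data;

		/* One per-node buffer, sized for every section map on this node. */
		sparse_buffer_init(section_map_size() * map_count, nodeid);
		/* Both the vmemmap and non-vmemmap variants now allocate from it. */
		sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
					      map_count, nodeid);
		/* Tear the buffer down once this node's maps are populated. */
		sparse_buffer_fini();
	}
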
-rw-r--r--	include/linux/mm.h	3
-rw-r--r--	mm/sparse-vmemmap.c	2
-rw-r--r--	mm/sparse.c	14
3 files changed, 7 insertions, 12 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4ace5d50a892..48040510df05 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2671,9 +2671,6 @@ void sparse_mem_maps_populate_node(struct page **map_map,
 				   unsigned long map_count,
 				   int nodeid);
 
-unsigned long __init section_map_size(void);
-void sparse_buffer_init(unsigned long size, int nid);
-void sparse_buffer_fini(void);
 void *sparse_buffer_alloc(unsigned long size);
 struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
 		struct vmem_altmap *altmap);
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index b05c7663c640..cd15f3d252c3 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -270,7 +270,6 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 	unsigned long pnum;
 	int nr_consumed_maps = 0;
 
-	sparse_buffer_init(section_map_size() * map_count, nodeid);
 	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
 		if (!present_section_nr(pnum))
 			continue;
@@ -282,5 +281,4 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
 		       __func__);
 	}
-	sparse_buffer_fini();
 }
diff --git a/mm/sparse.c b/mm/sparse.c
index db4867b62fff..20ca292d8f11 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -401,14 +401,14 @@ static void __init sparse_early_usemaps_alloc_node(void *data,
 }
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-unsigned long __init section_map_size(void)
+static unsigned long __init section_map_size(void)
 
 {
 	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
 }
 
 #else
-unsigned long __init section_map_size(void)
+static unsigned long __init section_map_size(void)
 {
 	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
 }
@@ -433,10 +433,8 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 				   unsigned long map_count, int nodeid)
 {
 	unsigned long pnum;
-	unsigned long size = section_map_size();
 	int nr_consumed_maps = 0;
 
-	sparse_buffer_init(size * map_count, nodeid);
 	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
 		if (!present_section_nr(pnum))
 			continue;
@@ -447,14 +445,13 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
 		       __func__);
 	}
-	sparse_buffer_fini();
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
 static void *sparsemap_buf __meminitdata;
 static void *sparsemap_buf_end __meminitdata;
 
-void __init sparse_buffer_init(unsigned long size, int nid)
+static void __init sparse_buffer_init(unsigned long size, int nid)
 {
 	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
 	sparsemap_buf =
@@ -464,7 +461,7 @@ void __init sparse_buffer_init(unsigned long size, int nid)
 	sparsemap_buf_end = sparsemap_buf + size;
 }
 
-void __init sparse_buffer_fini(void)
+static void __init sparse_buffer_fini(void)
 {
 	unsigned long size = sparsemap_buf_end - sparsemap_buf;
 
@@ -494,8 +491,11 @@ static void __init sparse_early_mem_maps_alloc_node(void *data,
 				       unsigned long map_count, int nodeid)
 {
 	struct page **map_map = (struct page **)data;
+
+	sparse_buffer_init(section_map_size() * map_count, nodeid);
 	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
 					 map_count, nodeid);
+	sparse_buffer_fini();
 }
 #else
 static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)