diff options
author | Pavel Tatashin <pasha.tatashin@oracle.com> | 2018-08-17 18:49:26 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-17 19:20:32 -0400 |
commit | e131c06b14b8601e2b1dbc7ec9cc6418c293a067 (patch) | |
tree | e396cccbb1fdb005f54f89b40e4c3f06e5f4aa9f | |
parent | 35fd1eb1e8212c02f6eae24335a9e5b80f9519b4 (diff) |
mm/sparse: use the new sparse buffer functions in non-vmemmap
non-vmemmap sparse also allocates a large contiguous chunk of memory, and if
that fails, falls back to smaller allocations. Use the same functions to
allocate the buffer as the vmemmap-sparse variant does.
Link: http://lkml.kernel.org/r/20180712203730.8703-3-pasha.tatashin@oracle.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Tested-by: Michael Ellerman <mpe@ellerman.id.au> [powerpc]
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Tested-by: Oscar Salvador <osalvador@suse.de>
Cc: Pasha Tatashin <Pavel.Tatashin@microsoft.com>
Cc: Abdul Haleem <abdhalee@linux.vnet.ibm.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Steven Sistare <steven.sistare@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | mm/sparse.c | 41 |
1 file changed, 14 insertions(+), 27 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c index 9a0a5f598469..db4867b62fff 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
@@ -408,13 +408,20 @@ unsigned long __init section_map_size(void) | |||
408 | } | 408 | } |
409 | 409 | ||
410 | #else | 410 | #else |
411 | unsigned long __init section_map_size(void) | ||
412 | { | ||
413 | return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION); | ||
414 | } | ||
415 | |||
411 | struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid, | 416 | struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid, |
412 | struct vmem_altmap *altmap) | 417 | struct vmem_altmap *altmap) |
413 | { | 418 | { |
414 | struct page *map; | 419 | unsigned long size = section_map_size(); |
415 | unsigned long size; | 420 | struct page *map = sparse_buffer_alloc(size); |
421 | |||
422 | if (map) | ||
423 | return map; | ||
416 | 424 | ||
417 | size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION); | ||
418 | map = memblock_virt_alloc_try_nid(size, | 425 | map = memblock_virt_alloc_try_nid(size, |
419 | PAGE_SIZE, __pa(MAX_DMA_ADDRESS), | 426 | PAGE_SIZE, __pa(MAX_DMA_ADDRESS), |
420 | BOOTMEM_ALLOC_ACCESSIBLE, nid); | 427 | BOOTMEM_ALLOC_ACCESSIBLE, nid); |
@@ -425,42 +432,22 @@ void __init sparse_mem_maps_populate_node(struct page **map_map, | |||
425 | unsigned long pnum_end, | 432 | unsigned long pnum_end, |
426 | unsigned long map_count, int nodeid) | 433 | unsigned long map_count, int nodeid) |
427 | { | 434 | { |
428 | void *map; | ||
429 | unsigned long pnum; | 435 | unsigned long pnum; |
430 | unsigned long size = sizeof(struct page) * PAGES_PER_SECTION; | 436 | unsigned long size = section_map_size(); |
431 | int nr_consumed_maps; | 437 | int nr_consumed_maps = 0; |
432 | |||
433 | size = PAGE_ALIGN(size); | ||
434 | map = memblock_virt_alloc_try_nid_raw(size * map_count, | ||
435 | PAGE_SIZE, __pa(MAX_DMA_ADDRESS), | ||
436 | BOOTMEM_ALLOC_ACCESSIBLE, nodeid); | ||
437 | if (map) { | ||
438 | nr_consumed_maps = 0; | ||
439 | for (pnum = pnum_begin; pnum < pnum_end; pnum++) { | ||
440 | if (!present_section_nr(pnum)) | ||
441 | continue; | ||
442 | map_map[nr_consumed_maps] = map; | ||
443 | map += size; | ||
444 | nr_consumed_maps++; | ||
445 | } | ||
446 | return; | ||
447 | } | ||
448 | 438 | ||
449 | /* fallback */ | 439 | sparse_buffer_init(size * map_count, nodeid); |
450 | nr_consumed_maps = 0; | ||
451 | for (pnum = pnum_begin; pnum < pnum_end; pnum++) { | 440 | for (pnum = pnum_begin; pnum < pnum_end; pnum++) { |
452 | struct mem_section *ms; | ||
453 | |||
454 | if (!present_section_nr(pnum)) | 441 | if (!present_section_nr(pnum)) |
455 | continue; | 442 | continue; |
456 | map_map[nr_consumed_maps] = | 443 | map_map[nr_consumed_maps] = |
457 | sparse_mem_map_populate(pnum, nodeid, NULL); | 444 | sparse_mem_map_populate(pnum, nodeid, NULL); |
458 | if (map_map[nr_consumed_maps++]) | 445 | if (map_map[nr_consumed_maps++]) |
459 | continue; | 446 | continue; |
460 | ms = __nr_to_section(pnum); | ||
461 | pr_err("%s: sparsemem memory map backing failed some memory will not be available\n", | 447 | pr_err("%s: sparsemem memory map backing failed some memory will not be available\n", |
462 | __func__); | 448 | __func__); |
463 | } | 449 | } |
450 | sparse_buffer_fini(); | ||
464 | } | 451 | } |
465 | #endif /* !CONFIG_SPARSEMEM_VMEMMAP */ | 452 | #endif /* !CONFIG_SPARSEMEM_VMEMMAP */ |
466 | 453 | ||