Diffstat (limited to 'mm/sparse.c'):
 mm/sparse.c | 41 ++++++++++++++---------------------------
 1 file changed, 14 insertions(+), 27 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index 9a0a5f598469..db4867b62fff 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -408,13 +408,20 @@ unsigned long __init section_map_size(void)
 }
 
 #else
+unsigned long __init section_map_size(void)
+{
+	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
+}
+
 struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
 		struct vmem_altmap *altmap)
 {
-	struct page *map;
-	unsigned long size;
+	unsigned long size = section_map_size();
+	struct page *map = sparse_buffer_alloc(size);
+
+	if (map)
+		return map;
 
-	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
 	map = memblock_virt_alloc_try_nid(size,
 					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
 					  BOOTMEM_ALLOC_ACCESSIBLE, nid);
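
With this hunk the non-vmemmap build gains its own section_map_size() helper, and sparse_mem_map_populate() now tries sparse_buffer_alloc() first, falling back to memblock_virt_alloc_try_nid() only when the shared buffer cannot satisfy the request. A minimal userspace sketch of that "carve from a pre-filled buffer, else allocate directly" shape follows; buf_alloc(), populate_map() and the sizes are illustrative stand-ins, not kernel API.

/*
 * Userspace sketch of the "shared buffer first, direct allocation as
 * fallback" pattern; buf_alloc() and SECTION_MAP_SIZE are stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

#define SECTION_MAP_SIZE 64	/* stand-in for section_map_size() */

static char *buf, *cur, *end;	/* a pre-filled bump buffer */

/* models sparse_buffer_alloc(): hand out bytes until the buffer runs dry */
static void *buf_alloc(size_t size)
{
	void *p = NULL;

	if (cur && cur + size <= end) {
		p = cur;
		cur += size;
	}
	return p;
}

/* models the shape of the rewritten sparse_mem_map_populate() */
static void *populate_map(void)
{
	void *map = buf_alloc(SECTION_MAP_SIZE);

	if (map)
		return map;			/* fast path: carved from the buffer */
	return calloc(1, SECTION_MAP_SIZE);	/* fallback: standalone allocation */
}

int main(void)
{
	int i;

	/* give the buffer room for two section maps, then ask for three */
	buf = cur = malloc(2 * SECTION_MAP_SIZE);
	end = buf + 2 * SECTION_MAP_SIZE;

	for (i = 0; i < 3; i++)
		printf("section %d map at %p\n", i, populate_map());

	free(buf);	/* the fallback allocation is left to process exit */
	return 0;
}
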
@@ -425,42 +432,22 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 					  unsigned long pnum_end,
 					  unsigned long map_count, int nodeid)
 {
-	void *map;
 	unsigned long pnum;
-	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
-	int nr_consumed_maps;
-
-	size = PAGE_ALIGN(size);
-	map = memblock_virt_alloc_try_nid_raw(size * map_count,
-					      PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
-					      BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
-	if (map) {
-		nr_consumed_maps = 0;
-		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
-			if (!present_section_nr(pnum))
-				continue;
-			map_map[nr_consumed_maps] = map;
-			map += size;
-			nr_consumed_maps++;
-		}
-		return;
-	}
+	unsigned long size = section_map_size();
+	int nr_consumed_maps = 0;
 
-	/* fallback */
-	nr_consumed_maps = 0;
+	sparse_buffer_init(size * map_count, nodeid);
 	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
-		struct mem_section *ms;
-
 		if (!present_section_nr(pnum))
 			continue;
 		map_map[nr_consumed_maps] =
 				sparse_mem_map_populate(pnum, nodeid, NULL);
 		if (map_map[nr_consumed_maps++])
 			continue;
-		ms = __nr_to_section(pnum);
 		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
 		       __func__);
 	}
+	sparse_buffer_fini();
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
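
The second hunk drops the open-coded bulk allocation (one large memblock_virt_alloc_try_nid_raw() carved by hand, plus a separate fallback loop) and instead brackets a single population loop with sparse_buffer_init() and sparse_buffer_fini(). A compilable toy model of that init/alloc/fini lifecycle is sketched below, assuming the sparse buffer behaves like a simple bump allocator; all names and sizes here are stand-ins for illustration, not the kernel implementation.

/*
 * Toy model of the init/alloc/fini lifecycle around the population loop;
 * buffer_init()/buffer_alloc()/buffer_fini() are illustrative stand-ins
 * for the sparse buffer helpers.
 */
#include <stdio.h>
#include <stdlib.h>

#define NSECTIONS	5
#define SECTION_SIZE	64	/* stand-in for section_map_size() */

static char *buf, *cur, *end;

static void buffer_init(size_t size)	/* models sparse_buffer_init() */
{
	buf = cur = malloc(size);
	end = buf ? buf + size : NULL;
}

static void *buffer_alloc(size_t size)	/* models sparse_buffer_alloc() */
{
	void *p = NULL;

	if (cur && cur + size <= end) {
		p = cur;
		cur += size;
	}
	return p;
}

static void buffer_fini(void)		/* models sparse_buffer_fini() */
{
	/* the real helper hands back the unused tail; here we only report it */
	if (buf)
		printf("%zu bytes of the buffer left unused\n",
		       (size_t)(end - cur));
}

int main(void)
{
	unsigned long pnum;

	buffer_init((size_t)SECTION_SIZE * NSECTIONS);
	for (pnum = 0; pnum < NSECTIONS; pnum++) {
		if (!buffer_alloc(SECTION_SIZE))
			fprintf(stderr, "section %lu: map backing failed\n", pnum);
	}
	buffer_fini();
	free(buf);
	return 0;
}
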