Diffstat (limited to 'mm/sparse-vmemmap.c'):

 mm/sparse-vmemmap.c | 40 ++++++----------------------------------
 1 file changed, 6 insertions(+), 34 deletions(-)
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 95e2c7638a5c..b05c7663c640 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -43,12 +43,9 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
 				unsigned long goal)
 {
 	return memblock_virt_alloc_try_nid_raw(size, align, goal,
 					       BOOTMEM_ALLOC_ACCESSIBLE, node);
 }
 
-static void *vmemmap_buf;
-static void *vmemmap_buf_end;
-
 void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 {
 	/* If the main allocator is up use that, fallback to bootmem. */
@@ -76,18 +73,10 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 /* need to make sure size is all the same during early stage */
 void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
 {
-	void *ptr;
-
-	if (!vmemmap_buf)
-		return vmemmap_alloc_block(size, node);
-
-	/* take the from buf */
-	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
-	if (ptr + size > vmemmap_buf_end)
-		return vmemmap_alloc_block(size, node);
-
-	vmemmap_buf = ptr + size;
+	void *ptr = sparse_buffer_alloc(size);
 
+	if (!ptr)
+		ptr = vmemmap_alloc_block(size, node);
 	return ptr;
 }
 
@@ -279,19 +268,9 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 				unsigned long map_count, int nodeid)
 {
 	unsigned long pnum;
-	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
-	void *vmemmap_buf_start;
 	int nr_consumed_maps = 0;
 
-	size = ALIGN(size, PMD_SIZE);
-	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
-						      PMD_SIZE, __pa(MAX_DMA_ADDRESS));
-
-	if (vmemmap_buf_start) {
-		vmemmap_buf = vmemmap_buf_start;
-		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
-	}
-
+	sparse_buffer_init(section_map_size() * map_count, nodeid);
 	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
 		if (!present_section_nr(pnum))
 			continue;
@@ -303,12 +282,5 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
 		       __func__);
 	}
-
-	if (vmemmap_buf_start) {
-		/* need to free left buf */
-		memblock_free_early(__pa(vmemmap_buf),
-				    vmemmap_buf_end - vmemmap_buf);
-		vmemmap_buf = NULL;
-		vmemmap_buf_end = NULL;
-	}
+	sparse_buffer_fini();
 }
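
This view is limited to mm/sparse-vmemmap.c, so the sparse_buffer_init()/sparse_buffer_alloc()/sparse_buffer_fini() helpers and section_map_size() that the new code calls are defined elsewhere in the same change (presumably mm/sparse.c and include/linux/mm.h). Below is a minimal sketch of what those helpers amount to, reconstructed from the open-coded bump-pointer logic removed above; the sparsemap_buf names, the PMD_SIZE buffer alignment, and the single __meminitdata buffer are carried over from the deleted code as assumptions, not taken from the full commit.

/*
 * Sketch: one early-boot scratch buffer shared by the sparse code,
 * replacing the file-local vmemmap_buf/vmemmap_buf_end pair.
 */
static void *sparsemap_buf __meminitdata;	/* assumed name */
static void *sparsemap_buf_end __meminitdata;	/* assumed name */

void __init sparse_buffer_init(unsigned long size, int nid)
{
	/* Carve out one contiguous per-node buffer, as the old code did. */
	sparsemap_buf = memblock_virt_alloc_try_nid_raw(size, PMD_SIZE,
						__pa(MAX_DMA_ADDRESS),
						BOOTMEM_ALLOC_ACCESSIBLE, nid);
	sparsemap_buf_end = sparsemap_buf + size;
}

void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		/* Same trick as the removed code: align to the request size. */
		ptr = (void *)ALIGN((unsigned long)sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;	/* exhausted; caller falls back */
		else
			sparsemap_buf = ptr + size;
	}
	return ptr;
}

void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	/* Free the unused tail, as the removed cleanup block did. */
	if (sparsemap_buf && size > 0)
		memblock_free_early(__pa(sparsemap_buf), size);
	sparsemap_buf = NULL;
	sparsemap_buf_end = NULL;
}

With helpers of this shape, sparse_mem_maps_populate_node() reduces to init, per-section allocation through vmemmap_alloc_block_buf() (which now falls back to vmemmap_alloc_block() when the buffer runs out), then fini. section_map_size() plausibly returns ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE) for vmemmap kernels, matching the size computation deleted from this function.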