-rw-r--r--  include/linux/mm.h  |  4
-rw-r--r--  mm/sparse-vmemmap.c | 40
-rw-r--r--  mm/sparse.c         | 45
3 files changed, 54 insertions(+), 35 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2fb32d1561eb..4ace5d50a892 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2671,6 +2671,10 @@ void sparse_mem_maps_populate_node(struct page **map_map,
 		unsigned long map_count,
 		int nodeid);
 
+unsigned long __init section_map_size(void);
+void sparse_buffer_init(unsigned long size, int nid);
+void sparse_buffer_fini(void);
+void *sparse_buffer_alloc(unsigned long size);
 struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
 		struct vmem_altmap *altmap);
 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
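
The four declarations added above are the whole of the new interface: a per-node bootmem buffer that is set up once, carved up by a bump allocator, and then torn down. As a rough sketch of the intended call pattern (the caller name and loop below are hypothetical, modelled on how sparse_mem_maps_populate_node() uses the interface in the hunks that follow):

/*
 * Hypothetical caller, for illustration only.  Reserve enough space for
 * map_count section memory maps on node nid, hand out one map per section,
 * and return any unused tail of the buffer to memblock.
 */
static void __init populate_node_maps(unsigned long map_count, int nid)
{
	unsigned long i;

	sparse_buffer_init(section_map_size() * map_count, nid);
	for (i = 0; i < map_count; i++) {
		void *map = sparse_buffer_alloc(section_map_size());

		if (!map)	/* buffer exhausted: fall back to a one-off allocation */
			map = vmemmap_alloc_block(section_map_size(), nid);
		/* ... hook map up to section i ... */
	}
	sparse_buffer_fini();	/* frees whatever was not handed out */
}
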
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 95e2c7638a5c..b05c7663c640 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -43,12 +43,9 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
 				unsigned long goal)
 {
 	return memblock_virt_alloc_try_nid_raw(size, align, goal,
 					       BOOTMEM_ALLOC_ACCESSIBLE, node);
 }
 
-static void *vmemmap_buf;
-static void *vmemmap_buf_end;
-
 void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 {
 	/* If the main allocator is up use that, fallback to bootmem. */
@@ -76,18 +73,10 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 /* need to make sure size is all the same during early stage */
 void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
 {
-	void *ptr;
-
-	if (!vmemmap_buf)
-		return vmemmap_alloc_block(size, node);
-
-	/* take the from buf */
-	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
-	if (ptr + size > vmemmap_buf_end)
-		return vmemmap_alloc_block(size, node);
-
-	vmemmap_buf = ptr + size;
+	void *ptr = sparse_buffer_alloc(size);
 
+	if (!ptr)
+		ptr = vmemmap_alloc_block(size, node);
 	return ptr;
 }
 
@@ -279,19 +268,9 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 					  unsigned long map_count, int nodeid)
 {
 	unsigned long pnum;
-	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
-	void *vmemmap_buf_start;
 	int nr_consumed_maps = 0;
 
-	size = ALIGN(size, PMD_SIZE);
-	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
-						      PMD_SIZE, __pa(MAX_DMA_ADDRESS));
-
-	if (vmemmap_buf_start) {
-		vmemmap_buf = vmemmap_buf_start;
-		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
-	}
-
+	sparse_buffer_init(section_map_size() * map_count, nodeid);
 	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
 		if (!present_section_nr(pnum))
 			continue;
@@ -303,12 +282,5 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
 		       __func__);
 	}
-
-	if (vmemmap_buf_start) {
-		/* need to free left buf */
-		memblock_free_early(__pa(vmemmap_buf),
-				    vmemmap_buf_end - vmemmap_buf);
-		vmemmap_buf = NULL;
-		vmemmap_buf_end = NULL;
-	}
+	sparse_buffer_fini();
 }
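
With the buffer bookkeeping moved behind sparse_buffer_alloc(), vmemmap_alloc_block_buf() collapses to "try the shared buffer, otherwise allocate a block the normal way". Reassembled from the hunk above, the function now reads:

/* need to make sure size is all the same during early stage */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
	void *ptr = sparse_buffer_alloc(size);

	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}
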
diff --git a/mm/sparse.c b/mm/sparse.c
index 2ea8b3dbd0df..9a0a5f598469 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -400,7 +400,14 @@ static void __init sparse_early_usemaps_alloc_node(void *data,
 	}
 }
 
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+unsigned long __init section_map_size(void)
+
+{
+	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
+}
+
+#else
 struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
 		struct vmem_altmap *altmap)
 {
@@ -457,6 +464,42 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
+static void *sparsemap_buf __meminitdata;
+static void *sparsemap_buf_end __meminitdata;
+
+void __init sparse_buffer_init(unsigned long size, int nid)
+{
+	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
+	sparsemap_buf =
+		memblock_virt_alloc_try_nid_raw(size, PAGE_SIZE,
+						__pa(MAX_DMA_ADDRESS),
+						BOOTMEM_ALLOC_ACCESSIBLE, nid);
+	sparsemap_buf_end = sparsemap_buf + size;
+}
+
+void __init sparse_buffer_fini(void)
+{
+	unsigned long size = sparsemap_buf_end - sparsemap_buf;
+
+	if (sparsemap_buf && size > 0)
+		memblock_free_early(__pa(sparsemap_buf), size);
+	sparsemap_buf = NULL;
+}
+
+void * __meminit sparse_buffer_alloc(unsigned long size)
+{
+	void *ptr = NULL;
+
+	if (sparsemap_buf) {
+		ptr = PTR_ALIGN(sparsemap_buf, size);
+		if (ptr + size > sparsemap_buf_end)
+			ptr = NULL;
+		else
+			sparsemap_buf = ptr + size;
+	}
+	return ptr;
+}
+
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
 static void __init sparse_early_mem_maps_alloc_node(void *data,
 						    unsigned long pnum_begin,
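
Note that sparse_buffer_alloc() aligns the running cursor to the requested size before carving it out, which is why the old comment in vmemmap_alloc_block_buf() about keeping the size "all the same during early stage" still holds: every caller is expected to request the same power-of-two-aligned section_map_size(). The standalone sketch below (plain user-space C with made-up names, not kernel code) models just that bump-pointer behaviour, including the NULL return once the chunk is exhausted:

/*
 * Standalone model of the bump allocator above -- illustration only.
 * Assumes power-of-two allocation sizes, as PTR_ALIGN() does.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define MODEL_PTR_ALIGN(p, a) \
	((void *)(((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1)))

static unsigned char chunk[4096];
static void *buf = chunk;
static void *buf_end = chunk + sizeof(chunk);

static void *model_buffer_alloc(size_t size)
{
	void *ptr = MODEL_PTR_ALIGN(buf, size);	/* align cursor to the request size */

	if ((unsigned char *)ptr + size > (unsigned char *)buf_end)
		return NULL;	/* exhausted: caller falls back to a normal allocation */
	buf = (unsigned char *)ptr + size;
	return ptr;
}

int main(void)
{
	printf("%p\n", model_buffer_alloc(64));		/* near the start of the chunk */
	printf("%p\n", model_buffer_alloc(128));	/* cursor bumped, realigned to 128 */
	printf("%p\n", model_buffer_alloc(8192));	/* NULL: larger than what is left */
	return 0;
}
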