author		Santosh Shilimkar <santosh.shilimkar@ti.com>	2014-01-21 18:50:34 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-21 19:19:47 -0500
commit		bb016b84164554725899aef544331085e08cb402 (patch)
tree		7e524b87477a8d3fdb119f2523d1e0bd3c32133a /mm/sparse.c
parent		c15295001aa940df4e3cf6574808a4addca9f2e5 (diff)
mm/sparse: use memblock apis for early memory allocations
Switch to memblock interfaces for the early memory allocator instead of the
bootmem allocator.  There is no functional change in behavior from the
bootmem users' point of view.

Architectures already converted to NO_BOOTMEM now use the memblock
interfaces directly instead of the bootmem wrappers built on top of
memblock.  On architectures that still use bootmem, these new APIs simply
fall back to the existing bootmem APIs.

Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Grygorii Strashko <grygorii.strashko@ti.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Paul Walmsley <paul@pwsan.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Tejun Heo <tj@kernel.org>
Cc: Tony Lindgren <tony@atomide.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
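The "fall back to the existing bootmem APIs" behavior comes from compatibility
wrappers in include/linux/bootmem.h introduced earlier in this series.  A
minimal sketch of that arrangement, condensed from the real header rather than
quoted verbatim:

#ifdef CONFIG_NO_BOOTMEM
/* Sketch: NO_BOOTMEM architectures allocate straight from memblock */
static inline void * __init memblock_virt_alloc(phys_addr_t size,
						phys_addr_t align)
{
	return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT,
					   BOOTMEM_ALLOC_ACCESSIBLE,
					   NUMA_NO_NODE);
}
#else
/* Sketch: bootmem architectures route the same call to bootmem */
static inline void * __init memblock_virt_alloc(phys_addr_t size,
						phys_addr_t align)
{
	return __alloc_bootmem(size, align, BOOTMEM_LOW_LIMIT);
}
#endif

Call sites such as mm/sparse.c therefore need no #ifdefs of their own: the
same memblock_virt_alloc*() call works on both classes of architectures.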
Diffstat (limited to 'mm/sparse.c')
-rw-r--r--	mm/sparse.c	27
1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index 8cc7be0e9590..63c3ea5c119c 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -69,7 +69,7 @@ static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
 		else
 			section = kzalloc(array_size, GFP_KERNEL);
 	} else {
-		section = alloc_bootmem_node(NODE_DATA(nid), array_size);
+		section = memblock_virt_alloc_node(array_size, nid);
 	}
 
 	return section;
@@ -279,8 +279,9 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
 	limit = goal + (1UL << PA_SECTION_SHIFT);
 	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
 again:
-	p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
-					  SMP_CACHE_BYTES, goal, limit);
+	p = memblock_virt_alloc_try_nid_nopanic(size,
+						SMP_CACHE_BYTES, goal, limit,
+						nid);
 	if (!p && limit) {
 		limit = 0;
 		goto again;
@@ -331,7 +332,7 @@ static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
 					 unsigned long size)
 {
-	return alloc_bootmem_node_nopanic(pgdat, size);
+	return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -376,8 +377,9 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
 		return map;
 
 	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
-	map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
-					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	map = memblock_virt_alloc_try_nid(size,
+					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+					  BOOTMEM_ALLOC_ACCESSIBLE, nid);
 	return map;
 }
 void __init sparse_mem_maps_populate_node(struct page **map_map,
@@ -401,8 +403,9 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 	}
 
 	size = PAGE_ALIGN(size);
-	map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
-					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	map = memblock_virt_alloc_try_nid(size * map_count,
+					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+					  BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
 	if (map) {
 		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
 			if (!present_section_nr(pnum))
@@ -545,7 +548,7 @@ void __init sparse_init(void)
 	 * sparse_early_mem_map_alloc, so allocate usemap_map at first.
 	 */
 	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
-	usemap_map = alloc_bootmem(size);
+	usemap_map = memblock_virt_alloc(size, 0);
 	if (!usemap_map)
 		panic("can not allocate usemap_map\n");
 	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
@@ -553,7 +556,7 @@ void __init sparse_init(void)
 
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
 	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
-	map_map = alloc_bootmem(size2);
+	map_map = memblock_virt_alloc(size2, 0);
 	if (!map_map)
 		panic("can not allocate map_map\n");
 	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
@@ -583,9 +586,9 @@ void __init sparse_init(void)
 	vmemmap_populate_print_last();
 
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-	free_bootmem(__pa(map_map), size2);
+	memblock_free_early(__pa(map_map), size2);
 #endif
-	free_bootmem(__pa(usemap_map), size);
+	memblock_free_early(__pa(usemap_map), size);
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
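The only subtle part of the conversion above is the argument reordering:
bootmem takes the node first, as a pglist_data pointer, while the memblock
wrappers take the size first and a plain node id last.  Schematically
(prototypes paraphrased from the two allocators, not quoted verbatim):

/* old: bootmem variant used above; node first, via pg_data_t */
void *__alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
				unsigned long align, unsigned long goal);

/*
 * new: memblock variant; size first, node id last, with an explicit
 * [min_addr, max_addr] window (BOOTMEM_ALLOC_ACCESSIBLE asks for any
 * accessible memory rather than a fixed upper bound)
 */
void *memblock_virt_alloc_try_nid(phys_addr_t size, phys_addr_t align,
				  phys_addr_t min_addr, phys_addr_t max_addr,
				  int nid);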