author	Santosh Shilimkar <santosh.shilimkar@ti.com>	2014-01-21 18:50:25 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-21 19:19:47 -0500
commit	6782832eba5e8c87a749a41da8deda1c3ef67ba0 (patch)
tree	c4e0e5cce518a5fcf602886344caa98e87a8e4d0	/mm/page_alloc.c
parent	9da791dfabc60218c81904c7906b45789466e68e (diff)
mm/page_alloc.c: use memblock apis for early memory allocations
Switch to memblock interfaces for the early memory allocator instead of the bootmem allocator. No functional change in behavior from the bootmem users' point of view. Architectures already converted to NO_BOOTMEM now use the memblock interfaces directly instead of the bootmem wrappers built on top of memblock, while on architectures that still use bootmem these new APIs simply fall back to the existing bootmem APIs.

Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Paul Walmsley <paul@pwsan.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Tony Lindgren <tony@atomide.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
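In practical terms, the conversion swaps each pgdat-pointer-based bootmem call for a node-id-based memblock call. A minimal sketch of the pattern, with identifiers taken from the hunks below and the surrounding code simplified:

	/* Before: bootmem API, keyed by a struct pglist_data pointer. */
	map = alloc_bootmem_node_nopanic(pgdat, size);

	/* After: memblock-backed API, keyed by a node id. On NO_BOOTMEM
	 * architectures this allocates directly from memblock; on the
	 * remaining bootmem architectures the wrapper falls back to the
	 * existing bootmem path, so behavior is unchanged. */
	map = memblock_virt_alloc_node_nopanic(size, pgdat->node_id);

Freeing follows the same shape: free_bootmem_node(NODE_DATA(nid), phys, size) becomes memblock_free_early_nid(phys, size, nid), taking a physical address and size plus the node id rather than the pgdat pointer.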
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	27
1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4f59d1986018..b230e838883d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4215,7 +4215,6 @@ static noinline __init_refok
 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 {
 	int i;
-	struct pglist_data *pgdat = zone->zone_pgdat;
 	size_t alloc_size;
 
 	/*
@@ -4231,7 +4230,8 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 
 	if (!slab_is_available()) {
 		zone->wait_table = (wait_queue_head_t *)
-			alloc_bootmem_node_nopanic(pgdat, alloc_size);
+			memblock_virt_alloc_node_nopanic(
+				alloc_size, zone->zone_pgdat->node_id);
 	} else {
 		/*
 		 * This case means that a zone whose size was 0 gets new memory
@@ -4351,13 +4351,14 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
 #endif
 
 /**
- * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
+ * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
- * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
+ * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
  *
  * If an architecture guarantees that all ranges registered with
  * add_active_ranges() contain no holes and may be freed, this
- * this function may be used instead of calling free_bootmem() manually.
+ * this function may be used instead of calling memblock_free_early_nid()
+ * manually.
  */
 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
 {
@@ -4369,9 +4370,9 @@ void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
 		end_pfn = min(end_pfn, max_low_pfn);
 
 		if (start_pfn < end_pfn)
-			free_bootmem_node(NODE_DATA(this_nid),
-					  PFN_PHYS(start_pfn),
-					  (end_pfn - start_pfn) << PAGE_SHIFT);
+			memblock_free_early_nid(PFN_PHYS(start_pfn),
+					(end_pfn - start_pfn) << PAGE_SHIFT,
+					this_nid);
 	}
 }
 
@@ -4642,8 +4643,9 @@ static void __init setup_usemap(struct pglist_data *pgdat,
 	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
 	zone->pageblock_flags = NULL;
 	if (usemapsize)
-		zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
-								   usemapsize);
+		zone->pageblock_flags =
+			memblock_virt_alloc_node_nopanic(usemapsize,
+							 pgdat->node_id);
 }
 #else
 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
@@ -4837,7 +4839,8 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 		size = (end - start) * sizeof(struct page);
 		map = alloc_remap(pgdat->node_id, size);
 		if (!map)
-			map = alloc_bootmem_node_nopanic(pgdat, size);
+			map = memblock_virt_alloc_node_nopanic(size,
+							       pgdat->node_id);
 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
 	}
 #ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -5887,7 +5890,7 @@ void *__init alloc_large_system_hash(const char *tablename,
 	do {
 		size = bucketsize << log2qty;
 		if (flags & HASH_EARLY)
-			table = alloc_bootmem_nopanic(size);
+			table = memblock_virt_alloc_nopanic(size, 0);
 		else if (hashdist)
 			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
 		else {