author	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-21 22:05:45 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-21 22:05:45 -0500
commit	df32e43a54d04eda35d2859beaf90e3864d53288 (patch)
tree	7a61cf658b2949bd426285eb9902be7758ced1ba	/mm/sparse.c
parent	fbd918a2026d0464ce9c23f57b7de4bcfccdc2e6 (diff)
parent	78d5506e82b21a1a1de68c24182db2c2fe521422 (diff)
Merge branch 'akpm' (incoming from Andrew)
Merge first patch-bomb from Andrew Morton:

 - a couple of misc things
 - inotify/fsnotify work from Jan
 - ocfs2 updates (partial)
 - about half of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (117 commits)
  mm/migrate: remove unused function, fail_migrate_page()
  mm/migrate: remove putback_lru_pages, fix comment on putback_movable_pages
  mm/migrate: correct failure handling if !hugepage_migration_support()
  mm/migrate: add comment about permanent failure path
  mm, page_alloc: warn for non-blockable __GFP_NOFAIL allocation failure
  mm: compaction: reset scanner positions immediately when they meet
  mm: compaction: do not mark unmovable pageblocks as skipped in async compaction
  mm: compaction: detect when scanners meet in isolate_freepages
  mm: compaction: reset cached scanner pfn's before reading them
  mm: compaction: encapsulate defer reset logic
  mm: compaction: trace compaction begin and end
  memcg, oom: lock mem_cgroup_print_oom_info
  sched: add tracepoints related to NUMA task migration
  mm: numa: do not automatically migrate KSM pages
  mm: numa: trace tasks that fail migration due to rate limiting
  mm: numa: limit scope of lock for NUMA migrate rate limiting
  mm: numa: make NUMA-migrate related functions static
  lib/show_mem.c: show num_poisoned_pages when oom
  mm/hwpoison: add '#' to hwpoison_inject
  mm/memblock: use WARN_ONCE when MAX_NUMNODES passed as input parameter
  ...
Diffstat (limited to 'mm/sparse.c')
-rw-r--r--	mm/sparse.c	27
1 file changed, 15 insertions(+), 12 deletions(-)
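Every hunk in this diff follows the same pattern: a legacy bootmem allocator call becomes its memblock_virt_alloc*() counterpart from the early-memory API merged here. A rough sketch of the correspondence, assuming a kernel context (the enclosing function is hypothetical; the memblock wrappers are from include/linux/bootmem.h of this era):

#include <linux/bootmem.h>

/* Hypothetical illustration only: one line per conversion seen below. */
static void __init bootmem_to_memblock_examples(int nid, unsigned long size)
{
	void *p;

	/* was: alloc_bootmem_node(NODE_DATA(nid), size) */
	p = memblock_virt_alloc_node(size, nid);

	/* was: alloc_bootmem_node_nopanic(pgdat, size) */
	p = memblock_virt_alloc_node_nopanic(size, nid);

	/* was: alloc_bootmem(size); an align of 0 defaults to SMP_CACHE_BYTES */
	p = memblock_virt_alloc(size, 0);

	/* was: free_bootmem(__pa(p), size) */
	memblock_free_early(__pa(p), size);
}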
diff --git a/mm/sparse.c b/mm/sparse.c
index 8cc7be0e9590..63c3ea5c119c 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -69,7 +69,7 @@ static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
 	else
 		section = kzalloc(array_size, GFP_KERNEL);
 	} else {
-		section = alloc_bootmem_node(NODE_DATA(nid), array_size);
+		section = memblock_virt_alloc_node(array_size, nid);
 	}
 
 	return section;
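Only the early-boot branch changes here; once slab is up, the kzalloc paths are untouched. A minimal sketch of the same dispatch, assuming a kernel context (the helper name is hypothetical; memblock_virt_alloc_node() returns zeroed memory, node-local if possible):

static void * __init alloc_section_roots(unsigned long array_size, int nid)
{
	/* slab allocators only work once slab_is_available() is true */
	if (slab_is_available())
		return kzalloc_node(array_size, GFP_KERNEL, nid);

	/* early boot: memblock hands back zeroed, node-local memory */
	return memblock_virt_alloc_node(array_size, nid);
}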
@@ -279,8 +279,9 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
 	limit = goal + (1UL << PA_SECTION_SHIFT);
 	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
 again:
-	p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
-					  SMP_CACHE_BYTES, goal, limit);
+	p = memblock_virt_alloc_try_nid_nopanic(size,
+						SMP_CACHE_BYTES, goal, limit,
+						nid);
 	if (!p && limit) {
 		limit = 0;
 		goto again;
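The conversion preserves the two-pass placement policy around the again: label: first try to land within one section of the goal, then drop the upper bound. A sketch of that retry loop, assuming a kernel context (function name hypothetical; BOOTMEM_ALLOC_ACCESSIBLE is 0, so clearing limit removes the upper bound):

static void * __init alloc_near_goal(unsigned long size, phys_addr_t goal,
				     phys_addr_t limit, int nid)
{
	void *p;
again:
	/* node-local attempt confined to [goal, limit) */
	p = memblock_virt_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
						goal, limit, nid);
	if (!p && limit) {
		limit = 0;	/* == BOOTMEM_ALLOC_ACCESSIBLE: no upper bound */
		goto again;
	}
	return p;	/* the _nopanic variant may return NULL */
}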
@@ -331,7 +332,7 @@ static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
 					  unsigned long size)
 {
-	return alloc_bootmem_node_nopanic(pgdat, size);
+	return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -376,8 +377,9 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
 		return map;
 
 	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
-	map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
-					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	map = memblock_virt_alloc_try_nid(size,
+					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+					  BOOTMEM_ALLOC_ACCESSIBLE, nid);
 	return map;
 }
 void __init sparse_mem_maps_populate_node(struct page **map_map,
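Here the old (node, goal) pair of __alloc_bootmem_node_high() becomes an explicit (min_addr, max_addr, nid) triple: the section memmap is kept above the DMA zone, and BOOTMEM_ALLOC_ACCESSIBLE leaves the upper bound open. A sketch, assuming a kernel context (helper name hypothetical; this non-_nopanic variant panics rather than return NULL):

static struct page * __init alloc_one_memmap(int nid)
{
	unsigned long size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);

	/* page-aligned, above the DMA zone, node-local if possible */
	return memblock_virt_alloc_try_nid(size, PAGE_SIZE,
					   __pa(MAX_DMA_ADDRESS),
					   BOOTMEM_ALLOC_ACCESSIBLE, nid);
}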
@@ -401,8 +403,9 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 	}
 
 	size = PAGE_ALIGN(size);
-	map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
-					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	map = memblock_virt_alloc_try_nid(size * map_count,
+					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+					  BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
 	if (map) {
 		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
 			if (!present_section_nr(pnum))
@@ -545,7 +548,7 @@ void __init sparse_init(void)
 	 * sparse_early_mem_map_alloc, so allocate usemap_map at first.
 	 */
 	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
-	usemap_map = alloc_bootmem(size);
+	usemap_map = memblock_virt_alloc(size, 0);
 	if (!usemap_map)
 		panic("can not allocate usemap_map\n");
 	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
@@ -553,7 +556,7 @@ void __init sparse_init(void)
 
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
 	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
-	map_map = alloc_bootmem(size2);
+	map_map = memblock_virt_alloc(size2, 0);
 	if (!map_map)
 		panic("can not allocate map_map\n");
 	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
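memblock_virt_alloc() is the panicking variant (and an alignment of 0 defaults to SMP_CACHE_BYTES), so the NULL checks carried over from the bootmem version are effectively defensive. A sketch of the usemap_map allocation, assuming a kernel context (helper name hypothetical):

static unsigned long ** __init alloc_usemap_map(void)
{
	unsigned long size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	unsigned long **map = memblock_virt_alloc(size, 0); /* 0: SMP_CACHE_BYTES */

	if (!map)	/* defensive: this variant already panics on failure */
		panic("can not allocate usemap_map\n");
	return map;
}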
@@ -583,9 +586,9 @@ void __init sparse_init(void)
 	vmemmap_populate_print_last();
 
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-	free_bootmem(__pa(map_map), size2);
+	memblock_free_early(__pa(map_map), size2);
 #endif
-	free_bootmem(__pa(usemap_map), size);
+	memblock_free_early(__pa(usemap_map), size);
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
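The free side mirrors free_bootmem(): both take a physical address, which is why the __pa() conversions in the final hunk survive unchanged. A minimal sketch of the allocate/use/free lifecycle, assuming a kernel context (function name hypothetical):

static void __init early_scratch_example(unsigned long size)
{
	/* allocation returns a virtual pointer ... */
	void *scratch = memblock_virt_alloc(size, 0);

	/* ... use scratch during early init ... */

	/* ... but the early free API takes a physical address */
	memblock_free_early(__pa(scratch), size);
}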