Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  71
1 file changed, 33 insertions, 38 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cdef1d4b4e4..bd7625676a6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3699,13 +3699,45 @@ void __init free_bootmem_with_active_regions(int nid,
 }
 
 #ifdef CONFIG_HAVE_MEMBLOCK
+/*
+ * Basic iterator support. Return the last range of PFNs for a node
+ * Note: nid == MAX_NUMNODES returns last region regardless of node
+ */
+static int __meminit last_active_region_index_in_nid(int nid)
+{
+	int i;
+
+	for (i = nr_nodemap_entries - 1; i >= 0; i--)
+		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
+			return i;
+
+	return -1;
+}
+
+/*
+ * Basic iterator support. Return the previous active range of PFNs for a node
+ * Note: nid == MAX_NUMNODES returns next region regardless of node
+ */
+static int __meminit previous_active_region_index_in_nid(int index, int nid)
+{
+	for (index = index - 1; index >= 0; index--)
+		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
+			return index;
+
+	return -1;
+}
+
+#define for_each_active_range_index_in_nid_reverse(i, nid) \
+	for (i = last_active_region_index_in_nid(nid); i != -1; \
+		i = previous_active_region_index_in_nid(i, nid))
+
 u64 __init find_memory_core_early(int nid, u64 size, u64 align,
 					u64 goal, u64 limit)
 {
 	int i;
 
 	/* Need to go over early_node_map to find out good range for node */
-	for_each_active_range_index_in_nid(i, nid) {
+	for_each_active_range_index_in_nid_reverse(i, nid) {
 		u64 addr;
 		u64 ei_start, ei_last;
 		u64 final_start, final_end;
@@ -3748,34 +3780,6 @@ int __init add_from_early_node_map(struct range *range, int az,
 	return nr_range;
 }
 
-#ifdef CONFIG_NO_BOOTMEM
-void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
-					u64 goal, u64 limit)
-{
-	void *ptr;
-	u64 addr;
-
-	if (limit > memblock.current_limit)
-		limit = memblock.current_limit;
-
-	addr = find_memory_core_early(nid, size, align, goal, limit);
-
-	if (addr == MEMBLOCK_ERROR)
-		return NULL;
-
-	ptr = phys_to_virt(addr);
-	memset(ptr, 0, size);
-	memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
-	/*
-	 * The min_count is set to 0 so that bootmem allocated blocks
-	 * are never reported as leaks.
-	 */
-	kmemleak_alloc(ptr, size, 0, 0);
-	return ptr;
-}
-#endif
-
-
 void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
 {
 	int i;
@@ -4809,15 +4813,6 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 	dma_reserve = new_dma_reserve;
 }
 
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data __refdata contig_page_data = {
-#ifndef CONFIG_NO_BOOTMEM
-	.bdata = &bootmem_node_data[0]
-#endif
- };
-EXPORT_SYMBOL(contig_page_data);
-#endif
-
 void __init free_area_init(unsigned long *zones_size)
 {
 	free_area_init_node(0, zones_size,
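
Not part of the patch: below is a stand-alone, compilable sketch of the reverse iteration the first hunk introduces, so the new walk order is easy to see outside the kernel. The scaled-down early_node_map contents, the node IDs and the main() harness are invented for illustration; the helper bodies and the macro mirror the added lines above, and struct node_active_region is given the same start_pfn/end_pfn/nid fields the kernel version carries.

/*
 * Illustrative user-space sketch (not kernel code): a tiny early_node_map
 * and the reverse-iteration pattern added by the patch. Values are made up.
 */
#include <stdio.h>

#define MAX_NUMNODES 4			/* stand-in wildcard nid, as in the kernel usage */

struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};

static struct node_active_region early_node_map[] = {
	{ 0x000, 0x100, 0 },
	{ 0x100, 0x200, 1 },
	{ 0x200, 0x300, 0 },
};
static int nr_nodemap_entries = 3;

/* Same logic as the added last_active_region_index_in_nid() */
static int last_active_region_index_in_nid(int nid)
{
	int i;

	for (i = nr_nodemap_entries - 1; i >= 0; i--)
		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
			return i;
	return -1;
}

/* Same logic as the added previous_active_region_index_in_nid() */
static int previous_active_region_index_in_nid(int index, int nid)
{
	for (index = index - 1; index >= 0; index--)
		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
			return index;
	return -1;
}

#define for_each_active_range_index_in_nid_reverse(i, nid) \
	for (i = last_active_region_index_in_nid(nid); i != -1; \
		i = previous_active_region_index_in_nid(i, nid))

int main(void)
{
	int i;

	/* For nid 0 this visits index 2, then index 0: highest range first. */
	for_each_active_range_index_in_nid_reverse(i, 0)
		printf("range %d: pfn %#lx-%#lx\n", i,
		       early_node_map[i].start_pfn, early_node_map[i].end_pfn);
	return 0;
}

Walking early_node_map from the last matching index downward is what lets find_memory_core_early() consider a node's highest-addressed ranges first, a top-down search instead of the previous bottom-up one.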