aboutsummaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2011-07-12 04:46:35 -0400
committerH. Peter Anvin <hpa@linux.intel.com>2011-07-14 14:45:35 -0400
commiteb40c4c27f1722f058e4713ccfedebac577d5190 (patch)
treeb471a4451c7cab125b3aafced4c77c7958fd711d /mm/page_alloc.c
parente64980405cc6aa74ef178d8d9aa4018c867ceed1 (diff)
memblock, x86: Replace memblock_x86_find_in_range_node() with generic memblock calls
With the previous changes, generic NUMA aware memblock API has feature parity with memblock_x86_find_in_range_node(). There currently are two users - x86 setup_node_data() and __alloc_memory_core_early() in nobootmem.c. This patch converts the former to use memblock_alloc_nid() and the latter memblock_find_range_in_node(), and kills memblock_x86_find_in_range_node() and related functions including find_memory_core_early() in page_alloc.c. Signed-off-by: Tejun Heo <tj@kernel.org> Link: http://lkml.kernel.org/r/1310460395-30913-9-git-send-email-tj@kernel.org Cc: Yinghai Lu <yinghai@kernel.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c67
1 files changed, 0 insertions, 67 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 902f03a4fd6..8ab5e5e7fda 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3779,73 +3779,6 @@ void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
3779 } 3779 }
3780} 3780}
3781 3781
3782#ifdef CONFIG_HAVE_MEMBLOCK
3783/*
3784 * Basic iterator support. Return the last range of PFNs for a node
3785 * Note: nid == MAX_NUMNODES returns last region regardless of node
3786 */
3787static int __meminit last_active_region_index_in_nid(int nid)
3788{
3789 int i;
3790
3791 for (i = nr_nodemap_entries - 1; i >= 0; i--)
3792 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3793 return i;
3794
3795 return -1;
3796}
3797
3798/*
3799 * Basic iterator support. Return the previous active range of PFNs for a node
3800 * Note: nid == MAX_NUMNODES returns next region regardless of node
3801 */
3802static int __meminit previous_active_region_index_in_nid(int index, int nid)
3803{
3804 for (index = index - 1; index >= 0; index--)
3805 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3806 return index;
3807
3808 return -1;
3809}
3810
3811#define for_each_active_range_index_in_nid_reverse(i, nid) \
3812 for (i = last_active_region_index_in_nid(nid); i != -1; \
3813 i = previous_active_region_index_in_nid(i, nid))
3814
3815u64 __init find_memory_core_early(int nid, u64 size, u64 align,
3816 u64 goal, u64 limit)
3817{
3818 int i;
3819
3820 /* Need to go over early_node_map to find out good range for node */
3821 for_each_active_range_index_in_nid_reverse(i, nid) {
3822 u64 addr;
3823 u64 ei_start, ei_last;
3824 u64 final_start, final_end;
3825
3826 ei_last = early_node_map[i].end_pfn;
3827 ei_last <<= PAGE_SHIFT;
3828 ei_start = early_node_map[i].start_pfn;
3829 ei_start <<= PAGE_SHIFT;
3830
3831 final_start = max(ei_start, goal);
3832 final_end = min(ei_last, limit);
3833
3834 if (final_start >= final_end)
3835 continue;
3836
3837 addr = memblock_find_in_range(final_start, final_end, size, align);
3838
3839 if (!addr)
3840 continue;
3841
3842 return addr;
3843 }
3844
3845 return 0;
3846}
3847#endif
3848
3849int __init add_from_early_node_map(struct range *range, int az, 3782int __init add_from_early_node_map(struct range *range, int az,
3850 int nr_range, int nid) 3783 int nr_range, int nid)
3851{ 3784{