Diffstat (limited to 'arch/powerpc/mm/numa.c')
 arch/powerpc/mm/numa.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index cf81049e1e51..213664c9cdca 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -840,8 +840,16 @@ static void __init *careful_allocation(int nid, unsigned long size,
 		size, nid);
 
 	/*
-	 * If the memory came from a previously allocated node, we must
-	 * retry with the bootmem allocator.
+	 * We initialize the nodes in numeric order: 0, 1, 2...
+	 * and hand over control from the LMB allocator to the
+	 * bootmem allocator.  If this function is called for
+	 * node 5, then we know that all nodes <5 are using the
+	 * bootmem allocator instead of the LMB allocator.
+	 *
+	 * So, check the nid from which this allocation came
+	 * and double check to see if we need to use bootmem
+	 * instead of the LMB.  We don't free the LMB memory
+	 * since it would be useless.
 	 */
 	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
 	if (new_nid < nid) {
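
Note on the invariant the new comment describes: node initialization proceeds in
ascending numeric order, and once a node has been initialized its memory is
managed by bootmem rather than the LMB allocator. The check new_nid < nid
therefore detects that an LMB allocation landed on a node that has already
switched over, in which case the allocation is retried through
__alloc_bootmem_node(). The following is a minimal userspace C sketch of that
fallback logic; early_alloc(), bootmem_alloc_node(), and
careful_allocation_model() are hypothetical stand-ins for the kernel's
LMB/bootmem interfaces, not the real API.

#include <stdio.h>
#include <stdlib.h>

#define NR_NODES    4
#define ALLOC_SIZE  64

/* Hypothetical stand-in for the LMB allocator: early boot memory can
 * come from any node, so the model just picks one pseudo-randomly. */
static void *early_alloc(int *got_nid)
{
	*got_nid = rand() % NR_NODES;
	return malloc(ALLOC_SIZE);
}

/* Hypothetical stand-in for __alloc_bootmem_node(). */
static void *bootmem_alloc_node(int nid)
{
	(void)nid;
	return malloc(ALLOC_SIZE);
}

/* Model of the careful_allocation() check: nodes come up in order
 * 0, 1, 2..., so any node numbered below the one currently being
 * initialized has already handed its memory over to bootmem. */
static void *careful_allocation_model(int nid)
{
	int new_nid;
	void *ret = early_alloc(&new_nid);

	if (new_nid < nid) {
		/* The block landed on an already-initialized node, so it
		 * must be re-allocated from that node's bootmem.  (The real
		 * kernel leaves the LMB block in place; the model frees its
		 * malloc()ed stand-in only to keep the demo leak-free.) */
		free(ret);
		ret = bootmem_alloc_node(new_nid);
		printf("node %d: retried on node %d via bootmem\n",
		       nid, new_nid);
	}
	return ret;
}

int main(void)
{
	for (int nid = 0; nid < NR_NODES; nid++) {
		void *p = careful_allocation_model(nid);
		free(p);
	}
	return 0;
}

In the kernel itself the superseded LMB block is deliberately left in place:
as the comment in the diff notes, freeing it back to the LMB allocator would
be useless once that node's memory is already under bootmem's control.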