path: root/arch/powerpc/mm
author    Dave Hansen <dave@linux.vnet.ibm.com>  2008-12-11 03:36:06 -0500
committer Paul Mackerras <paulus@samba.org>      2008-12-15 21:48:18 -0500
commit    a4c74ddd5ea3db53fc73d29c222b22656a7d05be (patch)
tree      1c2d49c9e3c74fbe612ff7fda74770d24f51e154 /arch/powerpc/mm
parent    48f797de550d39ea35552646c34149991362ff7f (diff)
powerpc: Fix bootmem reservation on uninitialized node
careful_allocation() was calling into the bootmem allocator for nodes
which had not been fully initialized, and caused a previous bug:
http://patchwork.ozlabs.org/patch/10528/  So, I merged a few broken-out
loops in do_init_bootmem() to fix it.  That changed the code ordering.

I think this bug is triggered by having reserved areas for a node which
are spanned by another node's contents.  In the
mark_reserved_regions_for_nid() code, we attempt to reserve the area
for a node before we have allocated the NODE_DATA() for that nid.  We
do this since I reordered that loop.  I suck.

This is causing crashes at bootup on some systems, as reported by Jon
Tollefson.  It may only show up on systems that have 16GB pages
reserved, but it can probably happen on any system that tries to
reserve large swaths of memory that happen to span other nodes'
contents.

This commit ensures that we do not touch bootmem for any node which
has not been initialized, and also removes a compile warning about an
unused variable.

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
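The fix boils down to one guard in the region walk.  As a rough
illustration, here is a minimal, self-contained C model of that idea.
This is not the kernel source: the regions[] table, the pfn values, and
the printf stand-in for reserve_bootmem_node() are all hypothetical.
The real hunk follows in the diff below.

#include <stdio.h>

struct active_region {
	int nid;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

/* A reserved range spanning active regions owned by two nodes. */
static struct active_region regions[] = {
	{ .nid = 0, .start_pfn = 0,    .end_pfn = 1024 },
	{ .nid = 1, .start_pfn = 1024, .end_pfn = 2048 },
};

static void mark_reserved_regions_for_nid(int nid)
{
	for (size_t i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
		/*
		 * Only worry about *this* node.  The caller initializes
		 * nodes one at a time, so regions owned by other (possibly
		 * not-yet-initialized) nodes are skipped here; they are
		 * reserved on a later pass with their own nid.
		 */
		if (regions[i].nid != nid)
			continue;
		printf("reserve_bootmem %lx %lx nid=%d\n",
		       regions[i].start_pfn, regions[i].end_pfn, nid);
	}
}

int main(void)
{
	/* Each pass reserves only the span owned by that node. */
	mark_reserved_regions_for_nid(0);
	mark_reserved_regions_for_nid(1);
	return 0;
}

In this model each pass prints exactly one reserve line.  Before the
patch, the equivalent kernel loop reserved every spanned region via
NODE_DATA(node_ar.nid) regardless of owner, touching nodes whose
NODE_DATA() had not been allocated yet.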
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/numa.c | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index a8397bbad3d..cf81049e1e5 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -901,10 +901,17 @@ static void mark_reserved_regions_for_nid(int nid)
 			if (end_pfn > node_ar.end_pfn)
 				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
 					- (start_pfn << PAGE_SHIFT);
-			dbg("reserve_bootmem %lx %lx nid=%d\n", physbase,
-				reserve_size, node_ar.nid);
-			reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase,
-						reserve_size, BOOTMEM_DEFAULT);
+			/*
+			 * Only worry about *this* node, others may not
+			 * yet have valid NODE_DATA().
+			 */
+			if (node_ar.nid == nid) {
+				dbg("reserve_bootmem %lx %lx nid=%d\n",
+					physbase, reserve_size, node_ar.nid);
+				reserve_bootmem_node(NODE_DATA(node_ar.nid),
+						physbase, reserve_size,
+						BOOTMEM_DEFAULT);
+			}
 			/*
 			 * if reserved region is contained in the active region
 			 * then done.
@@ -929,7 +936,6 @@ static void mark_reserved_regions_for_nid(int nid)
 void __init do_init_bootmem(void)
 {
 	int nid;
-	unsigned int i;
 
 	min_low_pfn = 0;
 	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;