author	Christoph Lameter <clameter@sgi.com>	2006-10-21 13:24:16 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-21 16:35:06 -0400
commit	aedb0eb107961a234f7c38e53da65a8f7ea992a9 (patch)
tree	feb4be6542e39916fe13517583cffc2ca7a4e2c3 /mm
parent	7516795739bd53175629b90fab0ad488d7a6a9f7 (diff)
[PATCH] Slab: Do not fallback to nodes that have not been bootstrapped yet
The zonelist may contain zones of nodes that have not been bootstrapped, and we will oops if we try to allocate from those zones. So check that the node information for the slab and the node has been set up before attempting an allocation; if it has not, skip that zone.

Usually we will not encounter this situation, since the slab bootstrap code avoids falling back before we have set up the respective nodes, but we seem to have special needs for powerpc.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Andy Whitcroft <apw@shadowen.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Mike Kravetz <kravetz@us.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Will Schmidt <will_schmidt@vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 266449d604bd..84c631f30741 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3152,12 +3152,15 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 	struct zone **z;
 	void *obj = NULL;
 
-	for (z = zonelist->zones; *z && !obj; z++)
+	for (z = zonelist->zones; *z && !obj; z++) {
+		int nid = zone_to_nid(*z);
+
 		if (zone_idx(*z) <= ZONE_NORMAL &&
-			cpuset_zone_allowed(*z, flags))
+			cpuset_zone_allowed(*z, flags) &&
+			cache->nodelists[nid])
 			obj = __cache_alloc_node(cache,
-					flags | __GFP_THISNODE,
-					zone_to_nid(*z));
+					flags | __GFP_THISNODE, nid);
+	}
 	return obj;
 }
 
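For reference, this is roughly how the fallback loop in fallback_alloc() reads once the patch is applied. The hunk above shows only the loop itself; the zonelist initialization at the top of the function is not part of this commit and is reconstructed here from the 2.6.19-era mm/slab.c, so its exact form is an assumption.

void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
{
	/* Zonelist setup sits above the hunk shown in the diff; reconstructed
	 * from the same era's mm/slab.c, so details may differ. */
	struct zonelist *zonelist = &NODE_DATA(slab_node(current->mempolicy))
					->node_zonelists[gfp_zone(flags)];
	struct zone **z;
	void *obj = NULL;

	for (z = zonelist->zones; *z && !obj; z++) {
		int nid = zone_to_nid(*z);

		/* Skip nodes whose per-node slab structures have not been
		 * bootstrapped yet: cache->nodelists[nid] is still NULL for
		 * them, and allocating from such a node would oops. */
		if (zone_idx(*z) <= ZONE_NORMAL &&
			cpuset_zone_allowed(*z, flags) &&
			cache->nodelists[nid])
			obj = __cache_alloc_node(cache,
					flags | __GFP_THISNODE, nid);
	}
	return obj;
}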