author	Pekka Enberg <penberg@cs.helsinki.fi>	2008-01-25 01:20:51 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-01-25 11:30:36 -0500
commit	556a169dab38b5100df6f4a45b655dddd3db94c1 (patch)
tree	c7788980db8dfb401dd4cf28f4445e1ab98f51d2 /mm
parent	eb36f4fc019835cecf0788907f6cab774508087b (diff)
slab: fix bootstrap on memoryless node
If the node we're booting on doesn't have memory, bootstrapping kmalloc()
caches resorts to fallback_alloc() which requires ->nodelists set for all
nodes.  Fix that by calling set_up_list3s() for CACHE_CACHE in
kmem_cache_init().

As kmem_getpages() is called with GFP_THISNODE set, this used to work before
because of breakage in 2.6.22 and before with GFP_THISNODE returning pages
from the wrong node if a node had no memory.  So it may have worked
accidentally and in an unsafe manner because the pages would have been
associated with the wrong node which could trigger bug ons and locking
troubles.

Tested-by: Mel Gorman <mel@csn.ul.ie>
Tested-by: Olaf Hering <olaf@aepfle.de>
Reviewed-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
[ With additional one-liner by Olaf Hering - Linus ]
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
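For reference, the new bootstrap layout can be pictured as three groups of
MAX_NUMNODES slots inside the static initkmem_list3[] array, one group per
bootstrap cache.  The standalone C sketch below (not part of the patch;
MAX_NUMNODES is stubbed to 4 purely for illustration) prints which slot each
bootstrap cache owns for each node:

	/*
	 * Illustrative sketch only: shows how CACHE_CACHE, SIZE_AC and
	 * SIZE_L3 index three per-node groups after this patch.
	 * MAX_NUMNODES is an assumed demo value, not the kernel config.
	 */
	#include <stdio.h>

	#define MAX_NUMNODES	4		/* assumption for the demo */
	#define CACHE_CACHE	0		/* lists for cache_cache */
	#define SIZE_AC		MAX_NUMNODES	/* lists for the AC kmalloc cache */
	#define SIZE_L3		(2 * MAX_NUMNODES) /* lists for the l3 kmalloc cache */
	#define NUM_INIT_LISTS	(3 * MAX_NUMNODES)

	int main(void)
	{
		int node;

		/* Every node, with or without memory, gets a slot in each group. */
		for (node = 0; node < MAX_NUMNODES; node++)
			printf("node %d: CACHE_CACHE=%d SIZE_AC=%d SIZE_L3=%d of %d\n",
			       node, CACHE_CACHE + node, SIZE_AC + node,
			       SIZE_L3 + node, NUM_INIT_LISTS);
		return 0;
	}

Because every online node is guaranteed a statically allocated kmem_list3 in
each group, fallback_alloc() can no longer hit a NULL ->nodelists entry while
the kmalloc() caches are being bootstrapped.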
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	46
1 files changed, 23 insertions, 23 deletions
diff --git a/mm/slab.c b/mm/slab.c
index b03b2e46b806..ff31261fd24f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -304,11 +304,11 @@ struct kmem_list3 {
 /*
  * Need this for bootstrapping a per node allocator.
  */
-#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
+#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
 struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define CACHE_CACHE 0
-#define SIZE_AC 1
-#define SIZE_L3 (1 + MAX_NUMNODES)
+#define SIZE_AC MAX_NUMNODES
+#define SIZE_L3 (2 * MAX_NUMNODES)
 
 static int drain_freelist(struct kmem_cache *cache,
 			struct kmem_list3 *l3, int tofree);
@@ -1410,6 +1410,22 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 }
 
 /*
+ * For setting up all the kmem_list3s for cache whose buffer_size is same as
+ * size of kmem_list3.
+ */
+static void __init set_up_list3s(struct kmem_cache *cachep, int index)
+{
+	int node;
+
+	for_each_online_node(node) {
+		cachep->nodelists[node] = &initkmem_list3[index + node];
+		cachep->nodelists[node]->next_reap = jiffies +
+		    REAPTIMEOUT_LIST3 +
+		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+	}
+}
+
+/*
  * Initialisation. Called after the page allocator have been initialised and
  * before smp_init().
  */
@@ -1432,6 +1448,7 @@ void __init kmem_cache_init(void)
 		if (i < MAX_NUMNODES)
 			cache_cache.nodelists[i] = NULL;
 	}
+	set_up_list3s(&cache_cache, CACHE_CACHE);
 
 	/*
 	 * Fragmentation resistance on low memory - only use bigger
@@ -1587,10 +1604,9 @@ void __init kmem_cache_init(void)
 	{
 		int nid;
 
-		/* Replace the static kmem_list3 structures for the boot cpu */
-		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node);
-
 		for_each_online_node(nid) {
+			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], nid);
+
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
 				  &initkmem_list3[SIZE_AC + nid], nid);
 
@@ -1960,22 +1976,6 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 	}
 }
 
-/*
- * For setting up all the kmem_list3s for cache whose buffer_size is same as
- * size of kmem_list3.
- */
-static void __init set_up_list3s(struct kmem_cache *cachep, int index)
-{
-	int node;
-
-	for_each_online_node(node) {
-		cachep->nodelists[node] = &initkmem_list3[index + node];
-		cachep->nodelists[node]->next_reap = jiffies +
-		    REAPTIMEOUT_LIST3 +
-		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
-	}
-}
-
 static void __kmem_cache_destroy(struct kmem_cache *cachep)
 {
 	int i;
@@ -2099,7 +2099,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
 			g_cpucache_up = PARTIAL_L3;
 		} else {
 			int node;
-			for_each_node_state(node, N_NORMAL_MEMORY) {
+			for_each_online_node(node) {
 				cachep->nodelists[node] =
 				    kmalloc_node(sizeof(struct kmem_list3),
 						GFP_KERNEL, node);