author     Christoph Lameter <clameter@sgi.com>   2006-09-26 02:31:38 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-09-26 11:48:50 -0400
commit     2ed3a4ef95ef1a13a424378c34ebd9b7e593f212
tree       bb08e0b3526ab71639197fad649349dc222e0451  /mm/slab.c
parent     117f6eb1d8b8deb6f19fc88fc15bdb413c2a0c79
[PATCH] slab: do not panic when alloc_kmemlist fails and slab is up
It is fairly easy to get a system to oops by simply sizing a cache via /proc
in such a way that one of the caches (shared is easiest) becomes bigger than
the maximum allowed slab allocation size. This occurs because
enable_cpucache() fails if it cannot reallocate some caches.

However, enable_cpucache() is used for multiple purposes: resizing caches,
cache creation and bootstrap.

If the slab is already up then we already have working caches. The resize
can fail without a problem. We just need to return the proper error code.
For example, after this patch:

# echo "size-64 10000 50 1000" >/proc/slabinfo
-bash: echo: write error: Cannot allocate memory

Note that there is no oops.

If we are doing a kmem_cache_create() then we should also not panic but
return -ENOMEM.

If on the other hand we do not have a fully bootstrapped slab allocator yet
then we should indeed panic, since we are unable to bring up the slab to its
full functionality.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
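For reference, the error policy the patch establishes can be summarized in a
short sketch. This is a hedged userspace illustration, not the kernel code:
the names g_cpucache_up, enable_cpucache() and setup_cpu_cache() mirror
mm/slab.c, but the bodies below are stubs that simulate the oversized-resize
failure.

#include <stdio.h>
#include <errno.h>

enum cpucache_state { NONE, PARTIAL, FULL };
static enum cpucache_state g_cpucache_up = FULL;

/* Stub: simulates enable_cpucache() failing to reallocate a cache. */
static int enable_cpucache(const char *name)
{
        (void)name;
        return -ENOMEM;
}

/* After the patch: propagate the error once the allocator is up. */
static int setup_cpu_cache(const char *name)
{
        if (g_cpucache_up == FULL)
                return enable_cpucache(name);
        /* Bootstrap paths: a failure here is still fatal (BUG()/panic). */
        return 0;
}

int main(void)
{
        /* Case 1: resize via /proc/slabinfo -- the writer sees -ENOMEM. */
        int err = enable_cpucache("size-64");
        if (err)
                fprintf(stderr, "resize failed: error %d, no oops\n", -err);

        /* Case 2: kmem_cache_create() -- tear down the half-built cache
         * and report failure (NULL return) instead of panicking. */
        if (setup_cpu_cache("example"))
                fprintf(stderr, "create fails cleanly with -ENOMEM\n");
        return 0;
}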
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  35
1 file changed, 17 insertions, 18 deletions
diff --git a/mm/slab.c b/mm/slab.c
index c714741b253b..3233c4c7cbce 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache,
                        struct kmem_list3 *l3, int tofree);
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
                        int node);
-static void enable_cpucache(struct kmem_cache *cachep);
+static int enable_cpucache(struct kmem_cache *cachep);
 static void cache_reap(void *unused);
 
 /*
@@ -1490,7 +1490,8 @@ void __init kmem_cache_init(void)
        struct kmem_cache *cachep;
        mutex_lock(&cache_chain_mutex);
        list_for_each_entry(cachep, &cache_chain, next)
-               enable_cpucache(cachep);
+               if (enable_cpucache(cachep))
+                       BUG();
        mutex_unlock(&cache_chain_mutex);
 }
 
@@ -1924,12 +1925,11 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
        return left_over;
 }
 
-static void setup_cpu_cache(struct kmem_cache *cachep)
+static int setup_cpu_cache(struct kmem_cache *cachep)
 {
-       if (g_cpucache_up == FULL) {
-               enable_cpucache(cachep);
-               return;
-       }
+       if (g_cpucache_up == FULL)
+               return enable_cpucache(cachep);
+
        if (g_cpucache_up == NONE) {
                /*
                 * Note: the first kmem_cache_create must create the cache
@@ -1976,6 +1976,7 @@ static void setup_cpu_cache(struct kmem_cache *cachep)
        cpu_cache_get(cachep)->touched = 0;
        cachep->batchcount = 1;
        cachep->limit = BOOT_CPUCACHE_ENTRIES;
+       return 0;
 }
 
 /**
@@ -2242,8 +2243,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
        cachep->dtor = dtor;
        cachep->name = name;
 
-
-       setup_cpu_cache(cachep);
+       if (setup_cpu_cache(cachep)) {
+               __kmem_cache_destroy(cachep);
+               cachep = NULL;
+               goto oops;
+       }
 
        /* cache setup completed, link it into the list */
        list_add(&cachep->next, &cache_chain);
@@ -3693,7 +3697,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
                        int batchcount, int shared)
 {
        struct ccupdate_struct new;
-       int i, err;
+       int i;
 
        memset(&new.new, 0, sizeof(new.new));
        for_each_online_cpu(i) {
@@ -3724,17 +3728,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
                kfree(ccold);
        }
 
-       err = alloc_kmemlist(cachep);
-       if (err) {
-               printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n",
-                      cachep->name, -err);
-               BUG();
-       }
-       return 0;
+       return alloc_kmemlist(cachep);
 }
 
 /* Called with cache_chain_mutex held always */
-static void enable_cpucache(struct kmem_cache *cachep)
+static int enable_cpucache(struct kmem_cache *cachep)
 {
        int err;
        int limit, shared;
@@ -3786,6 +3784,7 @@ static void enable_cpucache(struct kmem_cache *cachep)
        if (err)
                printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
                       cachep->name, -err);
+       return err;
 }
 
 /*
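A caller-side note, as a hedged sketch (my_cache, my_struct and my_init are
hypothetical names, not part of this patch): since a setup failure now
unwinds cleanly instead of oopsing, code that calls kmem_cache_create()
without SLAB_PANIC should check for a NULL return, along these lines:

#include <linux/slab.h>
#include <linux/errno.h>

struct my_struct {
        int data;
};

static struct kmem_cache *my_cache;

static int my_init(void)
{
        /* Six-argument 2.6.18-era signature: ctor and dtor are NULL. */
        my_cache = kmem_cache_create("my_cache", sizeof(struct my_struct),
                                     0, 0, NULL, NULL);
        if (!my_cache)  /* e.g. setup_cpu_cache() failed with -ENOMEM */
                return -ENOMEM;
        return 0;
}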