author     Christoph Lameter <clameter@sgi.com>    2006-03-25 06:06:47 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-03-25 11:22:50 -0500
commit     0718dc2a82c865ca75975acabaf984057f9fd488 (patch)
tree       0f7e8ed22ea26c79e17712c417593bdce04e7888
parent     cafeb02e098ecd58fb0bd797b2c9fbba3edf54f8 (diff)
[PATCH] slab: fix memory leak in alloc_kmemlist
We have had this memory leak for a while now. The situation is complicated by
the use of alloc_kmemlist() as a function to resize various caches by
do_tune_cpucache().

What we do here is first of all make sure that we deallocate properly in the
loop over all the nodes.

If we are just resizing caches then we can simply return with -ENOMEM if an
allocation fails.

If the cache is new then we need to roll back and remove all earlier
allocations.

We detect that a cache is new by checking if the link to the global cache
chain has been set up. This is a bit hackish ...

(Also fix up the overlong lines that I added in the last patch.)

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Jesper Juhl <jesper.juhl@gmail.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
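To make the two failure modes in the message concrete, here is a minimal user-space C sketch of the same pattern; it is an illustration, not the kernel code. On allocation failure, a live (already published) object keeps its existing per-node state and the caller simply sees the error, while a brand-new object has every node set up so far rolled back. All names in it (struct node_state, struct ctx, the live flag, MAX_NODES, alloc_per_node) are hypothetical stand-ins, not identifiers from mm/slab.c.

/*
 * Sketch of the patch's error-handling pattern, using illustrative
 * user-space names.  Not kernel code.
 */
#include <stdlib.h>

#define MAX_NODES 4

struct node_state {
	void *shared;
	void *alien;
};

struct ctx {
	struct node_state *nodes[MAX_NODES];
	int live;	/* nonzero once the object is linked into a global list */
};

static void free_node_state(struct node_state *ns)
{
	if (!ns)
		return;
	free(ns->shared);
	free(ns->alien);
	free(ns);
}

static int alloc_per_node(struct ctx *c)
{
	int node;

	for (node = 0; node < MAX_NODES; node++) {
		struct node_state *ns = calloc(1, sizeof(*ns));

		if (!ns)
			goto fail;

		ns->shared = malloc(64);
		ns->alien = malloc(64);
		if (!ns->shared || !ns->alien) {
			/* undo only the allocations made for this node */
			free_node_state(ns);
			goto fail;
		}

		/* resize case: replace any state this node already had */
		free_node_state(c->nodes[node]);
		c->nodes[node] = ns;
	}
	return 0;

fail:
	if (!c->live) {
		/*
		 * The object was never published, so partially initialized
		 * state is useless: walk back over the nodes that were fully
		 * set up and free them, mirroring the patch's rollback loop.
		 */
		while (--node >= 0) {
			free_node_state(c->nodes[node]);
			c->nodes[node] = NULL;
		}
	}
	/*
	 * If the object is live (a resize), the previously valid per-node
	 * state stays in place and the caller only sees the failure.
	 */
	return -1;	/* stands in for -ENOMEM */
}

The decisive detail is the order in the rollback loop: the node index is decremented before the first free, so only nodes that were completely set up are touched, matching the node-- before the while loop in the patch below.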
-rw-r--r--  mm/slab.c  33
1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index ef9f60fe37d6..681837499d7d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3418,7 +3418,7 @@ const char *kmem_cache_name(struct kmem_cache *cachep)
 EXPORT_SYMBOL_GPL(kmem_cache_name);
 
 /*
- * This initializes kmem_list3 for all nodes.
+ * This initializes kmem_list3 or resizes varioius caches for all nodes.
  */
 static int alloc_kmemlist(struct kmem_cache *cachep)
 {
@@ -3433,10 +3433,13 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 		if (!new_alien)
 			goto fail;
 
-		new_shared = alloc_arraycache(node, cachep->shared*cachep->batchcount,
+		new_shared = alloc_arraycache(node,
+				cachep->shared*cachep->batchcount,
 					0xbaadf00d);
-		if (!new_shared)
+		if (!new_shared) {
+			free_alien_cache(new_alien);
 			goto fail;
+		}
 
 		l3 = cachep->nodelists[node];
 		if (l3) {
@@ -3445,7 +3448,8 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 			spin_lock_irq(&l3->list_lock);
 
 			if (shared)
-				free_block(cachep, shared->entry, shared->avail, node);
+				free_block(cachep, shared->entry,
+						shared->avail, node);
 
 			l3->shared = new_shared;
 			if (!l3->alien) {
@@ -3460,8 +3464,11 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 			continue;
 		}
 		l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
-		if (!l3)
+		if (!l3) {
+			free_alien_cache(new_alien);
+			kfree(new_shared);
 			goto fail;
+		}
 
 		kmem_list3_init(l3);
 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
@@ -3473,7 +3480,23 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 		cachep->nodelists[node] = l3;
 	}
 	return 0;
+
 fail:
+	if (!cachep->next.next) {
+		/* Cache is not active yet. Roll back what we did */
+		node--;
+		while (node >= 0) {
+			if (cachep->nodelists[node]) {
+				l3 = cachep->nodelists[node];
+
+				kfree(l3->shared);
+				free_alien_cache(l3->alien);
+				kfree(l3);
+				cachep->nodelists[node] = NULL;
+			}
+			node--;
+		}
+	}
 	return -ENOMEM;
 }
 