about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorSiddha, Suresh B <suresh.b.siddha@intel.com>2006-09-26 02:31:47 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-09-26 11:48:50 -0400
commitd2e7b7d0aa021847c59f882b066e7d3812902870 (patch)
tree173a2271e657a1171c25de9b943bdfb92922acab /mm
parent980128f223fa3c75e3ebdde650c9f1bcabd4c0a2 (diff)
[PATCH] fix potential stack overflow in mm/slab.c
On high-end systems (1024 or so CPUs) this can potentially cause stack overflow. Fix the stack usage.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	23
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 2b37a62f6314..619337a5cb2b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3725,22 +3725,26 @@ static void do_ccupdate_local(void *info)
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 			    int batchcount, int shared)
 {
-	struct ccupdate_struct new;
+	struct ccupdate_struct *new;
 	int i;
 
-	memset(&new.new, 0, sizeof(new.new));
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return -ENOMEM;
+
 	for_each_online_cpu(i) {
-		new.new[i] = alloc_arraycache(cpu_to_node(i), limit,
-					      batchcount);
-		if (!new.new[i]) {
+		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
+					       batchcount);
+		if (!new->new[i]) {
 			for (i--; i >= 0; i--)
-				kfree(new.new[i]);
+				kfree(new->new[i]);
+			kfree(new);
 			return -ENOMEM;
 		}
 	}
-	new.cachep = cachep;
+	new->cachep = cachep;
 
-	on_each_cpu(do_ccupdate_local, (void *)&new, 1, 1);
+	on_each_cpu(do_ccupdate_local, (void *)new, 1, 1);
 
 	check_irq_on();
 	cachep->batchcount = batchcount;
@@ -3748,7 +3752,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 	cachep->shared = shared;
 
 	for_each_online_cpu(i) {
-		struct array_cache *ccold = new.new[i];
+		struct array_cache *ccold = new->new[i];
 		if (!ccold)
 			continue;
 		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
@@ -3756,7 +3760,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
 		kfree(ccold);
 	}
-
+	kfree(new);
 	return alloc_kmemlist(cachep);
 }
 
@@ -4274,6 +4278,7 @@ static int leaks_show(struct seq_file *m, void *p)
 		show_symbol(m, n[2*i+2]);
 		seq_putc(m, '\n');
 	}
+
 	return 0;
 }
 