diff options
author | Eric Dumazet <eric.dumazet@gmail.com> | 2010-03-24 17:25:47 -0400 |
---|---|---|
committer | Pekka Enberg <penberg@cs.helsinki.fi> | 2010-05-22 03:57:30 -0400 |
commit | bbd7d57bfe852d9788bae5fb171c7edb4021d8ac (patch) | |
tree | 9b16ed244e0bd0b71a1d3d6e662be310f1e8f9c2 /mm | |
parent | e40152ee1e1c7a63f4777791863215e3faa37a86 (diff) |
slub: Potential stack overflow
I discovered that we can overflow the stack if CONFIG_SLUB_DEBUG=y and use slabs
with many objects, since list_slab_objects() and process_slab() use
DECLARE_BITMAP(map, page->objects).
With 65535 bits, we use 8192 bytes of stack ...
Switch these allocations to dynamic allocations.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/slub.c | 25 |
1 file changed, 16 insertions(+), 9 deletions(-)
@@ -2429,9 +2429,11 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, | |||
2429 | #ifdef CONFIG_SLUB_DEBUG | 2429 | #ifdef CONFIG_SLUB_DEBUG |
2430 | void *addr = page_address(page); | 2430 | void *addr = page_address(page); |
2431 | void *p; | 2431 | void *p; |
2432 | DECLARE_BITMAP(map, page->objects); | 2432 | long *map = kzalloc(BITS_TO_LONGS(page->objects) * sizeof(long), |
2433 | GFP_ATOMIC); | ||
2433 | 2434 | ||
2434 | bitmap_zero(map, page->objects); | 2435 | if (!map) |
2436 | return; | ||
2435 | slab_err(s, page, "%s", text); | 2437 | slab_err(s, page, "%s", text); |
2436 | slab_lock(page); | 2438 | slab_lock(page); |
2437 | for_each_free_object(p, s, page->freelist) | 2439 | for_each_free_object(p, s, page->freelist) |
@@ -2446,6 +2448,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, | |||
2446 | } | 2448 | } |
2447 | } | 2449 | } |
2448 | slab_unlock(page); | 2450 | slab_unlock(page); |
2451 | kfree(map); | ||
2449 | #endif | 2452 | #endif |
2450 | } | 2453 | } |
2451 | 2454 | ||
@@ -3651,10 +3654,10 @@ static int add_location(struct loc_track *t, struct kmem_cache *s, | |||
3651 | } | 3654 | } |
3652 | 3655 | ||
3653 | static void process_slab(struct loc_track *t, struct kmem_cache *s, | 3656 | static void process_slab(struct loc_track *t, struct kmem_cache *s, |
3654 | struct page *page, enum track_item alloc) | 3657 | struct page *page, enum track_item alloc, |
3658 | long *map) | ||
3655 | { | 3659 | { |
3656 | void *addr = page_address(page); | 3660 | void *addr = page_address(page); |
3657 | DECLARE_BITMAP(map, page->objects); | ||
3658 | void *p; | 3661 | void *p; |
3659 | 3662 | ||
3660 | bitmap_zero(map, page->objects); | 3663 | bitmap_zero(map, page->objects); |
@@ -3673,11 +3676,14 @@ static int list_locations(struct kmem_cache *s, char *buf, | |||
3673 | unsigned long i; | 3676 | unsigned long i; |
3674 | struct loc_track t = { 0, 0, NULL }; | 3677 | struct loc_track t = { 0, 0, NULL }; |
3675 | int node; | 3678 | int node; |
3679 | unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * | ||
3680 | sizeof(unsigned long), GFP_KERNEL); | ||
3676 | 3681 | ||
3677 | if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), | 3682 | if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), |
3678 | GFP_TEMPORARY)) | 3683 | GFP_TEMPORARY)) { |
3684 | kfree(map); | ||
3679 | return sprintf(buf, "Out of memory\n"); | 3685 | return sprintf(buf, "Out of memory\n"); |
3680 | 3686 | } | |
3681 | /* Push back cpu slabs */ | 3687 | /* Push back cpu slabs */ |
3682 | flush_all(s); | 3688 | flush_all(s); |
3683 | 3689 | ||
@@ -3691,9 +3697,9 @@ static int list_locations(struct kmem_cache *s, char *buf, | |||
3691 | 3697 | ||
3692 | spin_lock_irqsave(&n->list_lock, flags); | 3698 | spin_lock_irqsave(&n->list_lock, flags); |
3693 | list_for_each_entry(page, &n->partial, lru) | 3699 | list_for_each_entry(page, &n->partial, lru) |
3694 | process_slab(&t, s, page, alloc); | 3700 | process_slab(&t, s, page, alloc, map); |
3695 | list_for_each_entry(page, &n->full, lru) | 3701 | list_for_each_entry(page, &n->full, lru) |
3696 | process_slab(&t, s, page, alloc); | 3702 | process_slab(&t, s, page, alloc, map); |
3697 | spin_unlock_irqrestore(&n->list_lock, flags); | 3703 | spin_unlock_irqrestore(&n->list_lock, flags); |
3698 | } | 3704 | } |
3699 | 3705 | ||
@@ -3744,6 +3750,7 @@ static int list_locations(struct kmem_cache *s, char *buf, | |||
3744 | } | 3750 | } |
3745 | 3751 | ||
3746 | free_loc_track(&t); | 3752 | free_loc_track(&t); |
3753 | kfree(map); | ||
3747 | if (!t.count) | 3754 | if (!t.count) |
3748 | len += sprintf(buf, "No data\n"); | 3755 | len += sprintf(buf, "No data\n"); |
3749 | return len; | 3756 | return len; |