author     Christoph Lameter <clameter@sgi.com>                    2007-07-17 07:03:30 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-07-17 13:23:02 -0400
commit     434e245ddd3f14aa8eef97cae16c71b863ab092a (patch)
tree       bbfd9d012416e6882fd714650435a78ce4f9da9b /mm
parent     94f6030ca792c57422f04a73e7a872d8325946d3 (diff)
SLUB: Do not allocate object bit array on stack
The number of objects per slab increases with the current patches in mm, since we
allow up to order-3 allocations by default. Further patches in mm allow the use of
2M or larger slabs. Slab validation needs a per-object bitmap in order to check a
slab, and with up to 64k objects per slab that bitmap can require 8K of stack
space. That does not look good.
Allocate the bit arrays via kmalloc instead.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
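
To spell out the arithmetic: with up to 65,536 objects per slab, the old on-stack DECLARE_BITMAP(map, s->objects) needs 65536 bits = 8192 bytes, a large share of a kernel stack that is itself only a few kilobytes. The pattern the patch adopts instead is sketched below with illustrative comments; kmalloc(), BITS_TO_LONGS() and kfree() are the calls the diff actually uses.

	/*
	 * One bit per object, rounded up to whole unsigned longs, allocated
	 * from the heap rather than declared on the stack.
	 */
	unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
				     sizeof(unsigned long), GFP_KERNEL);

	if (!map)
		return -ENOMEM;	/* propagated to the sysfs writer */

	/* ... validate every slab in the cache, reusing the same bitmap ... */

	kfree(map);

Allocating once in validate_slab_cache() and passing the bitmap down means the per-slab helpers stay allocation-free and the cost is paid once per validation run.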
Diffstat (limited to 'mm')
-rw-r--r--   mm/slub.c | 39
1 file changed, 25 insertions(+), 14 deletions(-)
@@ -2764,11 +2764,11 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
-static int validate_slab(struct kmem_cache *s, struct page *page)
+static int validate_slab(struct kmem_cache *s, struct page *page,
+						unsigned long *map)
 {
 	void *p;
 	void *addr = page_address(page);
-	DECLARE_BITMAP(map, s->objects);
 
 	if (!check_slab(s, page) ||
 			!on_freelist(s, page, NULL))
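
For context on the new map parameter: it carries the per-object bitmap that validate_slab() previously declared on its own stack. A rough sketch of how such a bitmap is used when checking one slab, with mark_free_objects() and check_all_objects() as hypothetical stand-ins for SLUB's internal freelist walk and object checks (the real body is not part of this hunk):

	/* Sketch only: a set bit means the object is on the freelist,
	 * a clear bit means it is currently allocated. */
	bitmap_zero(map, s->objects);
	mark_free_objects(s, page, map);	/* hypothetical: set_bit() per free object */
	return check_all_objects(s, page, map);	/* hypothetical: verify both kinds */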
@@ -2790,10 +2790,11 @@ static int validate_slab(struct kmem_cache *s, struct page *page)
 	return 1;
 }
 
-static void validate_slab_slab(struct kmem_cache *s, struct page *page)
+static void validate_slab_slab(struct kmem_cache *s, struct page *page,
+						unsigned long *map)
 {
 	if (slab_trylock(page)) {
-		validate_slab(s, page);
+		validate_slab(s, page, map);
 		slab_unlock(page);
 	} else
 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
@@ -2810,7 +2811,8 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page)
 	}
 }
 
-static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
+static int validate_slab_node(struct kmem_cache *s,
+		struct kmem_cache_node *n, unsigned long *map)
 {
 	unsigned long count = 0;
 	struct page *page;
@@ -2819,7 +2821,7 @@ static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
 	spin_lock_irqsave(&n->list_lock, flags);
 
 	list_for_each_entry(page, &n->partial, lru) {
-		validate_slab_slab(s, page);
+		validate_slab_slab(s, page, map);
 		count++;
 	}
 	if (count != n->nr_partial)
@@ -2830,7 +2832,7 @@ static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
 			goto out;
 
 	list_for_each_entry(page, &n->full, lru) {
-		validate_slab_slab(s, page);
+		validate_slab_slab(s, page, map);
 		count++;
 	}
 	if (count != atomic_long_read(&n->nr_slabs))
@@ -2843,17 +2845,23 @@ out:
 	return count;
 }
 
-static unsigned long validate_slab_cache(struct kmem_cache *s)
+static long validate_slab_cache(struct kmem_cache *s)
 {
 	int node;
 	unsigned long count = 0;
+	unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
+				sizeof(unsigned long), GFP_KERNEL);
+
+	if (!map)
+		return -ENOMEM;
 
 	flush_all(s);
 	for_each_online_node(node) {
 		struct kmem_cache_node *n = get_node(s, node);
 
-		count += validate_slab_node(s, n);
+		count += validate_slab_node(s, n, map);
 	}
+	kfree(map);
 	return count;
 }
 
@@ -3467,11 +3475,14 @@ static ssize_t validate_show(struct kmem_cache *s, char *buf)
 static ssize_t validate_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
-	if (buf[0] == '1')
-		validate_slab_cache(s);
-	else
-		return -EINVAL;
-	return length;
+	int ret = -EINVAL;
+
+	if (buf[0] == '1') {
+		ret = validate_slab_cache(s);
+		if (ret >= 0)
+			ret = length;
+	}
+	return ret;
 }
 SLAB_ATTR(validate);
 
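
Seen from userspace, the last hunk changes the failure mode: validate_slab_cache() now returns long, and a failed bitmap allocation comes back from the sysfs store as -ENOMEM instead of a silent success. A small sketch of that interaction, assuming the SLUB sysfs layout (/sys/kernel/slab/<cache>/validate) and kmalloc-64 as an example cache name:

/* Userspace sketch: trigger SLUB validation and observe the error path. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/slab/kmalloc-64/validate", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* With this patch, an -ENOMEM from validate_slab_cache() shows up
	 * here as write() failing with errno == ENOMEM. */
	if (write(fd, "1", 1) < 0)
		fprintf(stderr, "validate: %s\n", strerror(errno));
	close(fd);
	return 0;
}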