author | Christoph Lameter <clameter@sgi.com> | 2008-01-08 02:20:26 -0500
committer | Christoph Lameter <clameter@sgi.com> | 2008-02-04 13:56:01 -0500
commit | f61396aed90acb033952531c522d1010f87e24f4 (patch)
tree | 53d42c90f0aab1d1b53e8648a279e10a83df0107 /mm
parent | 151c602f79cb9154c3f3d83223cae355af463d6f (diff)
Move count_partial before kmem_cache_shrink
Move the counting function for objects in partial slabs so that it is placed
before kmem_cache_shrink.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
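The patch is pure code motion: the body of count_partial() is unchanged, only its position in mm/slub.c moves. The commit message does not say why, but a common motive for hoisting a definition (an assumption here, not stated in the patch) is that later changes will call it from code above its old position, and C requires a declaration to be visible at the call site; defining the helper first avoids a separate forward declaration. A minimal standalone sketch of that rule, with hypothetical names:

#include <stdio.h>

/* Hypothetical example, not from the patch: because count_widgets()
 * is defined before shrink_cache(), the call below needs no separate
 * prototype -- the same effect the patch achieves by moving
 * count_partial() above kmem_cache_shrink(). */
static unsigned long count_widgets(void)       /* definition first */
{
        return 3;
}

static void shrink_cache(void)                 /* caller already sees it */
{
        printf("%lu widgets\n", count_widgets());
}

int main(void)
{
        shrink_cache();
        return 0;
}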
Diffstat (limited to 'mm')
-rw-r--r-- | mm/slub.c | 26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2607,6 +2607,19 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  * the remaining slabs by the number of items in use. The slabs with the
@@ -3078,19 +3091,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return slab_alloc(s, gfpflags, node, caller);
 }
 
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int validate_slab(struct kmem_cache *s, struct page *page,
 		unsigned long *map)
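For readers without a kernel tree at hand, the moved function's pattern -- take the node's list_lock with interrupts disabled, walk the partial list, sum each page's inuse count, unlock -- can be sketched in ordinary userspace C. This is a hedged analogue with invented types (a pthread mutex standing in for the IRQ-safe spinlock, a hand-rolled singly linked list for list_for_each_entry()), not kernel code:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical userspace analogue of count_partial(). */
struct page_like {
        unsigned long inuse;       /* objects in use on this "slab" */
        struct page_like *next;
};

struct node_like {
        pthread_mutex_t list_lock;
        struct page_like *partial; /* head of the partial list */
};

static unsigned long count_partial_like(struct node_like *n)
{
        unsigned long x = 0;

        /* Lock, walk, accumulate, unlock -- same shape as the kernel
         * function, minus the IRQ handling. */
        pthread_mutex_lock(&n->list_lock);
        for (struct page_like *p = n->partial; p; p = p->next)
                x += p->inuse;
        pthread_mutex_unlock(&n->list_lock);
        return x;
}

int main(void)
{
        struct page_like b = { .inuse = 3, .next = NULL };
        struct page_like a = { .inuse = 5, .next = &b };
        struct node_like n = { .list_lock = PTHREAD_MUTEX_INITIALIZER,
                               .partial = &a };

        printf("%lu objects in partial slabs\n", count_partial_like(&n));
        return 0;
}

The locking matters for the same reason in both settings: the partial list can be modified concurrently (by frees and by kmem_cache_shrink() itself), so the sum is only meaningful if the list is stable while it is walked.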