author    Christoph Lameter <clameter@sgi.com>    2008-01-08 02:20:26 -0500
committer Christoph Lameter <clameter@sgi.com>    2008-02-04 13:56:01 -0500
commit    f61396aed90acb033952531c522d1010f87e24f4
tree      53d42c90f0aab1d1b53e8648a279e10a83df0107 /mm/slub.c
parent    151c602f79cb9154c3f3d83223cae355af463d6f
Move count_partial before kmem_cache_shrink
Move the counting function for objects in partial slabs so that it is
placed before kmem_cache_shrink.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
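For context on what is being moved: count_partial() walks a node's partial
list while holding the node's list_lock and sums the page->inuse counts,
i.e. the number of objects allocated from each partially filled slab. A
minimal userspace sketch of the same lock-walk-sum pattern is below; the
struct layout and the pthread mutex are illustrative stand-ins for the
kernel's kmem_cache_node, spin_lock_irqsave(), and list_for_each_entry(),
not the real kernel API (the kernel variant also disables interrupts while
the lock is held).

	#include <pthread.h>
	#include <stdio.h>

	/* Stand-in for a partially filled slab page. */
	struct slab {
		unsigned long inuse;    /* objects allocated from this slab */
		struct slab *next;
	};

	/* Stand-in for kmem_cache_node with its partial list. */
	struct node {
		pthread_mutex_t list_lock;
		struct slab *partial;   /* list of partially filled slabs */
	};

	static unsigned long count_partial(struct node *n)
	{
		unsigned long x = 0;
		struct slab *s;

		/* Hold the list lock so the list cannot change under us. */
		pthread_mutex_lock(&n->list_lock);
		for (s = n->partial; s; s = s->next)
			x += s->inuse;
		pthread_mutex_unlock(&n->list_lock);
		return x;
	}

	int main(void)
	{
		struct slab b = { 3, NULL }, a = { 5, &b };
		struct node n = { PTHREAD_MUTEX_INITIALIZER, &a };

		/* Prints "8 objects in partial slabs". */
		printf("%lu objects in partial slabs\n", count_partial(&n));
		return 0;
	}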
Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 65bf21dc996a..9aa12b54ad1b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2607,6 +2607,19 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  * the remaining slabs by the number of items in use. The slabs with the
@@ -3078,19 +3091,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return slab_alloc(s, gfpflags, node, caller);
 }
 
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int validate_slab(struct kmem_cache *s, struct page *page,
 				unsigned long *map)