about · summary · refs · log · tree · commit · diff · stats
path: root/mm/slub.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 65bf21dc996a..9aa12b54ad1b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2607,6 +2607,19 @@ void kfree(const void *x)
2607} 2607}
2608EXPORT_SYMBOL(kfree); 2608EXPORT_SYMBOL(kfree);
2609 2609
2610static unsigned long count_partial(struct kmem_cache_node *n)
2611{
2612 unsigned long flags;
2613 unsigned long x = 0;
2614 struct page *page;
2615
2616 spin_lock_irqsave(&n->list_lock, flags);
2617 list_for_each_entry(page, &n->partial, lru)
2618 x += page->inuse;
2619 spin_unlock_irqrestore(&n->list_lock, flags);
2620 return x;
2621}
2622
2610/* 2623/*
2611 * kmem_cache_shrink removes empty slabs from the partial lists and sorts 2624 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2612 * the remaining slabs by the number of items in use. The slabs with the 2625 * the remaining slabs by the number of items in use. The slabs with the
@@ -3078,19 +3091,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3078 return slab_alloc(s, gfpflags, node, caller); 3091 return slab_alloc(s, gfpflags, node, caller);
3079} 3092}
3080 3093
3081static unsigned long count_partial(struct kmem_cache_node *n)
3082{
3083 unsigned long flags;
3084 unsigned long x = 0;
3085 struct page *page;
3086
3087 spin_lock_irqsave(&n->list_lock, flags);
3088 list_for_each_entry(page, &n->partial, lru)
3089 x += page->inuse;
3090 spin_unlock_irqrestore(&n->list_lock, flags);
3091 return x;
3092}
3093
3094#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG) 3094#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
3095static int validate_slab(struct kmem_cache *s, struct page *page, 3095static int validate_slab(struct kmem_cache *s, struct page *page,
3096 unsigned long *map) 3096 unsigned long *map)