 lib/Kconfig.debug |  2 +-
 mm/slub.c         | 30 +++++++++++++++----------------
 2 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0796c1a090c0..eef557dc46c3 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -211,7 +211,7 @@ config SLUB_DEBUG_ON
 config SLUB_STATS
 	default n
 	bool "Enable SLUB performance statistics"
-	depends on SLUB
+	depends on SLUB && SLUB_DEBUG && SYSFS
 	help
 	  SLUB statistics are useful to debug SLUBs allocation behavior in
 	  order find ways to optimize the allocator. This should never be
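
The tightened dependency mirrors how the statistics code is compiled: the per-cpu counters only exist under CONFIG_SLUB_STATS and are only read back through sysfs, which itself requires the SLUB_DEBUG infrastructure. A minimal sketch of that pattern, assuming the stat() helper and enum stat_item that slub.c used in this era (illustrative, not part of this patch):

#ifdef CONFIG_SLUB_STATS
static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
{
	c->stat[si]++;	/* bumped on alloc/free paths, read back via sysfs */
}
#else
static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
{
}
#endif
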
diff --git a/mm/slub.c b/mm/slub.c
index 3df6d5bdd711..3fcdcf7d77ba 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2688,21 +2688,6 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
-#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-#endif
-
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  * the remaining slabs by the number of items in use. The slabs with the
@@ -3181,6 +3166,21 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return slab_alloc(s, gfpflags, node, caller);
 }
 
+#if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+#endif
+
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int validate_slab(struct kmem_cache *s, struct page *page,
 			 unsigned long *map)
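
For context, count_partial() sums page->inuse over one node's partial list under that node's list_lock; the sysfs and SLABINFO readers that motivate the combined #if guard aggregate it across nodes. A hypothetical caller in that style (count_all_partial is an illustrative name, not part of the patch):

static unsigned long count_all_partial(struct kmem_cache *s)
{
	int node;
	unsigned long total = 0;

	/*
	 * Walk every node with normal memory and add up the objects
	 * still in use on that node's partially filled slabs.
	 */
	for_each_node_state(node, N_NORMAL_MEMORY) {
		struct kmem_cache_node *n = get_node(s, node);

		if (n)
			total += count_partial(n);
	}
	return total;
}
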