authorChristoph Lameter <clameter@sgi.com>2008-04-14 11:51:34 -0400
committerPekka Enberg <penberg@cs.helsinki.fi>2008-04-14 11:51:34 -0400
commit5b06c853ad447636e31d105e95c48ae9abb6bfb5 (patch)
treecf4d606b54659833a98a4b66c76ee3562bcd1062
parent4097d6017576a5e138f442f5e3c393ad00d10f58 (diff)
slub: Deal with config variable dependencies
count_partial() is used by both slabinfo and the sysfs proc support. Move the function directly before the beginning of the sysfs code so that it can be easily found. Rework the preprocessor conditional to take into account that SLUB sysfs support depends on CONFIG_SYSFS *and* CONFIG_SLUB_DEBUG.

Make CONFIG_SLUB_STATS depend on CONFIG_SLUB_DEBUG and CONFIG_SYSFS. There is no point in keeping statistics if no one can retrieve them.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
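In condensed form, the guard change in mm/slub.c amounts to the following (a sketch distilled from the hunks below, not additional patch text):

/* Old guard: built count_partial() for SLUB debugging or /proc/slabinfo,
 * overlooking that the sysfs consumers also require CONFIG_SYSFS. */
#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
/* ... count_partial() ... */
#endif

/* New guard: SLUB sysfs support exists only when both CONFIG_SYSFS and
 * CONFIG_SLUB_DEBUG are set, so the condition now reflects that. */
#if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
/* ... count_partial() ... */
#endif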
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--mm/slub.c30
2 files changed, 16 insertions, 16 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0796c1a090c..eef557dc46c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -211,7 +211,7 @@ config SLUB_DEBUG_ON
 config SLUB_STATS
 	default n
 	bool "Enable SLUB performance statistics"
-	depends on SLUB
+	depends on SLUB && SLUB_DEBUG && SYSFS
 	help
 	  SLUB statistics are useful to debug SLUBs allocation behavior in
 	  order find ways to optimize the allocator. This should never be
diff --git a/mm/slub.c b/mm/slub.c
index 3df6d5bdd71..3fcdcf7d77b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2688,21 +2688,6 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
-#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-#endif
-
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  * the remaining slabs by the number of items in use. The slabs with the
@@ -3181,6 +3166,21 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return slab_alloc(s, gfpflags, node, caller);
 }
 
+#if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+#endif
+
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)