aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2008-04-14 11:51:34 -0400
committerPekka Enberg <penberg@cs.helsinki.fi>2008-04-14 11:51:34 -0400
commit5b06c853ad447636e31d105e95c48ae9abb6bfb5 (patch)
treecf4d606b54659833a98a4b66c76ee3562bcd1062 /mm
parent4097d6017576a5e138f442f5e3c393ad00d10f58 (diff)
slub: Deal with config variable dependencies
count_partial() is used by both slabinfo and the sysfs proc support. Move the function directly before the beginning of the sysfs code so that it can be easily found. Rework the preprocessor conditional to take into account that slub sysfs support depends on CONFIG_SYSFS *and* CONFIG_SLUB_DEBUG. Make CONFIG_SLUB_STATS depend on CONFIG_SLUB_DEBUG and CONFIG_SYSFS. There is no point in keeping statistics if no one can retrieve them. Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Diffstat (limited to 'mm')
-rw-r--r--mm/slub.c30
1 files changed, 15 insertions, 15 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 3df6d5bdd711..3fcdcf7d77ba 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2688,21 +2688,6 @@ void kfree(const void *x)
2688} 2688}
2689EXPORT_SYMBOL(kfree); 2689EXPORT_SYMBOL(kfree);
2690 2690
2691#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
2692static unsigned long count_partial(struct kmem_cache_node *n)
2693{
2694 unsigned long flags;
2695 unsigned long x = 0;
2696 struct page *page;
2697
2698 spin_lock_irqsave(&n->list_lock, flags);
2699 list_for_each_entry(page, &n->partial, lru)
2700 x += page->inuse;
2701 spin_unlock_irqrestore(&n->list_lock, flags);
2702 return x;
2703}
2704#endif
2705
2706/* 2691/*
2707 * kmem_cache_shrink removes empty slabs from the partial lists and sorts 2692 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2708 * the remaining slabs by the number of items in use. The slabs with the 2693 * the remaining slabs by the number of items in use. The slabs with the
@@ -3181,6 +3166,21 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3181 return slab_alloc(s, gfpflags, node, caller); 3166 return slab_alloc(s, gfpflags, node, caller);
3182} 3167}
3183 3168
3169#if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
3170static unsigned long count_partial(struct kmem_cache_node *n)
3171{
3172 unsigned long flags;
3173 unsigned long x = 0;
3174 struct page *page;
3175
3176 spin_lock_irqsave(&n->list_lock, flags);
3177 list_for_each_entry(page, &n->partial, lru)
3178 x += page->inuse;
3179 spin_unlock_irqrestore(&n->list_lock, flags);
3180 return x;
3181}
3182#endif
3183
3184#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG) 3184#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
3185static int validate_slab(struct kmem_cache *s, struct page *page, 3185static int validate_slab(struct kmem_cache *s, struct page *page,
3186 unsigned long *map) 3186 unsigned long *map)