author	Christoph Lameter <cl@linux-foundation.org>	2010-07-09 15:07:14 -0400
committer	Pekka Enberg <penberg@cs.helsinki.fi>	2010-07-16 04:13:08 -0400
commit	af537b0a6c650ab6ff7104d8163e96866b31c835
tree	31d756ebce6ac6613b42a1aa5b1f5a80a7d76fe7
parent	f5b801ac38a9612b380ee9a75ab1861f0594e79f
slub: Use kmem_cache flags to detect if slab is in debugging mode.
The cacheline with the flags is reachable from the hot paths after the
percpu allocator changes went in, so there is no longer any need to put
a flag into each slab page. Get rid of the SlubDebug flag and use the
flags in kmem_cache instead.

Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
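In essence, the slow-path debug check changes from a per-page flag test
to a test of the cache-wide flags word. A condensed before/after sketch
of that check, drawn from the hunks below:

	/* Before: each slab page carries its own debug bit, set in
	 * new_slab() and tested on every slow-path entry. */
	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
		goto debug;

	/* After: test s->flags, whose cacheline the hot paths already
	 * touch. kmem_cache_debug() expands to
	 * unlikely(s->flags & SLAB_DEBUG_FLAGS), or to a constant 0
	 * when CONFIG_SLUB_DEBUG is off, so the compiler can drop the
	 * branch entirely. */
	if (kmem_cache_debug(s))
		goto debug;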
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	33
1 file changed, 12 insertions(+), 21 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index b89a7c99b2fa..9cf5dae7815e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -107,11 +107,17 @@
  * the fast path and disables lockless freelists.
  */
 
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+		SLAB_TRACE | SLAB_DEBUG_FREE)
+
+static inline int kmem_cache_debug(struct kmem_cache *s)
+{
 #ifdef CONFIG_SLUB_DEBUG
-#define SLABDEBUG 1
+	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
 #else
-#define SLABDEBUG 0
+	return 0;
 #endif
+}
 
 /*
  * Issues still to be resolved:
@@ -1157,9 +1163,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
-	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
-			SLAB_STORE_USER | SLAB_TRACE))
-		__SetPageSlubDebug(page);
 
 	start = page_address(page);
 
@@ -1186,14 +1189,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	int order = compound_order(page);
 	int pages = 1 << order;
 
-	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
+	if (kmem_cache_debug(s)) {
 		void *p;
 
 		slab_pad_check(s, page);
 		for_each_object(p, s, page_address(page),
 						page->objects)
 			check_object(s, page, p, 0);
-		__ClearPageSlubDebug(page);
 	}
 
 	kmemcheck_free_shadow(page, compound_order(page));
@@ -1415,8 +1417,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 	} else {
 		stat(s, DEACTIVATE_FULL);
-		if (SLABDEBUG && PageSlubDebug(page) &&
-			(s->flags & SLAB_STORE_USER))
+		if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
 	}
 	slab_unlock(page);
@@ -1624,7 +1625,7 @@ load_freelist:
 	object = c->page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
-	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
+	if (kmem_cache_debug(s))
 		goto debug;
 
 	c->freelist = get_freepointer(s, object);
@@ -1783,7 +1784,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	stat(s, FREE_SLOWPATH);
 	slab_lock(page);
 
-	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
+	if (kmem_cache_debug(s))
 		goto debug;
 
 checks_ok:
@@ -3398,16 +3399,6 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page,
 	} else
 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
 			s->name, page);
-
-	if (s->flags & DEBUG_DEFAULT_FLAGS) {
-		if (!PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug not set "
-				"on slab 0x%p\n", s->name, page);
-	} else {
-		if (PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug set on "
-				"slab 0x%p\n", s->name, page);
-	}
 }
 
 static int validate_slab_node(struct kmem_cache *s,