 include/linux/page-flags.h |  2 --
 mm/slub.c                  | 33 ++++++++++++---------------------
 2 files changed, 12 insertions(+), 23 deletions(-)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5b59f35dcb8f..6fa317801e1c 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -128,7 +128,6 @@ enum pageflags {
 
 	/* SLUB */
 	PG_slub_frozen = PG_active,
-	PG_slub_debug = PG_error,
 };
 
 #ifndef __GENERATING_BOUNDS_H
@@ -215,7 +214,6 @@ PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
 __PAGEFLAG(SlobFree, slob_free)
 
 __PAGEFLAG(SlubFrozen, slub_frozen)
-__PAGEFLAG(SlubDebug, slub_debug)
 
 /*
  * Private page markings that may be used by the filesystem that owns the page
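
Side note: the single __PAGEFLAG(SlubDebug, slub_debug) line deleted above is what generated the PageSlubDebug(), __SetPageSlubDebug() and __ClearPageSlubDebug() accessors whose call sites disappear from mm/slub.c below. A simplified sketch of the kernel's __PAGEFLAG() pattern (illustrative, not the verbatim macro from page-flags.h):

#define __PAGEFLAG_SKETCH(uname, lname)					\
static inline int Page##uname(struct page *page)			\
	{ return test_bit(PG_##lname, &page->flags); }			\
static inline void __SetPage##uname(struct page *page)			\
	{ __set_bit(PG_##lname, &page->flags); }			\
static inline void __ClearPage##uname(struct page *page)		\
	{ __clear_bit(PG_##lname, &page->flags); }

Deleting the one declaration therefore removes all three per-page helpers at once.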
diff --git a/mm/slub.c b/mm/slub.c
index b89a7c99b2fa..9cf5dae7815e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -107,11 +107,17 @@
  * the fast path and disables lockless freelists.
  */
 
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+		SLAB_TRACE | SLAB_DEBUG_FREE)
+
+static inline int kmem_cache_debug(struct kmem_cache *s)
+{
 #ifdef CONFIG_SLUB_DEBUG
-#define SLABDEBUG 1
+	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
 #else
-#define SLABDEBUG 0
+	return 0;
 #endif
+}
 
 /*
  * Issues still to be resolved:
@@ -1157,9 +1163,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
-	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
-			SLAB_STORE_USER | SLAB_TRACE))
-		__SetPageSlubDebug(page);
 
 	start = page_address(page);
 
@@ -1186,14 +1189,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	int order = compound_order(page);
 	int pages = 1 << order;
 
-	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
+	if (kmem_cache_debug(s)) {
 		void *p;
 
 		slab_pad_check(s, page);
 		for_each_object(p, s, page_address(page),
 						page->objects)
 			check_object(s, page, p, 0);
-		__ClearPageSlubDebug(page);
 	}
 
 	kmemcheck_free_shadow(page, compound_order(page));
@@ -1415,8 +1417,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 	} else {
 		stat(s, DEACTIVATE_FULL);
-		if (SLABDEBUG && PageSlubDebug(page) &&
-			(s->flags & SLAB_STORE_USER))
+		if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
 	}
 	slab_unlock(page);
@@ -1624,7 +1625,7 @@ load_freelist:
 	object = c->page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
-	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
+	if (kmem_cache_debug(s))
 		goto debug;
 
 	c->freelist = get_freepointer(s, object);
@@ -1783,7 +1784,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	stat(s, FREE_SLOWPATH);
 	slab_lock(page);
 
-	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
+	if (kmem_cache_debug(s))
 		goto debug;
 
 checks_ok:
@@ -3398,16 +3399,6 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page,
 	} else
 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
 			s->name, page);
-
-	if (s->flags & DEBUG_DEFAULT_FLAGS) {
-		if (!PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug not set "
-				"on slab 0x%p\n", s->name, page);
-	} else {
-		if (PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug set on "
-				"slab 0x%p\n", s->name, page);
-	}
 }
 
 static int validate_slab_node(struct kmem_cache *s,
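
Taken together: whether a slab needs debug handling is a property of the kmem_cache, not of each page, so the patch replaces the SLABDEBUG macro plus the per-page PG_slub_debug bit with one inline predicate on s->flags, and the validate_slab_slab() cross-check of that bit becomes moot. When CONFIG_SLUB_DEBUG is off, kmem_cache_debug() is a constant 0 and the compiler drops the debug branches exactly as the old "if (SLABDEBUG && ...)" form did. A standalone userspace sketch of the pattern (the flag values, the unlikely() stub, the pared-down struct kmem_cache and main() are illustrative, not the kernel's):

#include <stdio.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Illustrative stand-ins for the SLAB_* debug bits (values made up). */
#define SLAB_DEBUG_FREE		0x01UL
#define SLAB_RED_ZONE		0x02UL
#define SLAB_POISON		0x04UL
#define SLAB_STORE_USER		0x08UL
#define SLAB_TRACE		0x10UL

#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DEBUG_FREE)

struct kmem_cache {
	unsigned long flags;	/* just the flags word, for the sketch */
};

static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	/* One test against the cache replaces SLABDEBUG plus a per-page
	 * PG_slub_debug bit that had to be set and cleared in sync. */
	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
	/* Constant 0: every "if (kmem_cache_debug(s)) goto debug;" in the
	 * hot paths is eliminated at compile time. */
	return 0;
#endif
}

int main(void)
{
	struct kmem_cache plain = { .flags = 0 };
	struct kmem_cache poisoned = { .flags = SLAB_POISON };

	printf("plain:    %d\n", kmem_cache_debug(&plain));
	printf("poisoned: %d\n", kmem_cache_debug(&poisoned));
	return 0;
}

Compiled with -DCONFIG_SLUB_DEBUG it prints 0 and 1; without that define both print 0 and the branch folds away entirely.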