-rw-r--r--  include/linux/page-flags.h |  7 +++++++
-rw-r--r--  mm/slub.c                  | 65 +++++++-----------------
2 files changed, 24 insertions(+), 48 deletions(-)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 7d8db1233e44..3fc586b7b90b 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -104,6 +104,10 @@ enum pageflags {
 	/* XEN */
 	PG_pinned = PG_owner_priv_1,
 	PG_savepinned = PG_dirty,
+
+	/* SLUB */
+	PG_slub_frozen = PG_active,
+	PG_slub_debug = PG_error,
 };
 
 #ifndef __GENERATING_BOUNDS_H
@@ -169,6 +173,9 @@ PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
 PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
 	__SETPAGEFLAG(Private, private)
 
+__PAGEFLAG(SlubFrozen, slub_frozen)
+__PAGEFLAG(SlubDebug, slub_debug)
+
 /*
  * Only test-and-set exist for PG_writeback. The unconditional operators are
  * risky: they bypass page accounting.
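
The two __PAGEFLAG() lines above generate the accessors used throughout
mm/slub.c below. As a rough sketch of the expansion (assuming the
TESTPAGEFLAG/__SETPAGEFLAG/__CLEARPAGEFLAG layering in this version of
page-flags.h), __PAGEFLAG(SlubFrozen, slub_frozen) produces:

	static inline int PageSlubFrozen(struct page *page)
	{
		return test_bit(PG_slub_frozen, &page->flags);
	}

	static inline void __SetPageSlubFrozen(struct page *page)
	{
		__set_bit(PG_slub_frozen, &page->flags);
	}

	static inline void __ClearPageSlubFrozen(struct page *page)
	{
		__clear_bit(PG_slub_frozen, &page->flags);
	}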
diff --git a/mm/slub.c b/mm/slub.c
index 6d4a49c1ff2f..77c21cf53ff9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -102,44 +102,12 @@
  * the fast path and disables lockless freelists.
  */
 
-#define FROZEN (1 << PG_active)
-
 #ifdef CONFIG_SLUB_DEBUG
-#define SLABDEBUG (1 << PG_error)
+#define SLABDEBUG 1
 #else
 #define SLABDEBUG 0
 #endif
 
-static inline int SlabFrozen(struct page *page)
-{
-	return page->flags & FROZEN;
-}
-
-static inline void SetSlabFrozen(struct page *page)
-{
-	page->flags |= FROZEN;
-}
-
-static inline void ClearSlabFrozen(struct page *page)
-{
-	page->flags &= ~FROZEN;
-}
-
-static inline int SlabDebug(struct page *page)
-{
-	return page->flags & SLABDEBUG;
-}
-
-static inline void SetSlabDebug(struct page *page)
-{
-	page->flags |= SLABDEBUG;
-}
-
-static inline void ClearSlabDebug(struct page *page)
-{
-	page->flags &= ~SLABDEBUG;
-}
-
 /*
  * Issues still to be resolved:
  *
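
Note the change in meaning of SLABDEBUG above: it is no longer a bit mask
applied to page->flags but a plain 0/1 constant, with the per-page state
moving into the PG_slub_debug overlay tested by PageSlubDebug(). Writing
fast-path tests as "SLABDEBUG && PageSlubDebug(page)" lets the compiler
drop the flag read entirely on !CONFIG_SLUB_DEBUG builds. A minimal
user-space sketch of the same idiom (hypothetical names, not kernel code):

	#define DEBUG_BUILD 0	/* stands in for SLABDEBUG */

	static int needs_debug_path(unsigned long flags, int debug_bit)
	{
		/* With DEBUG_BUILD a constant 0, the whole expression
		 * folds to 0 and no flag test is emitted. */
		return DEBUG_BUILD && (flags & (1UL << debug_bit));
	}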
@@ -971,7 +939,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
 	}
 
 	/* Special debug activities for freeing objects */
-	if (!SlabFrozen(page) && !page->freelist)
+	if (!PageSlubFrozen(page) && !page->freelist)
 		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
@@ -1157,7 +1125,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	page->flags |= 1 << PG_slab;
 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
 			SLAB_STORE_USER | SLAB_TRACE))
-		SetSlabDebug(page);
+		__SetPageSlubDebug(page);
 
 	start = page_address(page);
 
@@ -1184,14 +1152,14 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	int order = compound_order(page);
 	int pages = 1 << order;
 
-	if (unlikely(SlabDebug(page))) {
+	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
 		void *p;
 
 		slab_pad_check(s, page);
 		for_each_object(p, s, page_address(page),
 						page->objects)
 			check_object(s, page, p, 0);
-		ClearSlabDebug(page);
+		__ClearPageSlubDebug(page);
 	}
 
 	mod_zone_page_state(page_zone(page),
@@ -1288,7 +1256,7 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
 	if (slab_trylock(page)) {
 		list_del(&page->lru);
 		n->nr_partial--;
-		SetSlabFrozen(page);
+		__SetPageSlubFrozen(page);
 		return 1;
 	}
 	return 0;
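
A note on the double-underscore accessors used in this and the following
hunks: in the kernel bitops API, set_bit()/clear_bit() are atomic while
__set_bit()/__clear_bit() are not, and __PAGEFLAG generates the non-atomic
variants. That is safe here only because every writer of the overlay bits
holds the slab lock, as lock_and_freeze_slab() does above. A reduced
illustration (hypothetical helper, not part of the patch):

	static void freeze_if_unlocked(struct page *page)
	{
		if (!slab_trylock(page))	/* slab lock excludes
						   other flag writers */
			return;
		__SetPageSlubFrozen(page);	/* plain, non-atomic RMW */
		slab_unlock(page);
	}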
@@ -1398,7 +1366,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 	struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());
 
-	ClearSlabFrozen(page);
+	__ClearPageSlubFrozen(page);
 	if (page->inuse) {
 
 		if (page->freelist) {
@@ -1406,7 +1374,8 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 		} else {
 			stat(c, DEACTIVATE_FULL);
-			if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
+			if (SLABDEBUG && PageSlubDebug(page) &&
+						(s->flags & SLAB_STORE_USER))
 				add_full(n, page);
 		}
 		slab_unlock(page);
@@ -1551,7 +1520,7 @@ load_freelist:
 	object = c->page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
-	if (unlikely(SlabDebug(c->page)))
+	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
 		goto debug;
 
 	c->freelist = object[c->offset];
@@ -1588,7 +1557,7 @@ new_slab:
 		if (c->page)
 			flush_slab(s, c);
 		slab_lock(new);
-		SetSlabFrozen(new);
+		__SetPageSlubFrozen(new);
 		c->page = new;
 		goto load_freelist;
 	}
@@ -1674,7 +1643,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	stat(c, FREE_SLOWPATH);
 	slab_lock(page);
 
-	if (unlikely(SlabDebug(page)))
+	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
 		goto debug;
 
 checks_ok:
@@ -1682,7 +1651,7 @@ checks_ok:
 	page->freelist = object;
 	page->inuse--;
 
-	if (unlikely(SlabFrozen(page))) {
+	if (unlikely(PageSlubFrozen(page))) {
 		stat(c, FREE_FROZEN);
 		goto out_unlock;
 	}
@@ -3317,12 +3286,12 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page,
 						s->name, page);
 
 	if (s->flags & DEBUG_DEFAULT_FLAGS) {
-		if (!SlabDebug(page))
-			printk(KERN_ERR "SLUB %s: SlabDebug not set "
+		if (!PageSlubDebug(page))
+			printk(KERN_ERR "SLUB %s: SlubDebug not set "
 				"on slab 0x%p\n", s->name, page);
 	} else {
-		if (SlabDebug(page))
-			printk(KERN_ERR "SLUB %s: SlabDebug set on "
+		if (PageSlubDebug(page))
+			printk(KERN_ERR "SLUB %s: SlubDebug set on "
 				"slab 0x%p\n", s->name, page);
 	}
 }
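
For reference, the full helper mapping performed by this patch, as derived
from the hunks above:

	SlabFrozen(page)       ->  PageSlubFrozen(page)
	SetSlabFrozen(page)    ->  __SetPageSlubFrozen(page)
	ClearSlabFrozen(page)  ->  __ClearPageSlubFrozen(page)
	SlabDebug(page)        ->  PageSlubDebug(page)
	                           (guarded by SLABDEBUG on fast paths)
	SetSlabDebug(page)     ->  __SetPageSlubDebug(page)
	ClearSlabDebug(page)   ->  __ClearPageSlubDebug(page)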