author		Christoph Lameter <clameter@sgi.com>	2007-05-09 05:32:42 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-09 15:30:45 -0400
commit		35e5d7ee27680aef6dc3fab45a5ecd9952d9791a (patch)
tree		47660b2ac98ca94c42164952520700644411a6cc /mm/slub.c
parent		b345970905e34c1b632fe4d80e2af14c7de99b45 (diff)
SLUB: introduce DebugSlab(page)
This replaces the PageError() checking. DebugSlab is clearer and allows for
future changes to the page bit used. We also need it to support
CONFIG_SLUB_DEBUG.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
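For readers outside the kernel tree, here is a minimal self-contained sketch of
the accessor pattern this patch introduces. The names mirror the patch, but the
struct page, the bit position, and the main() harness are illustrative
stand-ins, not the kernel's definitions:

#include <stdio.h>

/* Illustrative stand-in; the kernel's PG_error bit value differs. */
#define PG_error 1UL

struct page { unsigned long flags; };

/* Wrapping the raw bit test behind a named accessor is the point of the
 * patch: call sites say SlabDebug(page) instead of PageError(page), so
 * the underlying page bit can change later without touching callers. */
static inline int SlabDebug(struct page *page)
{
        return (page->flags >> PG_error) & 1;
}

static inline void SetSlabDebug(struct page *page)
{
        page->flags |= 1UL << PG_error;
}

static inline void ClearSlabDebug(struct page *page)
{
        page->flags &= ~(1UL << PG_error);
}

int main(void)
{
        struct page p = { 0 };

        SetSlabDebug(&p);
        printf("debug set: %d\n", SlabDebug(&p));       /* prints 1 */
        ClearSlabDebug(&p);
        printf("debug set: %d\n", SlabDebug(&p));       /* prints 0 */
        return 0;
}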
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	40
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 07492a83b46e..c58a974d15ac 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -87,6 +87,21 @@
  * the fast path.
  */
 
+static inline int SlabDebug(struct page *page)
+{
+        return PageError(page);
+}
+
+static inline void SetSlabDebug(struct page *page)
+{
+        SetPageError(page);
+}
+
+static inline void ClearSlabDebug(struct page *page)
+{
+        ClearPageError(page);
+}
+
 /*
  * Issues still to be resolved:
  *
@@ -823,7 +838,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 static void setup_object(struct kmem_cache *s, struct page *page,
                                 void *object)
 {
-        if (PageError(page)) {
+        if (SlabDebug(page)) {
                 init_object(s, object, 0);
                 init_tracking(s, object);
         }
@@ -858,7 +873,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
         page->flags |= 1 << PG_slab;
         if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
                         SLAB_STORE_USER | SLAB_TRACE))
-                page->flags |= 1 << PG_error;
+                SetSlabDebug(page);
 
         start = page_address(page);
         end = start + s->objects * s->size;
@@ -887,7 +902,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 {
         int pages = 1 << s->order;
 
-        if (unlikely(PageError(page) || s->dtor)) {
+        if (unlikely(SlabDebug(page) || s->dtor)) {
                 void *p;
 
                 slab_pad_check(s, page);
@@ -934,7 +949,8 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 
         atomic_long_dec(&n->nr_slabs);
         reset_page_mapcount(page);
-        page->flags &= ~(1 << PG_slab | 1 << PG_error);
+        ClearSlabDebug(page);
+        __ClearPageSlab(page);
         free_slab(s, page);
 }
 
@@ -1109,7 +1125,7 @@ static void putback_slab(struct kmem_cache *s, struct page *page)
 
         if (page->freelist)
                 add_partial(n, page);
-        else if (PageError(page) && (s->flags & SLAB_STORE_USER))
+        else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
                 add_full(n, page);
         slab_unlock(page);
 
@@ -1193,7 +1209,7 @@ static void flush_all(struct kmem_cache *s)
  * per cpu array in the kmem_cache struct.
  *
  * Fastpath is not possible if we need to get a new slab or have
- * debugging enabled (which means all slabs are marked with PageError)
+ * debugging enabled (which means all slabs are marked with SlabDebug)
  */
 static void *slab_alloc(struct kmem_cache *s,
                 gfp_t gfpflags, int node, void *addr)
@@ -1216,7 +1232,7 @@ redo:
         object = page->freelist;
         if (unlikely(!object))
                 goto another_slab;
-        if (unlikely(PageError(page)))
+        if (unlikely(SlabDebug(page)))
                 goto debug;
 
 have_object:
@@ -1314,7 +1330,7 @@ static void slab_free(struct kmem_cache *s, struct page *page,
         local_irq_save(flags);
         slab_lock(page);
 
-        if (unlikely(PageError(page)))
+        if (unlikely(SlabDebug(page)))
                 goto debug;
 checks_ok:
         prior = object[page->offset] = page->freelist;
@@ -2571,12 +2587,12 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page)
                         s->name, page);
 
         if (s->flags & DEBUG_DEFAULT_FLAGS) {
-                if (!PageError(page))
-                        printk(KERN_ERR "SLUB %s: PageError not set "
+                if (!SlabDebug(page))
+                        printk(KERN_ERR "SLUB %s: SlabDebug not set "
                                 "on slab 0x%p\n", s->name, page);
         } else {
-                if (PageError(page))
-                        printk(KERN_ERR "SLUB %s: PageError set on "
+                if (SlabDebug(page))
+                        printk(KERN_ERR "SLUB %s: SlabDebug set on "
                                 "slab 0x%p\n", s->name, page);
         }
 }
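The commit message's mention of CONFIG_SLUB_DEBUG hints at why the indirection
matters: once every call site goes through the accessor, a debug-disabled build
can stub it to a constant and the compiler discards the debug branches as dead
code. A hypothetical sketch of that direction (this commit does not add the
#ifdef; PageError() is stubbed here so the example compiles standalone):

#include <stdio.h>

struct page { unsigned long flags; };

/* Stand-in for the kernel's PageError(); illustration only. */
static inline int PageError(struct page *page)
{
        return page->flags & 1;
}

#ifdef CONFIG_SLUB_DEBUG
static inline int SlabDebug(struct page *page)
{
        return PageError(page);
}
#else
static inline int SlabDebug(struct page *page)
{
        return 0;       /* constant 0: the "goto debug" paths become dead code */
}
#endif

int main(void)
{
        struct page p = { 1 };

        /* Build with: cc -DCONFIG_SLUB_DEBUG demo.c   -> prints 1
         *         or: cc demo.c                       -> prints 0 */
        printf("SlabDebug: %d\n", SlabDebug(&p));
        return 0;
}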