path: root/mm
author	Christoph Lameter <clameter@sgi.com>	2007-05-06 17:49:42 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 15:12:54 -0400
commit	643b113849d8faa68c9f01c3c9d929bfbffd50bd (patch)
tree	d8eea2326ccee49892acaa970bf015ee69a31e8a /mm
parent	77c5e2d01af871f4bfbe08feefa3d5118cb1001b (diff)
slub: enable tracking of full slabs
If slab tracking is on then build a list of full slabs so that we can
verify the integrity of all slabs and are also able to build lists of
alloc/free callers.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
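For context, a minimal sketch of how per-node partial and full lists could be
walked to verify every slab that still holds objects. This is not part of the
patch; validate_slab() is an assumed helper name.

/*
 * Hypothetical sketch, not part of this patch: with both the partial
 * list and the new full list, a debug pass can reach every slab that
 * still holds objects.  The full list is only maintained when
 * SLAB_STORE_USER is set.
 */
static void validate_all_slabs(struct kmem_cache *s)
{
	int node;

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);
		struct page *page;
		unsigned long flags;

		spin_lock_irqsave(&n->list_lock, flags);
		list_for_each_entry(page, &n->partial, lru)
			validate_slab(s, page);		/* assumed helper */
		list_for_each_entry(page, &n->full, lru)
			validate_slab(s, page);		/* assumed helper */
		spin_unlock_irqrestore(&n->list_lock, flags);
	}
}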
Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	41
1 file changed, 40 insertions(+), 1 deletion(-)
diff --git a/mm/slub.c b/mm/slub.c
index cfc5301afe42..c4f40d373d1e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -661,6 +661,40 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	return search == NULL;
 }
 
+/*
+ * Tracking of fully allocated slabs for debugging
+ */
+static void add_full(struct kmem_cache *s, struct page *page)
+{
+	struct kmem_cache_node *n;
+
+	VM_BUG_ON(!irqs_disabled());
+
+	VM_BUG_ON(!irqs_disabled());
+
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	n = get_node(s, page_to_nid(page));
+	spin_lock(&n->list_lock);
+	list_add(&page->lru, &n->full);
+	spin_unlock(&n->list_lock);
+}
+
+static void remove_full(struct kmem_cache *s, struct page *page)
+{
+	struct kmem_cache_node *n;
+
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	n = get_node(s, page_to_nid(page));
+
+	spin_lock(&n->list_lock);
+	list_del(&page->lru);
+	spin_unlock(&n->list_lock);
+}
+
 static int alloc_object_checks(struct kmem_cache *s, struct page *page,
 						void *object)
 {
@@ -1090,6 +1124,8 @@ static void putback_slab(struct kmem_cache *s, struct page *page)
 	if (page->inuse) {
 		if (page->freelist)
 			add_partial(s, page);
+		else if (PageError(page))
+			add_full(s, page);
 		slab_unlock(page);
 	} else {
 		slab_unlock(page);
@@ -1302,7 +1338,7 @@ out_unlock:
 slab_empty:
 	if (prior)
 		/*
-		 * Partially used slab that is on the partial list.
+		 * Slab on the partial list.
 		 */
 		remove_partial(s, page);
 
@@ -1314,6 +1350,8 @@ slab_empty:
 debug:
 	if (!free_object_checks(s, page, x))
 		goto out_unlock;
+	if (!PageActive(page) && !page->freelist)
+		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, x, TRACK_FREE, addr);
 	goto checks_ok;
@@ -1466,6 +1504,7 @@ static void init_kmem_cache_node(struct kmem_cache_node *n)
 	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
+	INIT_LIST_HEAD(&n->full);
 }
 
 #ifdef CONFIG_NUMA
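The diffstat above is limited to mm/, so the companion change that adds the
list head to struct kmem_cache_node (defined outside mm/ in this era) is not
shown. Based on the fields initialized in init_kmem_cache_node() above, the
structure roughly looks like this sketch; field order and comments are
assumptions, not copied from the tree.

/*
 * Sketch of struct kmem_cache_node after this patch; reconstructed
 * from init_kmem_cache_node() above.
 */
struct kmem_cache_node {
	spinlock_t list_lock;		/* Protects partial and full lists */
	atomic_long_t nr_slabs;
	struct list_head partial;
	struct list_head full;		/* New: fully allocated slabs; only
					 * maintained when SLAB_STORE_USER
					 * is set */
};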