author     Christoph Lameter <clameter@sgi.com>                  2007-05-06 17:49:42 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-07 15:12:54 -0400
commit     643b113849d8faa68c9f01c3c9d929bfbffd50bd (patch)
tree       d8eea2326ccee49892acaa970bf015ee69a31e8a /mm/slub.c
parent     77c5e2d01af871f4bfbe08feefa3d5118cb1001b (diff)
slub: enable tracking of full slabs
If slab tracking is on, build a list of full slabs so that we can verify
the integrity of all slabs and are also able to build lists of alloc/free
callers.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 41
1 file changed, 40 insertions(+), 1 deletion(-)
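The mechanism is small: each `struct kmem_cache_node` gains a second list head, `full`, next to the existing `partial` list. When the cache is created with `SLAB_STORE_USER`, a slab that ends up with no free objects is linked onto that list, and it is unlinked again as soon as one of its objects is freed. The program below is a hedged, self-contained sketch of that bookkeeping only; the names `node_lists`, `slab` and `track_full` are placeholders rather than kernel types, and the real code additionally takes `n->list_lock`, as the hunks below show.

```c
#include <stdbool.h>
#include <stdio.h>

/* Minimal intrusive doubly linked list, standing in for <linux/list.h>. */
struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->prev = entry->next = entry;
}

/* Stand-in for struct kmem_cache_node: partial slabs plus, when
 * debugging, a list of fully allocated slabs. */
struct node_lists {
	struct list_head partial;
	struct list_head full;
};

/* Stand-in for the slab's struct page. */
struct slab {
	struct list_head lru;	/* links the slab into partial or full */
	int inuse;		/* number of allocated objects */
	void *freelist;		/* NULL once the slab is full */
};

static bool track_full = true;	/* plays the role of SLAB_STORE_USER */

static void add_full(struct node_lists *n, struct slab *s)
{
	if (!track_full)
		return;
	list_add(&s->lru, &n->full);	/* kernel holds n->list_lock here */
}

static void remove_full(struct node_lists *n, struct slab *s)
{
	if (!track_full)
		return;
	list_del(&s->lru);		/* kernel holds n->list_lock here */
}

int main(void)
{
	struct node_lists node;
	struct slab s = { .inuse = 8, .freelist = NULL };

	list_init(&node.partial);
	list_init(&node.full);
	list_init(&s.lru);

	add_full(&node, &s);		/* slab became full: track it */
	printf("full list populated: %s\n",
	       node.full.next != &node.full ? "yes" : "no");

	remove_full(&node, &s);		/* an object was freed: untrack */
	printf("full list populated: %s\n",
	       node.full.next != &node.full ? "yes" : "no");
	return 0;
}
```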
@@ -661,6 +661,40 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	return search == NULL;
 }
 
+/*
+ * Tracking of fully allocated slabs for debugging
+ */
+static void add_full(struct kmem_cache *s, struct page *page)
+{
+	struct kmem_cache_node *n;
+
+	VM_BUG_ON(!irqs_disabled());
+
+	VM_BUG_ON(!irqs_disabled());
+
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	n = get_node(s, page_to_nid(page));
+	spin_lock(&n->list_lock);
+	list_add(&page->lru, &n->full);
+	spin_unlock(&n->list_lock);
+}
+
+static void remove_full(struct kmem_cache *s, struct page *page)
+{
+	struct kmem_cache_node *n;
+
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	n = get_node(s, page_to_nid(page));
+
+	spin_lock(&n->list_lock);
+	list_del(&page->lru);
+	spin_unlock(&n->list_lock);
+}
+
 static int alloc_object_checks(struct kmem_cache *s, struct page *page,
 			void *object)
 {
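The commit message's promise of verifying "the integrity of all slabs" rests on these two helpers: once fully allocated slabs are reachable through `n->full`, a debug pass can visit every slab in a cache rather than only the partial ones. This patch only maintains the list; the function below is a hedged sketch of a possible consumer, not code from the patch, and `validate_slab()` is a hypothetical per-slab check.

```c
/*
 * Hypothetical consumer of the new n->full list (not part of this
 * patch): walk every fully allocated slab on a node and run a per-slab
 * check on it. validate_slab() is a placeholder name.
 */
static void walk_full_slabs(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);
	struct page *page;
	unsigned long flags;

	if (!(s->flags & SLAB_STORE_USER))
		return;		/* the full list is only kept in debug mode */

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->full, lru)
		validate_slab(s, page);
	spin_unlock_irqrestore(&n->list_lock, flags);
}
```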
@@ -1090,6 +1124,8 @@ static void putback_slab(struct kmem_cache *s, struct page *page)
 	if (page->inuse) {
 		if (page->freelist)
 			add_partial(s, page);
+		else if (PageError(page))
+			add_full(s, page);
 		slab_unlock(page);
 	} else {
 		slab_unlock(page);
@@ -1302,7 +1338,7 @@ out_unlock:
 slab_empty:
 	if (prior)
 		/*
-		 * Partially used slab that is on the partial list.
+		 * Slab on the partial list.
 		 */
 		remove_partial(s, page);
 
@@ -1314,6 +1350,8 @@ slab_empty:
 debug:
 	if (!free_object_checks(s, page, x))
 		goto out_unlock;
+	if (!PageActive(page) && !page->freelist)
+		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, x, TRACK_FREE, addr);
 	goto checks_ok;
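The guard `!PageActive(page) && !page->freelist` identifies a slab that was full at the moment of the free: it has no free objects yet and is not the per-cpu active slab, so (when `SLAB_STORE_USER` tracking is on) it is sitting on `n->full` and must be unlinked before the freed object rejoins the freelist. Together with the `putback_slab()` hunk above, this keeps the list consistent over a slab's life cycle; the standalone sketch below paraphrases those two decisions with toy types and no locking, and is not kernel code.

```c
/* Toy summary of when a slab enters or leaves the full list; a
 * paraphrase of the hunks above, not kernel code. */
struct toy_slab {
	int inuse;		/* allocated objects in the slab */
	int has_freelist;	/* any free objects left? */
	int on_full_list;
};

/* Putting a slab back after allocation (cf. putback_slab above). */
static void toy_putback(struct toy_slab *s, int debug)
{
	if (s->inuse && !s->has_freelist && debug)
		s->on_full_list = 1;	/* fully allocated: start tracking */
}

/* Start of a debug free on that slab (cf. the hunk above). */
static void toy_free(struct toy_slab *s, int debug)
{
	if (debug && !s->has_freelist && s->on_full_list)
		s->on_full_list = 0;	/* about to gain a free object */
	s->has_freelist = 1;
	s->inuse--;
}
```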
@@ -1466,6 +1504,7 @@ static void init_kmem_cache_node(struct kmem_cache_node *n)
 	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
+	INIT_LIST_HEAD(&n->full);
 }
 
 #ifdef CONFIG_NUMA