author	Christoph Lameter <clameter@sgi.com>	2008-04-25 15:22:43 -0400
committer	Pekka Enberg <penberg@cs.helsinki.fi>	2008-04-27 11:27:37 -0400
commit	33b12c38134e95e5afa73214af6f49abd7b8418e (patch)
tree	05292917c91f05ddcf67580f4abd00a68020976a
parent	599870b175987008b5f5c82a70b89f751e12822e (diff)
slub: Dump list of objects not freed on kmem_cache_close()
Dump a list of unfreed objects if a slab cache is closed but objects still remain.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
-rw-r--r--	mm/slub.c	32
1 file changed, 31 insertions(+), 1 deletion(-)
diff --git a/mm/slub.c b/mm/slub.c
index c937233127e2..64c2b2bfbd79 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2371,6 +2371,32 @@ const char *kmem_cache_name(struct kmem_cache *s)
 }
 EXPORT_SYMBOL(kmem_cache_name);
 
+static void list_slab_objects(struct kmem_cache *s, struct page *page,
+							const char *text)
+{
+#ifdef CONFIG_SLUB_DEBUG
+	void *addr = page_address(page);
+	void *p;
+	DECLARE_BITMAP(map, page->objects);
+
+	bitmap_zero(map, page->objects);
+	slab_err(s, page, "%s", text);
+	slab_lock(page);
+	for_each_free_object(p, s, page->freelist)
+		set_bit(slab_index(p, s, addr), map);
+
+	for_each_object(p, s, addr, page->objects) {
+
+		if (!test_bit(slab_index(p, s, addr), map)) {
+			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
+							p, p - addr);
+			print_tracking(s, p);
+		}
+	}
+	slab_unlock(page);
+#endif
+}
+
 /*
  * Attempt to free all partial slabs on a node.
  */
@@ -2380,12 +2406,16 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 	struct page *page, *h;
 
 	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry_safe(page, h, &n->partial, lru)
+	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
 			list_del(&page->lru);
 			discard_slab(s, page);
 			n->nr_partial--;
+		} else {
+			list_slab_objects(s, page,
+			"Objects remaining on kmem_cache_close()");
 		}
+	}
 	spin_unlock_irqrestore(&n->list_lock, flags);
 }
 
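For context, a minimal sketch of the situation this diagnostic targets: a cache is destroyed while one of its objects is still allocated. The module below is hypothetical (the "leak_demo" cache name, struct demo_obj, and the deliberate leak are invented for illustration), but kmem_cache_create(), kmem_cache_alloc() and kmem_cache_destroy() are the standard slab API; with SLUB and CONFIG_SLUB_DEBUG, destroying the cache reaches kmem_cache_close() -> free_partial(), which now hits the else branch above and has list_slab_objects() report each leaked object.

/*
 * Hypothetical leak demo (not part of the patch): destroy a cache while
 * one object is still allocated, so the new list_slab_objects() path
 * prints "Objects remaining on kmem_cache_close()" plus an
 * "INFO: Object 0x... @offset=..." line for the leaked object.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

struct demo_obj {
	int payload[16];
};

static int __init leak_demo_init(void)
{
	struct kmem_cache *cache;
	struct demo_obj *obj;

	cache = kmem_cache_create("leak_demo", sizeof(struct demo_obj),
				  0, SLAB_STORE_USER, NULL);
	if (!cache)
		return -ENOMEM;

	obj = kmem_cache_alloc(cache, GFP_KERNEL);	/* intentionally never freed */
	if (!obj)
		pr_err("leak_demo: allocation failed\n");

	/* Destroying the cache with obj still live triggers the new report. */
	kmem_cache_destroy(cache);
	return 0;
}

static void __exit leak_demo_exit(void)
{
}

module_init(leak_demo_init);
module_exit(leak_demo_exit);
MODULE_LICENSE("GPL");

The leak here is deliberate and exists only to exercise the new reporting path; SLAB_STORE_USER is requested so that print_tracking() can also show allocation tracking for the offending object when slub debugging is enabled.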