 include/linux/slub_def.h |  1 +
 mm/slub.c                | 39 +++++++++++++++++++++++++++++++++++++-
 2 files changed, 39 insertions(+), 1 deletion(-)
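
In short: the patch gives each kmem_cache_node a second list, full, next to the existing partial list. When a cache is created with SLAB_STORE_USER debugging, slabs that run out of free objects are added to that list by add_full() and taken off it again by remove_full() as soon as an object is freed back to them, so that debugging code can reach every slab of a cache, not only the partially allocated ones.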
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index f8e0c86c48a9..ea27065e80e6 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -16,6 +16,7 @@ struct kmem_cache_node {
 	unsigned long nr_partial;
 	atomic_long_t nr_slabs;
 	struct list_head partial;
+	struct list_head full;
 };
 
 /*
diff --git a/mm/slub.c b/mm/slub.c
index cfc5301afe42..c4f40d373d1e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -661,6 +661,38 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	return search == NULL;
 }
 
+/*
+ * Tracking of fully allocated slabs for debugging
+ */
+static void add_full(struct kmem_cache *s, struct page *page)
+{
+	struct kmem_cache_node *n;
+
+	VM_BUG_ON(!irqs_disabled());
+
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	n = get_node(s, page_to_nid(page));
+	spin_lock(&n->list_lock);
+	list_add(&page->lru, &n->full);
+	spin_unlock(&n->list_lock);
+}
+
+static void remove_full(struct kmem_cache *s, struct page *page)
+{
+	struct kmem_cache_node *n;
+
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	n = get_node(s, page_to_nid(page));
+
+	spin_lock(&n->list_lock);
+	list_del(&page->lru);
+	spin_unlock(&n->list_lock);
+}
+
 static int alloc_object_checks(struct kmem_cache *s, struct page *page,
 						void *object)
 {
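
To illustrate what add_full()/remove_full() do to the per-node lists, here is a stand-alone user-space sketch. Everything in it is illustrative: struct node and struct slab stand in for kmem_cache_node and struct page, and the stub list helpers only mimic the kernel's <linux/list.h>; locking and the SLAB_STORE_USER check are left out.

#include <assert.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

struct node {				/* models kmem_cache_node */
	struct list_head partial;
	struct list_head full;
};

struct slab {				/* models struct page */
	struct list_head lru;
};

int main(void)
{
	struct node n;
	struct slab s;

	INIT_LIST_HEAD(&n.partial);
	INIT_LIST_HEAD(&n.full);	/* as init_kmem_cache_node() now does */

	list_add(&s.lru, &n.full);	/* add_full(): slab became fully used */
	assert(n.full.next == &s.lru && s.lru.prev == &n.full);

	list_del(&s.lru);		/* remove_full(): an object was freed */
	assert(n.full.next == &n.full);	/* full list is empty again */

	puts("full-list round trip ok");
	return 0;
}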
@@ -1090,6 +1122,8 @@ static void putback_slab(struct kmem_cache *s, struct page *page)
 	if (page->inuse) {
 		if (page->freelist)
 			add_partial(s, page);
+		else if (PageError(page))
+			add_full(s, page);
 		slab_unlock(page);
 	} else {
 		slab_unlock(page);
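
One remark on the PageError() test above: this version of SLUB reuses the PG_error page flag as its per-slab "debugging active" marker, so a fully allocated slab (page->inuse set, page->freelist NULL) is parked on the full list only when it is being debug-tracked; for non-debug slabs the behaviour is unchanged and full slabs remain on no list at all.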
@@ -1302,7 +1336,7 @@ out_unlock:
 slab_empty:
 	if (prior)
 		/*
-		 * Partially used slab that is on the partial list.
+		 * Slab on the partial list.
 		 */
 		remove_partial(s, page);
 
@@ -1314,6 +1348,8 @@ slab_empty:
 debug:
 	if (!free_object_checks(s, page, x))
 		goto out_unlock;
+	if (!PageActive(page) && !page->freelist)
+		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, x, TRACK_FREE, addr);
 	goto checks_ok;
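
The two lines added here handle the full-to-partial transition on the debug path: a slab whose freelist is NULL had been filed by add_full(), and the free about to happen will give it a free object again, so it must be unlinked first; PageActive() skips the active per-cpu slab, which is kept off both lists. A minimal user-space sketch of that transition, with invented booleans (on_full, on_partial) standing in for actual list membership:

#include <stdbool.h>
#include <stdio.h>

struct slab {
	int inuse;		/* objects currently allocated */
	bool has_freelist;	/* false <=> slab is fully allocated */
	bool active;		/* the current per-cpu slab */
	bool on_full, on_partial;
};

/* Mirrors the debug free path: unlink from the full list first, then
 * let the normal putback logic file the slab on the partial list. */
static void free_object(struct slab *s)
{
	if (!s->active && !s->has_freelist)
		s->on_full = false;	/* remove_full() */

	s->inuse--;
	s->has_freelist = true;		/* the freed object is now free */

	if (!s->active && s->inuse)
		s->on_partial = true;	/* add_partial() in putback_slab() */
}

int main(void)
{
	struct slab s = {
		.inuse = 8, .has_freelist = false,
		.active = false, .on_full = true,
	};

	free_object(&s);
	printf("on_full=%d on_partial=%d\n", s.on_full, s.on_partial);
	return 0;
}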
@@ -1466,6 +1502,7 @@ static void init_kmem_cache_node(struct kmem_cache_node *n)
 	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
+	INIT_LIST_HEAD(&n->full);
 }
 
 #ifdef CONFIG_NUMA