author     Christoph Lameter <clameter@sgi.com>    2007-05-06 17:49:44 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-07 15:12:54 -0400
commit     e95eed571e85d7ad4cde73576296c615f305f59f (patch)
tree       6ee56b3248ccd89db45f6bd8d532005fc61cc00b /mm/slub.c
parent     53e15af03be4fdaaf20802d78f141487d7272985 (diff)
SLUB: Add MIN_PARTIAL
We leave a minimum of partial slabs on a node when we search other nodes
for partial slabs. Define a constant, MIN_PARTIAL, for that value, and
modify SLUB to keep MIN_PARTIAL slabs around.

This avoids the bad situation where a function frees the last object in a
slab (which results in the page being returned to the page allocator), only
to allocate one again immediately (which requires getting a page back from
the page allocator if the partial list was empty). Keeping a couple of
slabs on the partial list reduces that overhead.

Empty slabs are added to the end of the partial list to ensure that
partially allocated slabs are consumed first (defragmentation).

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--    mm/slub.c    55
1 file changed, 36 insertions, 19 deletions
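As a quick illustration of the policy described in the log message, the list handling can be modelled in plain userspace C. This is only a sketch: struct node_model, struct slab_model and free_last_object() are invented stand-ins for struct kmem_cache_node, struct page and the free path in putback_slab(); the real code runs under n->list_lock and uses the kernel list helpers rather than a hand-rolled singly linked list.

#include <stdio.h>
#include <stdlib.h>

#define MIN_PARTIAL 2                   /* same threshold the patch introduces */

struct slab_model {
        int inuse;                      /* objects still allocated from this slab */
        struct slab_model *next;        /* singly linked partial list */
};

struct node_model {
        struct slab_model *head;        /* consumed first */
        struct slab_model *tail;
        int nr_partial;
};

/* Partially allocated slabs go to the head so they are consumed first. */
static void add_partial(struct node_model *n, struct slab_model *s)
{
        s->next = n->head;
        n->head = s;
        if (!n->tail)
                n->tail = s;
        n->nr_partial++;
}

/* Empty slabs go to the tail: they are only used once nothing else is left. */
static void add_partial_tail(struct node_model *n, struct slab_model *s)
{
        s->next = NULL;
        if (n->tail)
                n->tail->next = s;
        else
                n->head = s;
        n->tail = s;
        n->nr_partial++;
}

/* Model of the free path: the last object of a slab has just been freed. */
static void free_last_object(struct node_model *n, struct slab_model *s)
{
        s->inuse = 0;
        if (n->nr_partial < MIN_PARTIAL) {
                /* keep the empty slab instead of going back to the page allocator */
                add_partial_tail(n, s);
                printf("kept empty slab, nr_partial=%d\n", n->nr_partial);
        } else {
                printf("discarded empty slab, nr_partial=%d\n", n->nr_partial);
                free(s);        /* stands in for discard_slab() */
        }
}

int main(void)
{
        struct node_model node = { NULL, NULL, 0 };
        struct slab_model *busy = calloc(1, sizeof(*busy));
        int i;

        /* A slab that still has live objects sits at the head of the list. */
        busy->inuse = 5;
        add_partial(&node, busy);

        /* Free the last object of two more slabs: the first is kept at the
         * tail (nr_partial < MIN_PARTIAL), the second is discarded. */
        for (i = 0; i < 2; i++) {
                struct slab_model *s = calloc(1, sizeof(*s));
                s->inuse = 1;
                free_last_object(&node, s);
        }
        return 0;
}

The first empty slab is parked at the tail of the partial list, so partially allocated slabs at the head are still consumed first; once nr_partial reaches MIN_PARTIAL, further empty slabs go straight back to the page allocator, which is the trade-off the patch is after.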
diff --git a/mm/slub.c b/mm/slub.c
index 69ee7f807e84..4251917c5da1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -133,6 +133,9 @@
  */
 #define SLUB_UNIMPLEMENTED (SLAB_DEBUG_INITIAL)
 
+/* Mininum number of partial slabs */
+#define MIN_PARTIAL 2
+
 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
                 SLAB_POISON | SLAB_STORE_USER)
 /*
@@ -664,16 +667,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 /*
  * Tracking of fully allocated slabs for debugging
  */
-static void add_full(struct kmem_cache *s, struct page *page)
+static void add_full(struct kmem_cache_node *n, struct page *page)
 {
-        struct kmem_cache_node *n;
-
-        VM_BUG_ON(!irqs_disabled());
-
-        if (!(s->flags & SLAB_STORE_USER))
-                return;
-
-        n = get_node(s, page_to_nid(page));
         spin_lock(&n->list_lock);
         list_add(&page->lru, &n->full);
         spin_unlock(&n->list_lock);
@@ -982,10 +977,16 @@ static __always_inline int slab_trylock(struct page *page)
 /*
  * Management of partially allocated slabs
  */
-static void add_partial(struct kmem_cache *s, struct page *page)
+static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
 {
-        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+        spin_lock(&n->list_lock);
+        n->nr_partial++;
+        list_add_tail(&page->lru, &n->partial);
+        spin_unlock(&n->list_lock);
+}
 
+static void add_partial(struct kmem_cache_node *n, struct page *page)
+{
         spin_lock(&n->list_lock);
         n->nr_partial++;
         list_add(&page->lru, &n->partial);
@@ -1085,7 +1086,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
                 n = get_node(s, zone_to_nid(*z));
 
                 if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
-                                n->nr_partial > 2) {
+                                n->nr_partial > MIN_PARTIAL) {
                         page = get_partial_node(n);
                         if (page)
                                 return page;
@@ -1119,15 +1120,31 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
  */
 static void putback_slab(struct kmem_cache *s, struct page *page)
 {
+        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
         if (page->inuse) {
+
                 if (page->freelist)
-                        add_partial(s, page);
-                else if (PageError(page))
-                        add_full(s, page);
+                        add_partial(n, page);
+                else if (PageError(page) && (s->flags & SLAB_STORE_USER))
+                        add_full(n, page);
                 slab_unlock(page);
+
         } else {
-                slab_unlock(page);
-                discard_slab(s, page);
+                if (n->nr_partial < MIN_PARTIAL) {
+                        /*
+                         * Adding an empty page to the partial slabs in order
+                         * to avoid page allocator overhead. This page needs to
+                         * come after all the others that are not fully empty
+                         * in order to make sure that we do maximum
+                         * defragmentation.
+                         */
+                        add_partial_tail(n, page);
+                        slab_unlock(page);
+                } else {
+                        slab_unlock(page);
+                        discard_slab(s, page);
+                }
         }
 }
 
@@ -1326,7 +1343,7 @@ checks_ok:
          * then add it.
          */
         if (unlikely(!prior))
-                add_partial(s, page);
+                add_partial(get_node(s, page_to_nid(page)), page);
 
 out_unlock:
         slab_unlock(page);
@@ -1535,7 +1552,7 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
         init_object(kmalloc_caches, n, 1);
         init_kmem_cache_node(n);
         atomic_long_inc(&n->nr_slabs);
-        add_partial(kmalloc_caches, page);
+        add_partial(n, page);
         return n;
 }
 