aboutsummaryrefslogtreecommitdiffstats
path: root/mm/slub.c
diff options
context:
space:
mode:
author: Christoph Lameter <cl@linux.com> 2011-06-01 13:25:58 -0400
committer: Pekka Enberg <penberg@kernel.org> 2011-07-02 06:26:57 -0400
commit03e404af26dc2ea0d278d7a342de0aab394793ce (patch)
tree1290b42700767c661125aaf584253bdeb98b7afd /mm/slub.c
parente36a2652d7d1ad97f7636a39bdd8654d296cc36b (diff)
slub: fast release on full slab
Make deactivation occur implicitly while checking out the current freelist. This avoids one cmpxchg operation on a slab that is now fully in use.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--mm/slub.c21
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index e00b7732f556..25dac48c1c60 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1977,9 +1977,21 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
1977 object = page->freelist; 1977 object = page->freelist;
1978 counters = page->counters; 1978 counters = page->counters;
1979 new.counters = counters; 1979 new.counters = counters;
1980 new.inuse = page->objects;
1981 VM_BUG_ON(!new.frozen); 1980 VM_BUG_ON(!new.frozen);
1982 1981
1982 /*
1983 * If there is no object left then we use this loop to
1984 * deactivate the slab which is simple since no objects
1985 * are left in the slab and therefore we do not need to
1986 * put the page back onto the partial list.
1987 *
1988 * If there are objects left then we retrieve them
1989 * and use them to refill the per cpu queue.
1990 */
1991
1992 new.inuse = page->objects;
1993 new.frozen = object != NULL;
1994
1983 } while (!cmpxchg_double_slab(s, page, 1995 } while (!cmpxchg_double_slab(s, page,
1984 object, counters, 1996 object, counters,
1985 NULL, new.counters, 1997 NULL, new.counters,
@@ -1988,8 +2000,11 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
1988load_freelist: 2000load_freelist:
1989 VM_BUG_ON(!page->frozen); 2001 VM_BUG_ON(!page->frozen);
1990 2002
1991 if (unlikely(!object)) 2003 if (unlikely(!object)) {
2004 c->page = NULL;
2005 stat(s, DEACTIVATE_BYPASS);
1992 goto new_slab; 2006 goto new_slab;
2007 }
1993 2008
1994 stat(s, ALLOC_REFILL); 2009 stat(s, ALLOC_REFILL);
1995 2010
@@ -4680,6 +4695,7 @@ STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
4680STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 4695STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
4681STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 4696STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
4682STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 4697STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
4698STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
4683STAT_ATTR(ORDER_FALLBACK, order_fallback); 4699STAT_ATTR(ORDER_FALLBACK, order_fallback);
4684STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 4700STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
4685STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 4701STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
@@ -4740,6 +4756,7 @@ static struct attribute *slab_attrs[] = {
4740 &deactivate_to_head_attr.attr, 4756 &deactivate_to_head_attr.attr,
4741 &deactivate_to_tail_attr.attr, 4757 &deactivate_to_tail_attr.attr,
4742 &deactivate_remote_frees_attr.attr, 4758 &deactivate_remote_frees_attr.attr,
4759 &deactivate_bypass_attr.attr,
4743 &order_fallback_attr.attr, 4760 &order_fallback_attr.attr,
4744 &cmpxchg_double_fail_attr.attr, 4761 &cmpxchg_double_fail_attr.attr,
4745 &cmpxchg_double_cpu_fail_attr.attr, 4762 &cmpxchg_double_cpu_fail_attr.attr,