author     Christoph Lameter <cl@linux.com>   2012-05-09 11:09:57 -0400
committer  Pekka Enberg <penberg@kernel.org>  2012-06-01 02:25:41 -0400
commit     c17dda40a6a4ed95f035db38b7ba4fab0d99da44
tree       332ba90981bb35d851a4078a3086352432a54b7c /mm/slub.c
parent     ec3ab083a7a004282ee374bdaeb0aa603521b8eb
slub: Separate out kmem_cache_cpu processing from deactivate_slab
Processing of kmem_cache_cpu fields is cleaner if the code that works on this struct is taken out of deactivate_slab().

Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
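In other words, after this patch deactivate_slab() only receives the slab page and its freelist, and the caller owns the kmem_cache_cpu bookkeeping. The sketch below paraphrases the flush_slab() hunk in the diff that follows (the comments are added here for illustration and are not in the kernel source); it shows the new calling convention:

static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
        stat(s, CPUSLAB_FLUSH);

        /* deactivate_slab() now only sees the slab page and its freelist ... */
        deactivate_slab(s, c->page, c->freelist);

        /* ... while the per-cpu state is reset by the caller itself. */
        c->tid = next_tid(c->tid);
        c->page = NULL;
        c->freelist = NULL;
}

The same pattern repeats at the other two call sites in __slab_alloc(): read out the per-cpu page and freelist, hand them to deactivate_slab(), then clear c->page and c->freelist.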
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index aed879276410..2389a016577e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1729,14 +1729,12 @@ void init_kmem_cache_cpus(struct kmem_cache *s)
 /*
  * Remove the cpu slab
  */
-static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
+static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
 {
         enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
-        struct page *page = c->page;
         struct kmem_cache_node *n = get_node(s, page_to_nid(page));
         int lock = 0;
         enum slab_modes l = M_NONE, m = M_NONE;
-        void *freelist;
         void *nextfree;
         int tail = DEACTIVATE_TO_HEAD;
         struct page new;
@@ -1747,11 +1745,6 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
                 tail = DEACTIVATE_TO_TAIL;
         }
 
-        c->tid = next_tid(c->tid);
-        c->page = NULL;
-        freelist = c->freelist;
-        c->freelist = NULL;
-
         /*
          * Stage one: Free all available per cpu objects back
          * to the page freelist while it is still frozen. Leave the
@@ -2009,7 +2002,11 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
         stat(s, CPUSLAB_FLUSH);
-        deactivate_slab(s, c);
+        deactivate_slab(s, c->page, c->freelist);
+
+        c->tid = next_tid(c->tid);
+        c->page = NULL;
+        c->freelist = NULL;
 }
 
 /*
@@ -2229,7 +2226,9 @@ redo:
 
         if (unlikely(!node_match(c, node))) {
                 stat(s, ALLOC_NODE_MISMATCH);
-                deactivate_slab(s, c);
+                deactivate_slab(s, c->page, c->freelist);
+                c->page = NULL;
+                c->freelist = NULL;
                 goto new_slab;
         }
 
@@ -2289,8 +2288,9 @@ new_slab:
         if (!alloc_debug_processing(s, c->page, freelist, addr))
                 goto new_slab;  /* Slab failed checks. Next slab needed */
 
-        c->freelist = get_freepointer(s, freelist);
-        deactivate_slab(s, c);
+        deactivate_slab(s, c->page, get_freepointer(s, freelist));
+        c->page = NULL;
+        c->freelist = NULL;
         local_irq_restore(flags);
         return freelist;
 }