aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Lameter <cl@linux.com>2011-06-01 13:25:46 -0400
committerPekka Enberg <penberg@kernel.org>2011-07-02 06:26:53 -0400
commit8cb0a5068f4108e8ca60d5e0bcfbe6901adcfaef (patch)
tree48098dca8f01abd882bfb9771880b2c791604f8a
parent50d5c41cd151b21ac1dfc98f048210456ccacc20 (diff)
slub: Move page->frozen handling near where the page->freelist handling occurs
This is necessary because the frozen bit has to be handled in the same cmpxchg_double with the freelist and the counters.

Signed-off-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
-rw-r--r--mm/slub.c8
1 files changed, 6 insertions, 2 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 82b2d048a278..5a2d3d8e0558 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1286,6 +1286,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	page->freelist = start;
 	page->inuse = 0;
+	page->frozen = 1;
 out:
 	return page;
 }
@@ -1424,7 +1425,6 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
 {
 	if (slab_trylock(page)) {
 		__remove_partial(n, page);
-		page->frozen = 1;
 		return 1;
 	}
 	return 0;
@@ -1538,7 +1538,6 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
-	page->frozen = 0;
 	if (page->inuse) {
 
 		if (page->freelist) {
@@ -1671,6 +1670,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	}
 	c->page = NULL;
 	c->tid = next_tid(c->tid);
+	page->frozen = 0;
 	unfreeze_slab(s, page, tail);
 }
 
@@ -1831,6 +1831,8 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	stat(s, ALLOC_REFILL);
 
 load_freelist:
+	VM_BUG_ON(!page->frozen);
+
 	object = page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
@@ -1854,6 +1856,7 @@ new_slab:
 	page = get_partial(s, gfpflags, node);
 	if (page) {
 		stat(s, ALLOC_FROM_PARTIAL);
+		page->frozen = 1;
 		c->node = page_to_nid(page);
 		c->page = page;
 		goto load_freelist;
@@ -2371,6 +2374,7 @@ static void early_kmem_cache_node_alloc(int node)
 	BUG_ON(!n);
 	page->freelist = get_freepointer(kmem_cache_node, n);
 	page->inuse++;
+	page->frozen = 0;
 	kmem_cache_node->node[node] = n;
 #ifdef CONFIG_SLUB_DEBUG
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);