author    Christoph Lameter <clameter@sgi.com>    2007-10-16 04:26:07 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-10-16 12:43:01 -0400
commit    ee3c72a14bfecdf783738032ff3c73ef6412f5b3 (patch)
tree      876c2c5d33058be8502504330726bb16b876ba52 /mm/slub.c
parent    b3fba8da653999c67d7517050f196e92da6f8d3b (diff)
SLUB: Avoid touching page struct when freeing to per cpu slab
Set c->node to -1 if we allocate from a debug slab, and test that on free instead of SlabDebug(page), which requires access to the page struct cacheline.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Tested-by: Alexey Dobriyan <adobriyan@sw.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
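For context, a minimal sketch of the idea behind the patch. The struct cpu_slab and free_fast_path() below are simplified, hypothetical stand-ins, not the real struct kmem_cache_cpu or slab_free() from mm/slub.c; the point is only that the debug test now reads c->node from the cache-hot per-CPU structure rather than testing SlabDebug(), which would pull in the struct page cacheline.

#include <stddef.h>
#include <stdbool.h>

/* Hypothetical, simplified per-CPU slab state; the real struct kmem_cache_cpu
 * in mm/slub.c differs. node < 0 marks "allocated from a debug slab". */
struct cpu_slab {
	void **freelist;	/* next free object, or NULL */
	int node;		/* NUMA node of the cpu slab, or -1 for a debug slab */
	void *page;		/* cache-cold: reading its flags costs a cacheline */
};

/* Fast free path after this patch: the debug test reads only c->node
 * instead of SlabDebug(page). */
static bool free_fast_path(struct cpu_slab *c, void *page, void **object,
			   size_t offset)
{
	if (page == c->page && c->node >= 0) {
		object[offset] = c->freelist;	/* push object onto the freelist */
		c->freelist = object;
		return true;			/* freed without touching struct page */
	}
	return false;				/* take the slow path instead */
}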
Diffstat (limited to 'mm/slub.c')
-rw-r--r--    mm/slub.c    14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 5d895d44c327..ea9fd72093d8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1537,6 +1537,7 @@ debug:
 
 	c->page->inuse++;
 	c->page->freelist = object[c->offset];
+	c->node = -1;
 	slab_unlock(c->page);
 	return object;
 }
@@ -1560,8 +1561,7 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (unlikely(!c->page || !c->freelist ||
-					!node_match(c, node)))
+	if (unlikely(!c->freelist || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
@@ -1670,7 +1670,7 @@ static void __always_inline slab_free(struct kmem_cache *s,
 	local_irq_save(flags);
 	debug_check_no_locks_freed(object, s->objsize);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (likely(page == c->page && !SlabDebug(page))) {
+	if (likely(page == c->page && c->node >= 0)) {
 		object[c->offset] = c->freelist;
 		c->freelist = object;
 	} else
@@ -3250,12 +3250,16 @@ static unsigned long slab_objects(struct kmem_cache *s,
 
 	for_each_possible_cpu(cpu) {
 		struct page *page;
+		int node;
 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 
 		if (!c)
 			continue;
 
 		page = c->page;
+		node = c->node;
+		if (node < 0)
+			continue;
 		if (page) {
 			if (flags & SO_CPU) {
 				int x = 0;
@@ -3265,9 +3269,9 @@ static unsigned long slab_objects(struct kmem_cache *s,
 				else
 					x = 1;
 				total += x;
-				nodes[c->node] += x;
+				nodes[node] += x;
 			}
-			per_cpu[c->node]++;
+			per_cpu[node]++;
 		}
 	}
 