author	Christoph Lameter <cl@linux.com>	2012-05-09 11:09:56 -0400
committer	Pekka Enberg <penberg@kernel.org>	2012-06-01 02:25:41 -0400
commit	ec3ab083a7a004282ee374bdaeb0aa603521b8eb (patch)
tree	8c570d6bd284ca8357675a4e7c8409dec691f91c /mm
parent	188fd063208942a4681d8e8a4484ad0d4ae0fda1 (diff)
slub: Get rid of the node field
The node field is always page_to_nid(c->page), so it is rather easy to
replace. Note that there may be slightly more overhead in various hot
paths due to the need to shift the bits from page->flags. However, that
is mostly compensated for by a smaller footprint of the kmem_cache_cpu
structure (this patch reduces it to 3 words per cache), which allows
better caching.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	35
1 file changed, 16 insertions(+), 19 deletions(-)
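
Note: for reference, a minimal sketch of struct kmem_cache_cpu after this
patch, assuming the include/linux/slub_def.h layout of this era (the removal
of the 'node' field itself lands in slub_def.h, outside the mm/ diffstat
shown here); comments are paraphrased, not the verbatim kernel declaration:

	/* Sketch, not the verbatim kernel declaration. */
	struct kmem_cache_cpu {
		void **freelist;	/* pointer to next available object */
		unsigned long tid;	/* globally unique transaction id */
		struct page *page;	/* the slab we are allocating from */
		struct page *partial;	/* partially allocated frozen slabs */
		/* int node; -- removed: it was always page_to_nid(page) */
	};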
diff --git a/mm/slub.c b/mm/slub.c
index b29246bc7392..aed879276410 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1561,7 +1561,6 @@ static void *get_partial_node(struct kmem_cache *s,
 
 		if (!object) {
 			c->page = page;
-			c->node = page_to_nid(page);
 			stat(s, ALLOC_FROM_PARTIAL);
 			object = t;
 			available = page->objects - page->inuse;
@@ -2057,7 +2056,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct kmem_cache_cpu *c, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != NUMA_NO_NODE && c->node != node)
+	if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
 		return 0;
 #endif
 	return 1;
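
The extra hot-path cost mentioned in the commit message is this
page_to_nid() call: the node id is packed into bits of page->flags, so each
node_match() now does a shift and a mask instead of a plain load of c->node.
A self-contained toy model of that decoding (the shift and mask values here
are illustrative only; the kernel derives the real ones from its configured
page-flags layout):

	#include <assert.h>
	#include <stdio.h>

	/* Toy stand-ins for the kernel's page->flags node encoding. */
	#define TOY_NODES_SHIFT	20
	#define TOY_NODES_MASK	0x3ffUL

	struct toy_page { unsigned long flags; };

	static int toy_page_to_nid(const struct toy_page *page)
	{
		/* page_to_nid() is essentially this shift + mask. */
		return (int)((page->flags >> TOY_NODES_SHIFT) & TOY_NODES_MASK);
	}

	int main(void)
	{
		/* Low bit is an unrelated flag; node 3 lives in the upper bits. */
		struct toy_page page = { .flags = 1UL | (3UL << TOY_NODES_SHIFT) };

		assert(toy_page_to_nid(&page) == 3);
		printf("node = %d\n", toy_page_to_nid(&page));
		return 0;
	}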
@@ -2152,7 +2151,6 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 		page->freelist = NULL;
 
 		stat(s, ALLOC_SLAB);
-		c->node = page_to_nid(page);
 		c->page = page;
 		*pc = c;
 	} else
@@ -2269,7 +2267,6 @@ new_slab:
 	if (c->partial) {
 		c->page = c->partial;
 		c->partial = c->page->next;
-		c->node = page_to_nid(c->page);
 		stat(s, CPU_PARTIAL_ALLOC);
 		c->freelist = NULL;
 		goto redo;
@@ -2294,7 +2291,6 @@ new_slab:
 
 	c->freelist = get_freepointer(s, freelist);
 	deactivate_slab(s, c);
-	c->node = NUMA_NO_NODE;
 	local_irq_restore(flags);
 	return freelist;
 }
@@ -4507,30 +4503,31 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
 		for_each_possible_cpu(cpu) {
 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
-			int node = ACCESS_ONCE(c->node);
+			int node;
 			struct page *page;
 
-			if (node < 0)
-				continue;
 			page = ACCESS_ONCE(c->page);
-			if (page) {
-				if (flags & SO_TOTAL)
-					x = page->objects;
-				else if (flags & SO_OBJECTS)
-					x = page->inuse;
-				else
-					x = 1;
+			if (!page)
+				continue;
 
-				total += x;
-				nodes[node] += x;
-			}
-			page = c->partial;
+			node = page_to_nid(page);
+			if (flags & SO_TOTAL)
+				x = page->objects;
+			else if (flags & SO_OBJECTS)
+				x = page->inuse;
+			else
+				x = 1;
+
+			total += x;
+			nodes[node] += x;
 
+			page = ACCESS_ONCE(c->partial);
 			if (page) {
 				x = page->pobjects;
 				total += x;
 				nodes[node] += x;
 			}
+
 			per_cpu[node]++;
 		}
 	}
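
With c->node gone, show_slab_objects() can no longer test node < 0 to skip
CPUs without a cpu slab; it snapshots c->page once, skips on NULL, and
derives the node from that snapshot, so the node and the per-page counts
cannot disagree if the cpu slab changes underneath it. ACCESS_ONCE() is the
kernel idiom of this era (the predecessor of READ_ONCE()) for forcing a
single, non-reloaded read; its definition in include/linux/compiler.h is
essentially:

	/* Read x exactly once by going through a volatile-qualified lvalue. */
	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

Reading through the volatile cast keeps the compiler from re-fetching
c->page between the NULL check and the later page_to_nid(page) call.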