author     Christoph Lameter <cl@linux.com>     2012-05-09 11:09:59 -0400
committer  Pekka Enberg <penberg@kernel.org>    2012-06-01 02:25:41 -0400
commit     57d437d2aa680f42d75cef45205834d5f605550a (patch)
tree       0730b0d2b5b3c1e8286569580de92228e097af51 /mm/slub.c
parent     f6e7def7f7d749759e4bf36dcc25ae289a20d868 (diff)
slub: pass page to node_match() instead of kmem_cache_cpu structure
Avoid passing the kmem_cache_cpu pointer to node_match. This makes the
node_match function more generic and easier to understand.
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
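
The point of the change is visible from the signatures alone: node_match() now takes only a page, so any caller holding a page pointer can use it, not just code with a kmem_cache_cpu in hand. Below is a minimal userspace sketch of the new shape; struct page, page_to_nid() and NUMA_NO_NODE are stubbed stand-ins for illustration, not the kernel definitions.

#include <stdio.h>

#define NUMA_NO_NODE (-1)

struct page { int nid; };               /* stub: the real struct page encodes the node differently */

static int page_to_nid(const struct page *page)  /* stub for the kernel helper */
{
        return page->nid;
}

/* Post-patch shape: only the page is needed, not the whole kmem_cache_cpu. */
static inline int node_match(struct page *page, int node)
{
        if (node != NUMA_NO_NODE && page_to_nid(page) != node)
                return 0;
        return 1;
}

int main(void)
{
        struct page p = { .nid = 1 };

        printf("%d\n", node_match(&p, NUMA_NO_NODE));  /* 1: no specific node requested */
        printf("%d\n", node_match(&p, 1));             /* 1: page is on the requested node */
        printf("%d\n", node_match(&p, 0));             /* 0: page is on the wrong node */
        return 0;
}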
@@ -2050,10 +2050,10 @@ static void flush_all(struct kmem_cache *s)
  * Check if the objects in a per cpu structure fit numa
  * locality expectations.
  */
-static inline int node_match(struct kmem_cache_cpu *c, int node)
+static inline int node_match(struct page *page, int node)
 {
 #ifdef CONFIG_NUMA
-        if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
+        if (node != NUMA_NO_NODE && page_to_nid(page) != node)
                 return 0;
 #endif
         return 1;
@@ -2226,7 +2226,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                 goto new_slab;
 redo:
 
-        if (unlikely(!node_match(c, node))) {
+        if (unlikely(!node_match(page, node))) {
                 stat(s, ALLOC_NODE_MISMATCH);
                 deactivate_slab(s, page, c->freelist);
                 c->page = NULL;
@@ -2313,6 +2313,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 {
         void **object;
         struct kmem_cache_cpu *c;
+        struct page *page;
         unsigned long tid;
 
         if (slab_pre_alloc_hook(s, gfpflags))
@@ -2338,7 +2339,8 @@ redo:
         barrier();
 
         object = c->freelist;
-        if (unlikely(!object || !node_match(c, node)))
+        page = c->page;
+        if (unlikely(!object || !node_match(page, node)))
 
                 object = __slab_alloc(s, gfpflags, node, addr, c);
 
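
In the fast path, the patch also changes how the page is obtained: c->page is read once into a local alongside c->freelist, and node_match() then checks that snapshot rather than dereferencing c again, so the check runs against a single consistent value. A hedged userspace model of that shape follows; fast_alloc(), slow_path() and the struct layouts are illustrative stand-ins, not the kernel code.

#include <stdio.h>

#define NUMA_NO_NODE (-1)

struct page { int nid; };               /* stub */

struct kmem_cache_cpu {                 /* stand-in for the per-cpu state */
        void **freelist;
        struct page *page;
};

static int page_to_nid(const struct page *page)
{
        return page->nid;
}

static inline int node_match(struct page *page, int node)
{
        if (node != NUMA_NO_NODE && page_to_nid(page) != node)
                return 0;
        return 1;
}

static void *slow_path(void)            /* stands in for __slab_alloc() */
{
        return NULL;
}

static void *fast_alloc(struct kmem_cache_cpu *c, int node)
{
        void **object = c->freelist;
        struct page *page = c->page;    /* single snapshot, as in the patch */

        if (!object || !node_match(page, node))
                return slow_path();     /* empty freelist or node mismatch */
        return object;
}

int main(void)
{
        void *slot = NULL;
        struct page p = { .nid = 0 };
        struct kmem_cache_cpu c = { .freelist = &slot, .page = &p };

        printf("node 0: %s\n", fast_alloc(&c, 0) ? "fast" : "slow");
        printf("node 1: %s\n", fast_alloc(&c, 1) ? "fast" : "slow");
        return 0;
}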