author | Alexander Duyck <alexander.h.duyck@intel.com> | 2010-05-21 17:41:35 -0400
---|---|---
committer | Pekka Enberg <penberg@cs.helsinki.fi> | 2010-05-24 14:11:29 -0400
commit | 73367bd8eef4f4eb311005886aaa916013073265 (patch) |
tree | d603667689cfea1d8de49fe3c7fada7f6b6eae53 /mm |
parent | 7e125f7b9cbfce4101191b8076d606c517a73066 (diff) |
slub: move kmem_cache_node into its own cacheline
This patch is meant to improve the performance of SLUB by moving the local
kmem_cache_node lock into its own cacheline, separate from kmem_cache.
This is accomplished by simply removing the local_node when NUMA is enabled.
On my system with 2 nodes I saw around a 5% performance increase, with
hackbench times dropping from 6.2 seconds to 5.9 seconds on average. I
suspect the performance gain would increase as the number of nodes
increases, but I do not currently have the data to back that up.
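To make the cacheline argument concrete, here is a minimal, self-contained C sketch of the layout change (illustrative stand-ins only, not the kernel's slub_def.h definitions; every field name beyond local_node and list_lock is an assumption). Embedded per-node state puts the write-hot list_lock on the same cacheline(s) as the cache's read-mostly fields, so each lock acquisition on one CPU invalidates those fields in every other CPU's cache; a separate allocation isolates that traffic:

/* Illustrative sketch only -- simplified stand-ins, not slub_def.h. */
#include <pthread.h>

struct node_state {
        pthread_spinlock_t list_lock;   /* write-hot: taken on slab list ops */
        unsigned long nr_partial;
};

/* Before: node state is embedded, so the hot lock shares
 * cachelines with the surrounding read-mostly fields. */
struct cache_embedded {
        unsigned long flags;            /* read-mostly */
        int size;                       /* read-mostly */
        struct node_state local_node;   /* contended */
};

/* After: only pointers are embedded; each node's state is a
 * separate allocation, so lock traffic no longer evicts the
 * read-mostly fields around it. */
struct cache_separate {
        unsigned long flags;
        int size;
        struct node_state *node[2];     /* e.g. one per NUMA node */
};

The figures above come from hackbench; an invocation along the lines of "hackbench 10" (the exact arguments used are not stated in the commit) stresses the allocator with many short-lived message-passing tasks.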
Bugzilla-Reference: http://bugzilla.kernel.org/show_bug.cgi?id=15713
Cc: <stable@kernel.org>
Reported-by: Alex Shi <alex.shi@intel.com>
Tested-by: Alex Shi <alex.shi@intel.com>
Acked-by: Yanmin Zhang <yanmin_zhang@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/slub.c | 33
1 file changed, 11 insertions(+), 22 deletions(-)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2133,7 +2133,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 
         for_each_node_state(node, N_NORMAL_MEMORY) {
                 struct kmem_cache_node *n = s->node[node];
-                if (n && n != &s->local_node)
+                if (n)
                         kmem_cache_free(kmalloc_caches, n);
                 s->node[node] = NULL;
         }
@@ -2142,33 +2142,22 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 {
         int node;
-        int local_node;
-
-        if (slab_state >= UP && (s < kmalloc_caches ||
-                        s >= kmalloc_caches + KMALLOC_CACHES))
-                local_node = page_to_nid(virt_to_page(s));
-        else
-                local_node = 0;
 
         for_each_node_state(node, N_NORMAL_MEMORY) {
                 struct kmem_cache_node *n;
 
-                if (local_node == node)
-                        n = &s->local_node;
-                else {
-                        if (slab_state == DOWN) {
-                                early_kmem_cache_node_alloc(gfpflags, node);
-                                continue;
-                        }
-                        n = kmem_cache_alloc_node(kmalloc_caches,
-                                                        gfpflags, node);
-
-                        if (!n) {
-                                free_kmem_cache_nodes(s);
-                                return 0;
-                        }
+                if (slab_state == DOWN) {
+                        early_kmem_cache_node_alloc(gfpflags, node);
+                        continue;
+                }
+                n = kmem_cache_alloc_node(kmalloc_caches,
+                                                gfpflags, node);
 
+                if (!n) {
+                        free_kmem_cache_nodes(s);
+                        return 0;
                 }
+
                 s->node[node] = n;
                 init_kmem_cache_node(n, s);
         }
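Since the diffstat is limited to 'mm', the companion include/linux/slub_def.h hunk is not shown here. Going by the description (the embedded local_node is removed when NUMA is enabled), its shape is presumably as follows; treat this as a sketch, not the verbatim change:

/* Sketch of the implied struct kmem_cache change; exact field order
 * and comments are assumptions. */
struct kmem_cache {
        /* ... read-mostly fields: flags, size, objsize, oo, ... */
#ifdef CONFIG_NUMA
        /* Per-node state always lives in its own allocation, keeping
         * the hot list_lock off kmem_cache's cachelines. */
        struct kmem_cache_node *node[MAX_NUMNODES];
#else
        /* Non-NUMA builds keep the embedded node: there is only one
         * node, and embedding it saves an allocation. */
        struct kmem_cache_node local_node;
#endif
};

This is also why the first hunk can drop the n != &s->local_node check: with NUMA enabled, every s->node[node] entry now comes from kmalloc_caches, so free_kmem_cache_nodes() can free them unconditionally.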