author     Pekka Enberg <penberg@cs.helsinki.fi>    2008-08-05 02:28:47 -0400
committer  Pekka Enberg <penberg@cs.helsinki.fi>    2008-08-05 02:28:47 -0400
commit     5595cffc8248e4672c5803547445e85e4053c8fc (patch)
tree       39aa137d63777fd345f5946f7b1662a6ed78dfda /mm
parent     231367fd9bccbb36309ab5bf5012e11a84231031 (diff)
SLUB: dynamic per-cache MIN_PARTIAL
This patch changes the static MIN_PARTIAL to a dynamic per-cache ->min_partial
value that is calculated from object size. The bigger the object size, the more
pages we keep on the partial list.

I tested SLAB, SLUB, and SLUB with this patch on Jens Axboe's 'netio' example
script of the fio benchmarking tool. The script stresses the networking
subsystem which should also give a fairly good beating of kmalloc() et al.

To run the test yourself, first clone the fio repository:

  git clone git://git.kernel.dk/fio.git

and then run the following command n times on your machine:

  time ./fio examples/netio

The results on my 2-way 64-bit x86 machine are as follows:

  [ the minimum, maximum, and average are captured from 50 individual runs ]

                     real time (seconds)
                     min      max      avg      sd
    SLAB             22.76    23.38    22.98    0.17
    SLUB             22.80    25.78    23.46    0.72
    SLUB (dynamic)   22.74    23.54    23.00    0.20

                     sys time (seconds)
                     min      max      avg      sd
    SLAB              6.90     8.28     7.70    0.28
    SLUB              7.42    16.95     8.89    2.28
    SLUB (dynamic)    7.17     8.64     7.73    0.29

                     user time (seconds)
                     min      max      avg      sd
    SLAB             36.89    38.11    37.50    0.29
    SLUB             30.85    37.99    37.06    1.67
    SLUB (dynamic)   36.75    38.07    37.59    0.32

As you can see from the above numbers, this patch brings SLUB to the same level
as SLAB for this particular workload, fixing a ~2% regression.

I'd expect this change to help similar workloads that allocate a lot of objects
that are close to the size of a page.

Cc: Matthew Wilcox <matthew@wil.cx>
Cc: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
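As a rough illustration of the sizing policy described above, the following
standalone userspace C sketch (not part of the patch) mirrors the ->min_partial
calculation; the ilog2() helper and the MIN_PARTIAL/MAX_PARTIAL bounds of 5 and
10 are stand-ins for the kernel's definitions, and the object sizes are
arbitrary examples.

/* Illustrative sketch of the per-cache ->min_partial sizing; compile with
 * any C compiler. The constants and ilog2() stand in for the kernel's. */
#include <stdio.h>

#define MIN_PARTIAL 5
#define MAX_PARTIAL 10

/* floor(log2(size)), like the kernel's ilog2() for runtime values */
static unsigned int ilog2(unsigned long size)
{
        unsigned int log = 0;

        while (size >>= 1)
                log++;
        return log;
}

/* Same clamping as the new init_kmem_cache_node() logic in the patch */
static unsigned long calc_min_partial(unsigned long object_size)
{
        unsigned long min_partial = ilog2(object_size);

        if (min_partial < MIN_PARTIAL)
                min_partial = MIN_PARTIAL;
        else if (min_partial > MAX_PARTIAL)
                min_partial = MAX_PARTIAL;
        return min_partial;
}

int main(void)
{
        unsigned long sizes[] = { 8, 64, 256, 1024, 4096, 8192 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("object size %5lu -> min_partial %lu\n",
                       sizes[i], calc_min_partial(sizes[i]));
        return 0;
}

Because ilog2() grows slowly, small caches keep the old MIN_PARTIAL floor of
partial slabs, while caches with objects around a page in size top out at
MAX_PARTIAL pages on the partial list.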
Diffstat (limited to 'mm')
-rw-r--r--   mm/slub.c   26
1 files changed, 19 insertions, 7 deletions
diff --git a/mm/slub.c b/mm/slub.c
index c26d4c36fba9..4f5b96149458 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1329,7 +1329,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		n = get_node(s, zone_to_nid(zone));
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-				n->nr_partial > MIN_PARTIAL) {
+				n->nr_partial > n->min_partial) {
 			page = get_partial_node(n);
 			if (page)
 				return page;
@@ -1381,7 +1381,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		slab_unlock(page);
 	} else {
 		stat(c, DEACTIVATE_EMPTY);
-		if (n->nr_partial < MIN_PARTIAL) {
+		if (n->nr_partial < n->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
 			 * to avoid page allocator overhead. This slab needs
@@ -1913,9 +1913,21 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 #endif
 }
 
-static void init_kmem_cache_node(struct kmem_cache_node *n)
+static void
+init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
 	n->nr_partial = 0;
+
+	/*
+	 * The larger the object size is, the more pages we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	n->min_partial = ilog2(s->size);
+	if (n->min_partial < MIN_PARTIAL)
+		n->min_partial = MIN_PARTIAL;
+	else if (n->min_partial > MAX_PARTIAL)
+		n->min_partial = MAX_PARTIAL;
+
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
@@ -2087,7 +2099,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	init_object(kmalloc_caches, n, 1);
 	init_tracking(kmalloc_caches, n);
 #endif
-	init_kmem_cache_node(n);
+	init_kmem_cache_node(n, kmalloc_caches);
 	inc_slabs_node(kmalloc_caches, node, page->objects);
 
 	/*
@@ -2144,7 +2156,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 
 		}
 		s->node[node] = n;
-		init_kmem_cache_node(n);
+		init_kmem_cache_node(n, s);
 	}
 	return 1;
 }
@@ -2155,7 +2167,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 
 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 {
-	init_kmem_cache_node(&s->local_node);
+	init_kmem_cache_node(&s->local_node, s);
 	return 1;
 }
 #endif
@@ -2889,7 +2901,7 @@ static int slab_mem_going_online_callback(void *arg)
 			ret = -ENOMEM;
 			goto out;
 		}
-		init_kmem_cache_node(n);
+		init_kmem_cache_node(n, s);
 		s->node[nid] = n;
 	}
 out: