author    David Rientjes <rientjes@google.com>      2009-02-22 20:40:07 -0500
committer Pekka Enberg <penberg@cs.helsinki.fi>     2009-02-23 05:05:41 -0500
commit    3b89d7d881a1dbb4da158f7eb5d6b3ceefc72810 (patch)
tree      48c119937a204172677a5fa3a829019890670350
parent    b578f3fcca1e78624dfb5f358776e63711d7fda2 (diff)
slub: move min_partial to struct kmem_cache
Although it allows for better cacheline use, it is unnecessary to save a
copy of the cache's min_partial value in each kmem_cache_node.

Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
-rw-r--r--  include/linux/slub_def.h |  2
-rw-r--r--  mm/slub.c                | 29
2 files changed, 17 insertions, 14 deletions
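For illustration, here is a minimal user-space sketch of the clamping that the patch centralizes in calculate_min_partial(). The MIN_PARTIAL and MAX_PARTIAL values (5 and 10) are assumed to match mm/slub.c of this period, and ilog2_ul()/clamp_min_partial() are stand-in helpers, not kernel code:

/*
 * Stand-alone sketch of the per-cache min_partial computation done in
 * kmem_cache_open() after this patch.  Assumed values, not part of the
 * diff: MIN_PARTIAL = 5, MAX_PARTIAL = 10.
 */
#include <stdio.h>

#define MIN_PARTIAL 5	/* assumed lower bound on partial slabs kept per node */
#define MAX_PARTIAL 10	/* assumed upper bound */

/* integer log2, standing in for the kernel's ilog2() */
static unsigned long ilog2_ul(unsigned long x)
{
	unsigned long log = 0;

	while (x >>= 1)
		log++;
	return log;
}

/* mirrors calculate_min_partial(): clamp the value to [MIN_PARTIAL, MAX_PARTIAL] */
static unsigned long clamp_min_partial(unsigned long min)
{
	if (min < MIN_PARTIAL)
		min = MIN_PARTIAL;
	else if (min > MAX_PARTIAL)
		min = MAX_PARTIAL;
	return min;
}

int main(void)
{
	/* hypothetical object sizes: small objects clamp up, huge ones clamp down */
	unsigned long sizes[] = { 16, 192, 4096, 2 * 1024 * 1024 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("object size %8lu -> min_partial %lu\n",
		       sizes[i], clamp_min_partial(ilog2_ul(sizes[i])));
	return 0;
}

The practical effect of the move is that this threshold is computed once per cache and stored in struct kmem_cache, instead of being duplicated in every kmem_cache_node.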
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 2f5c16b1aacd..f20a89e4d52c 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -46,7 +46,6 @@ struct kmem_cache_cpu {
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
-	unsigned long min_partial;
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_t nr_slabs;
@@ -89,6 +88,7 @@ struct kmem_cache {
 	void (*ctor)(void *);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
+	unsigned long min_partial;
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
 #ifdef CONFIG_SLUB_DEBUG
diff --git a/mm/slub.c b/mm/slub.c
index bdc9abb08a23..4fff385b17a3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1335,7 +1335,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		n = get_node(s, zone_to_nid(zone));
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-				n->nr_partial > n->min_partial) {
+				n->nr_partial > s->min_partial) {
 			page = get_partial_node(n);
 			if (page)
 				return page;
@@ -1387,7 +1387,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		slab_unlock(page);
 	} else {
 		stat(c, DEACTIVATE_EMPTY);
-		if (n->nr_partial < n->min_partial) {
+		if (n->nr_partial < s->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
 			 * to avoid page allocator overhead. This slab needs
@@ -1928,17 +1928,6 @@ static void
 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
 	n->nr_partial = 0;
-
-	/*
-	 * The larger the object size is, the more pages we want on the partial
-	 * list to avoid pounding the page allocator excessively.
-	 */
-	n->min_partial = ilog2(s->size);
-	if (n->min_partial < MIN_PARTIAL)
-		n->min_partial = MIN_PARTIAL;
-	else if (n->min_partial > MAX_PARTIAL)
-		n->min_partial = MAX_PARTIAL;
-
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
@@ -2181,6 +2170,15 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 }
 #endif
 
+static void calculate_min_partial(struct kmem_cache *s, unsigned long min)
+{
+	if (min < MIN_PARTIAL)
+		min = MIN_PARTIAL;
+	else if (min > MAX_PARTIAL)
+		min = MAX_PARTIAL;
+	s->min_partial = min;
+}
+
 /*
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
@@ -2319,6 +2317,11 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 	if (!calculate_sizes(s, -1))
 		goto error;
 
+	/*
+	 * The larger the object size is, the more pages we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	calculate_min_partial(s, ilog2(s->size));
 	s->refcount = 1;
 #ifdef CONFIG_NUMA
 	s->remote_node_defrag_ratio = 1000;