author	Linus Torvalds <torvalds@linux-foundation.org>	2008-08-09 19:21:33 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-08-09 19:21:33 -0400
commit	4fbb71597af591fa0ef565df1ba745c92d5070f7 (patch)
tree	dcfdfa866e2ad3a14992b5a1fe6cd19711d5efab /mm
parent	e6ca23289f865f0372b89f42884c258a8e85460b (diff)
parent	5595cffc8248e4672c5803547445e85e4053c8fc (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  SLUB: dynamic per-cache MIN_PARTIAL
  mm: unexport ksize
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	 1
-rw-r--r--	mm/slob.c	 1
-rw-r--r--	mm/slub.c	27
3 files changed, 19 insertions, 10 deletions
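The slub.c changes below replace the single MIN_PARTIAL floor with a per-node n->min_partial derived from the cache's object size. The following is a minimal user-space sketch of that clamping logic, for illustration only: the bound values of 5 and 10 are assumptions based on SLUB's defaults of this era and are not shown in this diff, and min_partial_for_size() / ilog2_ul() are hypothetical helpers, not kernel functions.

/* Hypothetical stand-alone sketch of the per-cache min_partial calculation. */
#include <stdio.h>

#define MIN_PARTIAL 5		/* assumed lower bound */
#define MAX_PARTIAL 10		/* assumed upper bound */

/* ilog2_ul(): index of the highest set bit, mirroring the kernel's ilog2(). */
static unsigned long ilog2_ul(unsigned long v)
{
	unsigned long log = 0;

	while (v >>= 1)
		log++;
	return log;
}

static unsigned long min_partial_for_size(unsigned long size)
{
	unsigned long min = ilog2_ul(size);

	if (min < MIN_PARTIAL)
		min = MIN_PARTIAL;
	else if (min > MAX_PARTIAL)
		min = MAX_PARTIAL;
	return min;
}

int main(void)
{
	/* Small-object caches keep the old floor; large-object caches keep more, up to the cap. */
	printf("object size   64 -> min_partial %lu\n", min_partial_for_size(64));
	printf("object size 4096 -> min_partial %lu\n", min_partial_for_size(4096));
	return 0;
}

The practical effect, as the unfreeze_slab() hunk shows, is that caches with larger objects keep more slabs on the per-node partial list before empty slabs are handed back to the page allocator.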
diff --git a/mm/slab.c b/mm/slab.c
index 918f04f7fef1..e76eee466886 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4472,4 +4472,3 @@ size_t ksize(const void *objp)
 
 	return obj_size(virt_to_cache(objp));
 }
-EXPORT_SYMBOL(ksize);
diff --git a/mm/slob.c b/mm/slob.c
index d8fbd4d1bfa7..4c82dd41f32e 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -519,7 +519,6 @@ size_t ksize(const void *block)
 	else
 		return sp->page.private;
 }
-EXPORT_SYMBOL(ksize);
 
 struct kmem_cache {
 	unsigned int size, align;
diff --git a/mm/slub.c b/mm/slub.c
index b7e2cd5d82db..4f5b96149458 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1329,7 +1329,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		n = get_node(s, zone_to_nid(zone));
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-				n->nr_partial > MIN_PARTIAL) {
+				n->nr_partial > n->min_partial) {
 			page = get_partial_node(n);
 			if (page)
 				return page;
@@ -1381,7 +1381,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		slab_unlock(page);
 	} else {
 		stat(c, DEACTIVATE_EMPTY);
-		if (n->nr_partial < MIN_PARTIAL) {
+		if (n->nr_partial < n->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
 			 * to avoid page allocator overhead. This slab needs
@@ -1913,9 +1913,21 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 #endif
 }
 
-static void init_kmem_cache_node(struct kmem_cache_node *n)
+static void
+init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
 	n->nr_partial = 0;
+
+	/*
+	 * The larger the object size is, the more pages we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	n->min_partial = ilog2(s->size);
+	if (n->min_partial < MIN_PARTIAL)
+		n->min_partial = MIN_PARTIAL;
+	else if (n->min_partial > MAX_PARTIAL)
+		n->min_partial = MAX_PARTIAL;
+
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
@@ -2087,7 +2099,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	init_object(kmalloc_caches, n, 1);
 	init_tracking(kmalloc_caches, n);
 #endif
-	init_kmem_cache_node(n);
+	init_kmem_cache_node(n, kmalloc_caches);
 	inc_slabs_node(kmalloc_caches, node, page->objects);
 
 	/*
@@ -2144,7 +2156,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 
 		}
 		s->node[node] = n;
-		init_kmem_cache_node(n);
+		init_kmem_cache_node(n, s);
 	}
 	return 1;
 }
@@ -2155,7 +2167,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 
 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 {
-	init_kmem_cache_node(&s->local_node);
+	init_kmem_cache_node(&s->local_node, s);
 	return 1;
 }
 #endif
@@ -2715,7 +2727,6 @@ size_t ksize(const void *object)
 	 */
 	return s->size;
 }
-EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
 {
@@ -2890,7 +2901,7 @@ static int slab_mem_going_online_callback(void *arg)
 			ret = -ENOMEM;
 			goto out;
 		}
-		init_kmem_cache_node(n);
+		init_kmem_cache_node(n, s);
 		s->node[nid] = n;
 	}
 out:
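The EXPORT_SYMBOL(ksize) removals in slab.c, slob.c, and slub.c mean that after this merge ksize() can only be called from built-in code; a loadable module referencing it will no longer resolve the symbol. A hypothetical out-of-tree module such as the sketch below (not part of this commit) would build but fail at load time with an unknown-symbol error for ksize once built against this tree.

/* Hypothetical out-of-tree module illustrating the effect of unexporting ksize(). */
#include <linux/module.h>
#include <linux/slab.h>

static int __init ksize_demo_init(void)
{
	void *p = kmalloc(100, GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	/* ksize is declared in <linux/slab.h>, but with EXPORT_SYMBOL removed this
	 * reference cannot be resolved when the code is built as a module. */
	pr_info("allocated size: %zu\n", ksize(p));
	kfree(p);
	return 0;
}

static void __exit ksize_demo_exit(void)
{
}

module_init(ksize_demo_init);
module_exit(ksize_demo_exit);
MODULE_LICENSE("GPL");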