aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJoonsoo Kim <iamjoonsoo.kim@lge.com>2014-10-09 18:26:24 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-09 22:25:51 -0400
commit12220dea07f1ac6ac717707104773d771c3f3077 (patch)
tree5d12f754560c7b06e6d1bda9cf29000765fe921f
parent423c929cbbecc60e9c407f9048e58f5422f7995d (diff)
mm/slab: support slab merge
Slab merge is a good feature for reducing fragmentation. If a newly created slab has a similar size and properties to an existing slab, this feature reuses the existing one rather than creating a new one. As a result, objects are packed into fewer slabs, so fragmentation is reduced. Below is the result of my testing. * After boot, sleep 20; cat /proc/meminfo | grep Slab <Before> Slab: 25136 kB <After> Slab: 24364 kB We can save 3% of the memory used by slab. To support this feature in SLAB, we need to implement SLAB-specific kmem_cache_flags() and __kmem_cache_alias(), because SLUB implements some SLUB-specific processing related to the debug flag and object size change in these functions. Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/slab.c26
-rw-r--r--mm/slab.h2
2 files changed, 27 insertions, 1 deletions
diff --git a/mm/slab.c b/mm/slab.c
index f989af87b72c..328233a724af 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2104,6 +2104,32 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2104 return 0; 2104 return 0;
2105} 2105}
2106 2106
/*
 * SLAB performs no debug-flag or size-based adjustment of cache
 * creation flags, so the requested flags are passed through verbatim.
 * The unused parameters exist only to match the signature shared with
 * the SLUB implementation of this hook.
 */
unsigned long kmem_cache_flags(unsigned long object_size,
			       unsigned long flags, const char *name,
			       void (*ctor)(void *))
{
	return flags;
}
2113
2114struct kmem_cache *
2115__kmem_cache_alias(const char *name, size_t size, size_t align,
2116 unsigned long flags, void (*ctor)(void *))
2117{
2118 struct kmem_cache *cachep;
2119
2120 cachep = find_mergeable(size, align, flags, name, ctor);
2121 if (cachep) {
2122 cachep->refcount++;
2123
2124 /*
2125 * Adjust the object sizes so that we clear
2126 * the complete object on kzalloc.
2127 */
2128 cachep->object_size = max_t(int, cachep->object_size, size);
2129 }
2130 return cachep;
2131}
2132
2107/** 2133/**
2108 * __kmem_cache_create - Create a cache. 2134 * __kmem_cache_create - Create a cache.
2109 * @cachep: cache management descriptor 2135 * @cachep: cache management descriptor
diff --git a/mm/slab.h b/mm/slab.h
index c44d28b60609..50d29d716db4 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -92,7 +92,7 @@ struct mem_cgroup;
92int slab_unmergeable(struct kmem_cache *s); 92int slab_unmergeable(struct kmem_cache *s);
93struct kmem_cache *find_mergeable(size_t size, size_t align, 93struct kmem_cache *find_mergeable(size_t size, size_t align,
94 unsigned long flags, const char *name, void (*ctor)(void *)); 94 unsigned long flags, const char *name, void (*ctor)(void *));
95#ifdef CONFIG_SLUB 95#ifndef CONFIG_SLOB
96struct kmem_cache * 96struct kmem_cache *
97__kmem_cache_alias(const char *name, size_t size, size_t align, 97__kmem_cache_alias(const char *name, size_t size, size_t align,
98 unsigned long flags, void (*ctor)(void *)); 98 unsigned long flags, void (*ctor)(void *));