aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorVladimir Davydov <vdavydov@parallels.com>2014-04-07 18:39:23 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-04-07 19:36:12 -0400
commita44cb9449182fd7b25bf5f1cc38b7f19e0b96f6d (patch)
tree2edcdb2aed0a5c54f5397c943abc0a2bd41031b3 /mm
parentcf7bc58f6dd4fdbab22e2ec5f27fe59674f425bf (diff)
memcg, slab: never try to merge memcg caches
When a kmem cache is created (kmem_cache_create_memcg()), we first try to find a compatible cache that already exists and can handle requests from the new cache, i.e. has the same object size, alignment, ctor, etc. If there is such a cache, we do not create any new caches, instead we simply increment the refcount of the cache found and return it. Currently we do this procedure not only when creating root caches, but also for memcg caches. However, there is no point in that, because, as every memcg cache has exactly the same parameters as its parent and cache merging cannot be turned off at runtime (only on boot by passing "slub_nomerge"), the root caches of any two potentially mergeable memcg caches should be merged already, i.e. it must be the same root cache, and therefore we couldn't even get to the memcg cache creation, because it already exists. The only exception is boot caches - they are explicitly forbidden to be merged by setting their refcount to -1. There are currently only two of them - kmem_cache and kmem_cache_node, which are used in slab internals (I do not count kmalloc caches as their refcount is set to 1 immediately after creation). Since they are preemptively prevented from merging, we should also avoid merging their children. So let's remove the useless code responsible for merging memcg caches. Signed-off-by: Vladimir Davydov <vdavydov@parallels.com> Cc: Michal Hocko <mhocko@suse.cz> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: David Rientjes <rientjes@google.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: Glauber Costa <glommer@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/slab.h21
-rw-r--r--mm/slab_common.c8
-rw-r--r--mm/slub.c19
3 files changed, 18 insertions, 30 deletions
diff --git a/mm/slab.h b/mm/slab.h
index 8184a7cde272..3045316b7c9d 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -55,12 +55,12 @@ extern void create_boot_cache(struct kmem_cache *, const char *name,
55struct mem_cgroup; 55struct mem_cgroup;
56#ifdef CONFIG_SLUB 56#ifdef CONFIG_SLUB
57struct kmem_cache * 57struct kmem_cache *
58__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size, 58__kmem_cache_alias(const char *name, size_t size, size_t align,
59 size_t align, unsigned long flags, void (*ctor)(void *)); 59 unsigned long flags, void (*ctor)(void *));
60#else 60#else
61static inline struct kmem_cache * 61static inline struct kmem_cache *
62__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size, 62__kmem_cache_alias(const char *name, size_t size, size_t align,
63 size_t align, unsigned long flags, void (*ctor)(void *)) 63 unsigned long flags, void (*ctor)(void *))
64{ return NULL; } 64{ return NULL; }
65#endif 65#endif
66 66
@@ -119,13 +119,6 @@ static inline bool is_root_cache(struct kmem_cache *s)
119 return !s->memcg_params || s->memcg_params->is_root_cache; 119 return !s->memcg_params || s->memcg_params->is_root_cache;
120} 120}
121 121
122static inline bool cache_match_memcg(struct kmem_cache *cachep,
123 struct mem_cgroup *memcg)
124{
125 return (is_root_cache(cachep) && !memcg) ||
126 (cachep->memcg_params->memcg == memcg);
127}
128
129static inline void memcg_bind_pages(struct kmem_cache *s, int order) 122static inline void memcg_bind_pages(struct kmem_cache *s, int order)
130{ 123{
131 if (!is_root_cache(s)) 124 if (!is_root_cache(s))
@@ -204,12 +197,6 @@ static inline bool is_root_cache(struct kmem_cache *s)
204 return true; 197 return true;
205} 198}
206 199
207static inline bool cache_match_memcg(struct kmem_cache *cachep,
208 struct mem_cgroup *memcg)
209{
210 return true;
211}
212
213static inline void memcg_bind_pages(struct kmem_cache *s, int order) 200static inline void memcg_bind_pages(struct kmem_cache *s, int order)
214{ 201{
215} 202}
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 1ec3c619ba04..e77b51eb7347 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -200,9 +200,11 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
200 */ 200 */
201 flags &= CACHE_CREATE_MASK; 201 flags &= CACHE_CREATE_MASK;
202 202
203 s = __kmem_cache_alias(memcg, name, size, align, flags, ctor); 203 if (!memcg) {
204 if (s) 204 s = __kmem_cache_alias(name, size, align, flags, ctor);
205 goto out_unlock; 205 if (s)
206 goto out_unlock;
207 }
206 208
207 err = -ENOMEM; 209 err = -ENOMEM;
208 s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); 210 s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
diff --git a/mm/slub.c b/mm/slub.c
index 5b05e4fe9a1a..7d81afb27048 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3685,6 +3685,9 @@ static int slab_unmergeable(struct kmem_cache *s)
3685 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) 3685 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3686 return 1; 3686 return 1;
3687 3687
3688 if (!is_root_cache(s))
3689 return 1;
3690
3688 if (s->ctor) 3691 if (s->ctor)
3689 return 1; 3692 return 1;
3690 3693
@@ -3697,9 +3700,8 @@ static int slab_unmergeable(struct kmem_cache *s)
3697 return 0; 3700 return 0;
3698} 3701}
3699 3702
3700static struct kmem_cache *find_mergeable(struct mem_cgroup *memcg, size_t size, 3703static struct kmem_cache *find_mergeable(size_t size, size_t align,
3701 size_t align, unsigned long flags, const char *name, 3704 unsigned long flags, const char *name, void (*ctor)(void *))
3702 void (*ctor)(void *))
3703{ 3705{
3704 struct kmem_cache *s; 3706 struct kmem_cache *s;
3705 3707
@@ -3722,7 +3724,7 @@ static struct kmem_cache *find_mergeable(struct mem_cgroup *memcg, size_t size,
3722 continue; 3724 continue;
3723 3725
3724 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME)) 3726 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
3725 continue; 3727 continue;
3726 /* 3728 /*
3727 * Check if alignment is compatible. 3729 * Check if alignment is compatible.
3728 * Courtesy of Adrian Drzewiecki 3730 * Courtesy of Adrian Drzewiecki
@@ -3733,21 +3735,18 @@ static struct kmem_cache *find_mergeable(struct mem_cgroup *memcg, size_t size,
3733 if (s->size - size >= sizeof(void *)) 3735 if (s->size - size >= sizeof(void *))
3734 continue; 3736 continue;
3735 3737
3736 if (!cache_match_memcg(s, memcg))
3737 continue;
3738
3739 return s; 3738 return s;
3740 } 3739 }
3741 return NULL; 3740 return NULL;
3742} 3741}
3743 3742
3744struct kmem_cache * 3743struct kmem_cache *
3745__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size, 3744__kmem_cache_alias(const char *name, size_t size, size_t align,
3746 size_t align, unsigned long flags, void (*ctor)(void *)) 3745 unsigned long flags, void (*ctor)(void *))
3747{ 3746{
3748 struct kmem_cache *s; 3747 struct kmem_cache *s;
3749 3748
3750 s = find_mergeable(memcg, size, align, flags, name, ctor); 3749 s = find_mergeable(size, align, flags, name, ctor);
3751 if (s) { 3750 if (s) {
3752 s->refcount++; 3751 s->refcount++;
3753 /* 3752 /*