about summary refs log tree commit diff stats
path: root/mm/slab_common.c
diff options
context:
space:
mode:
author: Glauber Costa <glommer@parallels.com> 2012-12-18 17:22:34 -0500
committer: Linus Torvalds <torvalds@linux-foundation.org> 2012-12-18 18:02:13 -0500
commit: 2633d7a028239a738b793be5ca8fa6ac312f5793 (patch)
tree: 48a9f157b2c2a8218611aaade9667cacc2e018ec /mm/slab_common.c
parent: 6ccfb5bcf52bcf100fa085946f044fdbba015048 (diff)
slab/slub: consider a memcg parameter in kmem_create_cache
Allow a memcg parameter to be passed during cache creation. When the slub allocator is being used, it will only merge caches that belong to the same memcg. We'll do this by scanning the global list, and then translating the cache to a memcg-specific cache. Default function is created as a wrapper, passing NULL to the memcg version. We only merge caches that belong to the same memcg. A helper is provided, memcg_css_id, because slub needs a unique cache name for sysfs. Since this is visible, but not the canonical location for slab data, the cache name is not used; the css_id should suffice. Signed-off-by: Glauber Costa <glommer@parallels.com> Cc: Christoph Lameter <cl@linux.com> Cc: David Rientjes <rientjes@google.com> Cc: Frederic Weisbecker <fweisbec@redhat.com> Cc: Greg Thelen <gthelen@google.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: JoonSoo Kim <js1304@gmail.com> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Mel Gorman <mel@csn.ul.ie> Cc: Michal Hocko <mhocko@suse.cz> Cc: Pekka Enberg <penberg@cs.helsinki.fi> Cc: Rik van Riel <riel@redhat.com> Cc: Suleiman Souhlal <suleiman@google.com> Cc: Tejun Heo <tj@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r-- mm/slab_common.c | 42
1 file changed, 33 insertions(+), 9 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index a8e76d79ee65..3031badcc577 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -18,6 +18,7 @@
18#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
19#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
20#include <asm/page.h> 20#include <asm/page.h>
21#include <linux/memcontrol.h>
21 22
22#include "slab.h" 23#include "slab.h"
23 24
@@ -27,7 +28,8 @@ DEFINE_MUTEX(slab_mutex);
27struct kmem_cache *kmem_cache; 28struct kmem_cache *kmem_cache;
28 29
29#ifdef CONFIG_DEBUG_VM 30#ifdef CONFIG_DEBUG_VM
30static int kmem_cache_sanity_check(const char *name, size_t size) 31static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
32 size_t size)
31{ 33{
32 struct kmem_cache *s = NULL; 34 struct kmem_cache *s = NULL;
33 35
@@ -53,7 +55,13 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
53 continue; 55 continue;
54 } 56 }
55 57
56 if (!strcmp(s->name, name)) { 58 /*
59 * For simplicity, we won't check this in the list of memcg
60 * caches. We have control over memcg naming, and if there
61 * aren't duplicates in the global list, there won't be any
62 * duplicates in the memcg lists as well.
63 */
64 if (!memcg && !strcmp(s->name, name)) {
57 pr_err("%s (%s): Cache name already exists.\n", 65 pr_err("%s (%s): Cache name already exists.\n",
58 __func__, name); 66 __func__, name);
59 dump_stack(); 67 dump_stack();
@@ -66,7 +74,8 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
66 return 0; 74 return 0;
67} 75}
68#else 76#else
69static inline int kmem_cache_sanity_check(const char *name, size_t size) 77static inline int kmem_cache_sanity_check(struct mem_cgroup *memcg,
78 const char *name, size_t size)
70{ 79{
71 return 0; 80 return 0;
72} 81}
@@ -125,8 +134,9 @@ unsigned long calculate_alignment(unsigned long flags,
125 * as davem. 134 * as davem.
126 */ 135 */
127 136
128struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align, 137struct kmem_cache *
129 unsigned long flags, void (*ctor)(void *)) 138kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
139 size_t align, unsigned long flags, void (*ctor)(void *))
130{ 140{
131 struct kmem_cache *s = NULL; 141 struct kmem_cache *s = NULL;
132 int err = 0; 142 int err = 0;
@@ -134,7 +144,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
134 get_online_cpus(); 144 get_online_cpus();
135 mutex_lock(&slab_mutex); 145 mutex_lock(&slab_mutex);
136 146
137 if (!kmem_cache_sanity_check(name, size) == 0) 147 if (!kmem_cache_sanity_check(memcg, name, size) == 0)
138 goto out_locked; 148 goto out_locked;
139 149
140 /* 150 /*
@@ -145,7 +155,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
145 */ 155 */
146 flags &= CACHE_CREATE_MASK; 156 flags &= CACHE_CREATE_MASK;
147 157
148 s = __kmem_cache_alias(name, size, align, flags, ctor); 158 s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
149 if (s) 159 if (s)
150 goto out_locked; 160 goto out_locked;
151 161
@@ -154,6 +164,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
154 s->object_size = s->size = size; 164 s->object_size = s->size = size;
155 s->align = calculate_alignment(flags, align, size); 165 s->align = calculate_alignment(flags, align, size);
156 s->ctor = ctor; 166 s->ctor = ctor;
167
168 if (memcg_register_cache(memcg, s)) {
169 kmem_cache_free(kmem_cache, s);
170 err = -ENOMEM;
171 goto out_locked;
172 }
173
157 s->name = kstrdup(name, GFP_KERNEL); 174 s->name = kstrdup(name, GFP_KERNEL);
158 if (!s->name) { 175 if (!s->name) {
159 kmem_cache_free(kmem_cache, s); 176 kmem_cache_free(kmem_cache, s);
@@ -163,10 +180,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
163 180
164 err = __kmem_cache_create(s, flags); 181 err = __kmem_cache_create(s, flags);
165 if (!err) { 182 if (!err) {
166
167 s->refcount = 1; 183 s->refcount = 1;
168 list_add(&s->list, &slab_caches); 184 list_add(&s->list, &slab_caches);
169 185 memcg_cache_list_add(memcg, s);
170 } else { 186 } else {
171 kfree(s->name); 187 kfree(s->name);
172 kmem_cache_free(kmem_cache, s); 188 kmem_cache_free(kmem_cache, s);
@@ -194,6 +210,13 @@ out_locked:
194 210
195 return s; 211 return s;
196} 212}
213
214struct kmem_cache *
215kmem_cache_create(const char *name, size_t size, size_t align,
216 unsigned long flags, void (*ctor)(void *))
217{
218 return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor);
219}
197EXPORT_SYMBOL(kmem_cache_create); 220EXPORT_SYMBOL(kmem_cache_create);
198 221
199void kmem_cache_destroy(struct kmem_cache *s) 222void kmem_cache_destroy(struct kmem_cache *s)
@@ -209,6 +232,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
209 if (s->flags & SLAB_DESTROY_BY_RCU) 232 if (s->flags & SLAB_DESTROY_BY_RCU)
210 rcu_barrier(); 233 rcu_barrier();
211 234
235 memcg_release_cache(s);
212 kfree(s->name); 236 kfree(s->name);
213 kmem_cache_free(kmem_cache, s); 237 kmem_cache_free(kmem_cache, s);
214 } else { 238 } else {