author		Vladimir Davydov <vdavydov@parallels.com>	2014-04-07 18:39:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-07 19:36:12 -0400
commit		794b1248be4e7e157f5535c3ee49168aa4643349 (patch)
tree		93bfa09b76abff9ec2f9d154bb43971bcce2f574
parent		5722d094ad2b56fa2c1cb3adaf40071a55bbf242 (diff)
memcg, slab: separate memcg vs root cache creation paths
Memcg-awareness turned kmem_cache_create() into a dirty interweaving of memcg-only and except-for-memcg calls. To clean this up, let's move the code responsible for memcg cache creation to a separate function.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Glauber Costa <glommer@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/memcontrol.h	  6
-rw-r--r--	include/linux/slab.h	  6
-rw-r--r--	mm/memcontrol.c	  7
-rw-r--r--	mm/slab_common.c	187
4 files changed, 111 insertions, 95 deletions
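
To make the intent of the patch easier to see before reading the diff: after this change the public kmem_cache_create() handles only root caches, the new memcg-only kmem_cache_create_memcg() derives every parameter from its root cache instead of taking them from the caller, and both paths funnel into a shared do_kmem_cache_create() helper. The following is a minimal userspace sketch of that split, with renamed functions, stubbed-out allocation and naming, and no locking or error reporting; it models only the control flow, not the kernel implementation.

/*
 * Simplified userspace model of the refactored creation paths -- NOT the
 * kernel code. do_cache_create(), cache_create(), and cache_create_memcg()
 * stand in for do_kmem_cache_create(), kmem_cache_create(), and
 * kmem_cache_create_memcg(); allocation and memcg naming are stubbed.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct kmem_cache {
	char *name;
	size_t object_size;
	size_t size;
	size_t align;
	unsigned long flags;
	int is_memcg;	/* stand-in for the memcg params the kernel attaches */
};

/* Common tail shared by both paths. */
static struct kmem_cache *do_cache_create(char *name, size_t object_size,
					  size_t size, size_t align,
					  unsigned long flags, int is_memcg)
{
	struct kmem_cache *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->flags = flags;
	s->is_memcg = is_memcg;
	return s;
}

/* Root path: takes user-supplied geometry and duplicates the name. */
struct kmem_cache *cache_create(const char *name, size_t size, size_t align,
				unsigned long flags)
{
	char *cache_name = strdup(name);

	if (!cache_name)
		return NULL;
	return do_cache_create(cache_name, size, size, align, flags, 0);
}

/* Memcg path: takes no user input, inherits everything from the root cache. */
struct kmem_cache *cache_create_memcg(const struct kmem_cache *root)
{
	size_t len = strlen(root->name) + sizeof("-memcg");
	char *cache_name = malloc(len);

	if (!cache_name)
		return NULL;
	snprintf(cache_name, len, "%s-memcg", root->name);
	return do_cache_create(cache_name, root->object_size, root->size,
			       root->align, root->flags, 1);
}

int main(void)
{
	struct kmem_cache *root = cache_create("dentry", 192, 8, 0);
	struct kmem_cache *child = root ? cache_create_memcg(root) : NULL;

	if (root && child)
		printf("root=%s child=%s\n", root->name, child->name);
	return 0;
}

The asymmetry introduced by the patch is visible in the two wrappers: the root path copies caller-supplied name and geometry, while the memcg path takes nothing from the caller and inherits object size, alignment, flags, and constructor from the root cache.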
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ab7f02884983..02d3072841e9 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -638,12 +638,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
 	return -1;
 }
 
-static inline char *memcg_create_cache_name(struct mem_cgroup *memcg,
-					     struct kmem_cache *root_cache)
-{
-	return NULL;
-}
-
 static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
 		struct kmem_cache *s, struct kmem_cache *root_cache)
 {
diff --git a/include/linux/slab.h b/include/linux/slab.h
index b5b2df60299e..3dd389aa91c7 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -115,9 +115,9 @@ int slab_is_available(void);
 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
 			unsigned long,
 			void (*)(void *));
-struct kmem_cache *
-kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
-			unsigned long, void (*)(void *), struct kmem_cache *);
+#ifdef CONFIG_MEMCG_KMEM
+void kmem_cache_create_memcg(struct mem_cgroup *, struct kmem_cache *);
+#endif
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 void kmem_cache_free(struct kmem_cache *, void *);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 32c7342df4bf..451523c3bd4e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3395,13 +3395,8 @@ static void memcg_create_cache_work_func(struct work_struct *w)
 	struct create_work *cw = container_of(w, struct create_work, work);
 	struct mem_cgroup *memcg = cw->memcg;
 	struct kmem_cache *cachep = cw->cachep;
-	struct kmem_cache *new;
 
-	new = kmem_cache_create_memcg(memcg, cachep->name,
-			cachep->object_size, cachep->align,
-			cachep->flags & ~SLAB_PANIC, cachep->ctor, cachep);
-	if (new)
-		new->allocflags |= __GFP_KMEMCG;
+	kmem_cache_create_memcg(memcg, cachep);
 	css_put(&memcg->css);
 	kfree(cw);
 }
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 11857abf7057..ccc012f00126 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -29,8 +29,7 @@ DEFINE_MUTEX(slab_mutex);
 struct kmem_cache *kmem_cache;
 
 #ifdef CONFIG_DEBUG_VM
-static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
-				   size_t size)
+static int kmem_cache_sanity_check(const char *name, size_t size)
 {
 	struct kmem_cache *s = NULL;
 
@@ -57,13 +56,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
 	}
 
 #if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
-	/*
-	 * For simplicity, we won't check this in the list of memcg
-	 * caches. We have control over memcg naming, and if there
-	 * aren't duplicates in the global list, there won't be any
-	 * duplicates in the memcg lists as well.
-	 */
-	if (!memcg && !strcmp(s->name, name)) {
+	if (!strcmp(s->name, name)) {
 		pr_err("%s (%s): Cache name already exists.\n",
 		       __func__, name);
 		dump_stack();
@@ -77,8 +70,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
 	return 0;
 }
 #else
-static inline int kmem_cache_sanity_check(struct mem_cgroup *memcg,
-					  const char *name, size_t size)
+static inline int kmem_cache_sanity_check(const char *name, size_t size)
 {
 	return 0;
 }
@@ -139,6 +131,46 @@ unsigned long calculate_alignment(unsigned long flags,
 	return ALIGN(align, sizeof(void *));
 }
 
+static struct kmem_cache *
+do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
+		     unsigned long flags, void (*ctor)(void *),
+		     struct mem_cgroup *memcg, struct kmem_cache *root_cache)
+{
+	struct kmem_cache *s;
+	int err;
+
+	err = -ENOMEM;
+	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
+	if (!s)
+		goto out;
+
+	s->name = name;
+	s->object_size = object_size;
+	s->size = size;
+	s->align = align;
+	s->ctor = ctor;
+
+	err = memcg_alloc_cache_params(memcg, s, root_cache);
+	if (err)
+		goto out_free_cache;
+
+	err = __kmem_cache_create(s, flags);
+	if (err)
+		goto out_free_cache;
+
+	s->refcount = 1;
+	list_add(&s->list, &slab_caches);
+	memcg_register_cache(s);
+out:
+	if (err)
+		return ERR_PTR(err);
+	return s;
+
+out_free_cache:
+	memcg_free_cache_params(s);
+	kfree(s);
+	goto out;
+}
 
 /*
  * kmem_cache_create - Create a cache.
@@ -164,34 +196,21 @@ unsigned long calculate_alignment(unsigned long flags,
  * cacheline. This can be beneficial if you're counting cycles as closely
  * as davem.
  */
-
 struct kmem_cache *
-kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
-			size_t align, unsigned long flags, void (*ctor)(void *),
-			struct kmem_cache *parent_cache)
+kmem_cache_create(const char *name, size_t size, size_t align,
+		  unsigned long flags, void (*ctor)(void *))
 {
-	struct kmem_cache *s = NULL;
+	struct kmem_cache *s;
+	char *cache_name;
 	int err;
 
 	get_online_cpus();
 	mutex_lock(&slab_mutex);
 
-	err = kmem_cache_sanity_check(memcg, name, size);
+	err = kmem_cache_sanity_check(name, size);
 	if (err)
 		goto out_unlock;
 
-	if (memcg) {
-		/*
-		 * Since per-memcg caches are created asynchronously on first
-		 * allocation (see memcg_kmem_get_cache()), several threads can
-		 * try to create the same cache, but only one of them may
-		 * succeed. Therefore if we get here and see the cache has
-		 * already been created, we silently return NULL.
-		 */
-		if (cache_from_memcg_idx(parent_cache, memcg_cache_id(memcg)))
-			goto out_unlock;
-	}
-
 	/*
 	 * Some allocators will constraint the set of valid flags to a subset
 	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
@@ -200,55 +219,29 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
 	 */
 	flags &= CACHE_CREATE_MASK;
 
-	if (!memcg) {
-		s = __kmem_cache_alias(name, size, align, flags, ctor);
-		if (s)
-			goto out_unlock;
-	}
-
-	err = -ENOMEM;
-	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
-	if (!s)
+	s = __kmem_cache_alias(name, size, align, flags, ctor);
+	if (s)
 		goto out_unlock;
 
-	s->object_size = s->size = size;
-	s->align = calculate_alignment(flags, align, size);
-	s->ctor = ctor;
-
-	if (memcg)
-		s->name = memcg_create_cache_name(memcg, parent_cache);
-	else
-		s->name = kstrdup(name, GFP_KERNEL);
-	if (!s->name)
-		goto out_free_cache;
-
-	err = memcg_alloc_cache_params(memcg, s, parent_cache);
-	if (err)
-		goto out_free_cache;
-
-	err = __kmem_cache_create(s, flags);
-	if (err)
-		goto out_free_cache;
+	cache_name = kstrdup(name, GFP_KERNEL);
+	if (!cache_name) {
+		err = -ENOMEM;
+		goto out_unlock;
+	}
 
-	s->refcount = 1;
-	list_add(&s->list, &slab_caches);
-	memcg_register_cache(s);
+	s = do_kmem_cache_create(cache_name, size, size,
+				 calculate_alignment(flags, align, size),
+				 flags, ctor, NULL, NULL);
+	if (IS_ERR(s)) {
+		err = PTR_ERR(s);
+		kfree(cache_name);
+	}
 
 out_unlock:
 	mutex_unlock(&slab_mutex);
 	put_online_cpus();
 
 	if (err) {
-		/*
-		 * There is no point in flooding logs with warnings or
-		 * especially crashing the system if we fail to create a cache
-		 * for a memcg. In this case we will be accounting the memcg
-		 * allocation to the root cgroup until we succeed to create its
-		 * own cache, but it isn't that critical.
-		 */
-		if (!memcg)
-			return NULL;
-
 		if (flags & SLAB_PANIC)
 			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
 				name, err);
@@ -260,21 +253,55 @@ out_unlock:
 		return NULL;
 	}
 	return s;
-
-out_free_cache:
-	memcg_free_cache_params(s);
-	kfree(s->name);
-	kmem_cache_free(kmem_cache, s);
-	goto out_unlock;
 }
+EXPORT_SYMBOL(kmem_cache_create);
 
-struct kmem_cache *
-kmem_cache_create(const char *name, size_t size, size_t align,
-		  unsigned long flags, void (*ctor)(void *))
+#ifdef CONFIG_MEMCG_KMEM
+/*
+ * kmem_cache_create_memcg - Create a cache for a memory cgroup.
+ * @memcg: The memory cgroup the new cache is for.
+ * @root_cache: The parent of the new cache.
+ *
+ * This function attempts to create a kmem cache that will serve allocation
+ * requests going from @memcg to @root_cache. The new cache inherits properties
+ * from its parent.
+ */
+void kmem_cache_create_memcg(struct mem_cgroup *memcg, struct kmem_cache *root_cache)
 {
-	return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor, NULL);
+	struct kmem_cache *s;
+	char *cache_name;
+
+	get_online_cpus();
+	mutex_lock(&slab_mutex);
+
+	/*
+	 * Since per-memcg caches are created asynchronously on first
+	 * allocation (see memcg_kmem_get_cache()), several threads can try to
+	 * create the same cache, but only one of them may succeed.
+	 */
+	if (cache_from_memcg_idx(root_cache, memcg_cache_id(memcg)))
+		goto out_unlock;
+
+	cache_name = memcg_create_cache_name(memcg, root_cache);
+	if (!cache_name)
+		goto out_unlock;
+
+	s = do_kmem_cache_create(cache_name, root_cache->object_size,
+				 root_cache->size, root_cache->align,
+				 root_cache->flags, root_cache->ctor,
+				 memcg, root_cache);
+	if (IS_ERR(s)) {
+		kfree(cache_name);
+		goto out_unlock;
+	}
+
+	s->allocflags |= __GFP_KMEMCG;
+
+out_unlock:
+	mutex_unlock(&slab_mutex);
+	put_online_cpus();
 }
-EXPORT_SYMBOL(kmem_cache_create);
+#endif /* CONFIG_MEMCG_KMEM */
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {