author    Michal Hocko <mhocko@suse.cz>    2013-03-28 03:48:14 -0400
committer Tejun Heo <tj@kernel.org>        2013-04-07 12:28:23 -0400
commit    d9c10ddddc98db0a316243cd266c466875975a94 (patch)
tree      534cfc1a15c32047b07d7879c0b4a87a5d2095fd /mm
parent    1e2ccd1c0f67c3f958d6139de2496787b9a57182 (diff)
memcg: fix memcg_cache_name() to use cgroup_name()
As cgroup supports rename, it's unsafe to dereference dentry->d_name
without proper vfs locks. Fix this by using cgroup_name() rather than
the dentry directly.

Also open-code memcg_cache_name() because it is called only from
kmem_cache_dup(), which frees the returned name right after
kmem_cache_create_memcg() makes a copy of it. Such a short-lived
allocation doesn't make much sense, so replace it with a static buffer,
which is safe because kmem_cache_dup() is called with memcg_cache_mutex
held.

Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
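For context before the diff: cgroup_name() was introduced by the same
series precisely so that readers never touch the dentry. A sketch of its
definition from the cgroup code of this era (reconstructed here for
illustration; details may differ slightly):

	/* The cgroup's name lives in an RCU-managed struct cgroup_name so
	 * a rename can swap it atomically; callers must hold
	 * rcu_read_lock() while the returned pointer is in use. */
	struct cgroup_name {
		struct rcu_head rcu_head;
		char name[];
	};

	static inline const char *cgroup_name(const struct cgroup *cgrp)
	{
		return rcu_dereference(cgrp->name)->name;
	}

This is why the patch below wraps the snprintf() in rcu_read_lock() /
rcu_read_unlock(): the name pointer is only guaranteed stable inside the
RCU read-side critical section.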
Diffstat (limited to 'mm')
-rw-r--r--    mm/memcontrol.c    63
1 file changed, 32 insertions(+), 31 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 53b8201b31eb..9715c0c491b0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3214,52 +3214,53 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
 	schedule_work(&cachep->memcg_params->destroy);
 }
 
-static char *memcg_cache_name(struct mem_cgroup *memcg, struct kmem_cache *s)
-{
-	char *name;
-	struct dentry *dentry;
-
-	rcu_read_lock();
-	dentry = rcu_dereference(memcg->css.cgroup->dentry);
-	rcu_read_unlock();
-
-	BUG_ON(dentry == NULL);
-
-	name = kasprintf(GFP_KERNEL, "%s(%d:%s)", s->name,
-			 memcg_cache_id(memcg), dentry->d_name.name);
-
-	return name;
-}
+/*
+ * This lock protects updaters, not readers. We want readers to be as fast as
+ * they can, and they will either see NULL or a valid cache value. Our model
+ * allow them to see NULL, in which case the root memcg will be selected.
+ *
+ * We need this lock because multiple allocations to the same cache from a non
+ * will span more than one worker. Only one of them can create the cache.
+ */
+static DEFINE_MUTEX(memcg_cache_mutex);
 
+/*
+ * Called with memcg_cache_mutex held
+ */
 static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
 					 struct kmem_cache *s)
 {
-	char *name;
 	struct kmem_cache *new;
+	static char *tmp_name = NULL;
 
-	name = memcg_cache_name(memcg, s);
-	if (!name)
-		return NULL;
+	lockdep_assert_held(&memcg_cache_mutex);
+
+	/*
+	 * kmem_cache_create_memcg duplicates the given name and
+	 * cgroup_name for this name requires RCU context.
+	 * This static temporary buffer is used to prevent from
+	 * pointless shortliving allocation.
+	 */
+	if (!tmp_name) {
+		tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
+		if (!tmp_name)
+			return NULL;
+	}
+
+	rcu_read_lock();
+	snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name,
+		 memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup));
+	rcu_read_unlock();
 
-	new = kmem_cache_create_memcg(memcg, name, s->object_size, s->align,
+	new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
 				      (s->flags & ~SLAB_PANIC), s->ctor, s);
 
 	if (new)
 		new->allocflags |= __GFP_KMEMCG;
 
-	kfree(name);
 	return new;
 }
 
-/*
- * This lock protects updaters, not readers. We want readers to be as fast as
- * they can, and they will either see NULL or a valid cache value. Our model
- * allow them to see NULL, in which case the root memcg will be selected.
- *
- * We need this lock because multiple allocations to the same cache from a non
- * will span more than one worker. Only one of them can create the cache.
- */
-static DEFINE_MUTEX(memcg_cache_mutex);
 static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 						  struct kmem_cache *cachep)
 {