aboutsummaryrefslogtreecommitdiffstats
path: root/mm/memcontrol.c
diff options
context:
space:
mode:
authorVladimir Davydov <vdavydov@parallels.com>2014-06-04 19:10:02 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-06-04 19:54:08 -0400
commit776ed0f0377914d1e65fed903c052e9eef3f4cc3 (patch)
tree1ba937f01707b339acbed696d71a46157e523aa8 /mm/memcontrol.c
parent172cb4b3d49a1339dd67ee05e3f47972a70f556f (diff)
memcg: cleanup kmem cache creation/destruction functions naming
Current names are rather inconsistent. Let's try to improve them. Brief change log: ** old name ** ** new name ** kmem_cache_create_memcg memcg_create_kmem_cache memcg_kmem_create_cache memcg_regsiter_cache memcg_kmem_destroy_cache memcg_unregister_cache kmem_cache_destroy_memcg_children memcg_cleanup_cache_params mem_cgroup_destroy_all_caches memcg_unregister_all_caches create_work memcg_register_cache_work memcg_create_cache_work_func memcg_register_cache_func memcg_create_cache_enqueue memcg_schedule_register_cache Signed-off-by: Vladimir Davydov <vdavydov@parallels.com> Acked-by: Michal Hocko <mhocko@suse.cz> Cc: Johannes Weiner <hannes@cmpxchg.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--mm/memcontrol.c60
1 file changed, 28 insertions, 32 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5e2bfcc96da9..d176edb1d5e8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3132,8 +3132,8 @@ void memcg_free_cache_params(struct kmem_cache *s)
3132 kfree(s->memcg_params); 3132 kfree(s->memcg_params);
3133} 3133}
3134 3134
3135static void memcg_kmem_create_cache(struct mem_cgroup *memcg, 3135static void memcg_register_cache(struct mem_cgroup *memcg,
3136 struct kmem_cache *root_cache) 3136 struct kmem_cache *root_cache)
3137{ 3137{
3138 static char memcg_name_buf[NAME_MAX + 1]; /* protected by 3138 static char memcg_name_buf[NAME_MAX + 1]; /* protected by
3139 memcg_slab_mutex */ 3139 memcg_slab_mutex */
@@ -3153,7 +3153,7 @@ static void memcg_kmem_create_cache(struct mem_cgroup *memcg,
3153 return; 3153 return;
3154 3154
3155 cgroup_name(memcg->css.cgroup, memcg_name_buf, NAME_MAX + 1); 3155 cgroup_name(memcg->css.cgroup, memcg_name_buf, NAME_MAX + 1);
3156 cachep = kmem_cache_create_memcg(memcg, root_cache, memcg_name_buf); 3156 cachep = memcg_create_kmem_cache(memcg, root_cache, memcg_name_buf);
3157 /* 3157 /*
3158 * If we could not create a memcg cache, do not complain, because 3158 * If we could not create a memcg cache, do not complain, because
3159 * that's not critical at all as we can always proceed with the root 3159 * that's not critical at all as we can always proceed with the root
@@ -3175,7 +3175,7 @@ static void memcg_kmem_create_cache(struct mem_cgroup *memcg,
3175 root_cache->memcg_params->memcg_caches[id] = cachep; 3175 root_cache->memcg_params->memcg_caches[id] = cachep;
3176} 3176}
3177 3177
3178static void memcg_kmem_destroy_cache(struct kmem_cache *cachep) 3178static void memcg_unregister_cache(struct kmem_cache *cachep)
3179{ 3179{
3180 struct kmem_cache *root_cache; 3180 struct kmem_cache *root_cache;
3181 struct mem_cgroup *memcg; 3181 struct mem_cgroup *memcg;
@@ -3228,7 +3228,7 @@ static inline void memcg_resume_kmem_account(void)
3228 current->memcg_kmem_skip_account--; 3228 current->memcg_kmem_skip_account--;
3229} 3229}
3230 3230
3231int __kmem_cache_destroy_memcg_children(struct kmem_cache *s) 3231int __memcg_cleanup_cache_params(struct kmem_cache *s)
3232{ 3232{
3233 struct kmem_cache *c; 3233 struct kmem_cache *c;
3234 int i, failed = 0; 3234 int i, failed = 0;
@@ -3239,7 +3239,7 @@ int __kmem_cache_destroy_memcg_children(struct kmem_cache *s)
3239 if (!c) 3239 if (!c)
3240 continue; 3240 continue;
3241 3241
3242 memcg_kmem_destroy_cache(c); 3242 memcg_unregister_cache(c);
3243 3243
3244 if (cache_from_memcg_idx(s, i)) 3244 if (cache_from_memcg_idx(s, i))
3245 failed++; 3245 failed++;
@@ -3248,7 +3248,7 @@ int __kmem_cache_destroy_memcg_children(struct kmem_cache *s)
3248 return failed; 3248 return failed;
3249} 3249}
3250 3250
3251static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) 3251static void memcg_unregister_all_caches(struct mem_cgroup *memcg)
3252{ 3252{
3253 struct kmem_cache *cachep; 3253 struct kmem_cache *cachep;
3254 struct memcg_cache_params *params, *tmp; 3254 struct memcg_cache_params *params, *tmp;
@@ -3261,25 +3261,26 @@ static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
3261 cachep = memcg_params_to_cache(params); 3261 cachep = memcg_params_to_cache(params);
3262 kmem_cache_shrink(cachep); 3262 kmem_cache_shrink(cachep);
3263 if (atomic_read(&cachep->memcg_params->nr_pages) == 0) 3263 if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
3264 memcg_kmem_destroy_cache(cachep); 3264 memcg_unregister_cache(cachep);
3265 } 3265 }
3266 mutex_unlock(&memcg_slab_mutex); 3266 mutex_unlock(&memcg_slab_mutex);
3267} 3267}
3268 3268
3269struct create_work { 3269struct memcg_register_cache_work {
3270 struct mem_cgroup *memcg; 3270 struct mem_cgroup *memcg;
3271 struct kmem_cache *cachep; 3271 struct kmem_cache *cachep;
3272 struct work_struct work; 3272 struct work_struct work;
3273}; 3273};
3274 3274
3275static void memcg_create_cache_work_func(struct work_struct *w) 3275static void memcg_register_cache_func(struct work_struct *w)
3276{ 3276{
3277 struct create_work *cw = container_of(w, struct create_work, work); 3277 struct memcg_register_cache_work *cw =
3278 container_of(w, struct memcg_register_cache_work, work);
3278 struct mem_cgroup *memcg = cw->memcg; 3279 struct mem_cgroup *memcg = cw->memcg;
3279 struct kmem_cache *cachep = cw->cachep; 3280 struct kmem_cache *cachep = cw->cachep;
3280 3281
3281 mutex_lock(&memcg_slab_mutex); 3282 mutex_lock(&memcg_slab_mutex);
3282 memcg_kmem_create_cache(memcg, cachep); 3283 memcg_register_cache(memcg, cachep);
3283 mutex_unlock(&memcg_slab_mutex); 3284 mutex_unlock(&memcg_slab_mutex);
3284 3285
3285 css_put(&memcg->css); 3286 css_put(&memcg->css);
@@ -3289,12 +3290,12 @@ static void memcg_create_cache_work_func(struct work_struct *w)
3289/* 3290/*
3290 * Enqueue the creation of a per-memcg kmem_cache. 3291 * Enqueue the creation of a per-memcg kmem_cache.
3291 */ 3292 */
3292static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg, 3293static void __memcg_schedule_register_cache(struct mem_cgroup *memcg,
3293 struct kmem_cache *cachep) 3294 struct kmem_cache *cachep)
3294{ 3295{
3295 struct create_work *cw; 3296 struct memcg_register_cache_work *cw;
3296 3297
3297 cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT); 3298 cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
3298 if (cw == NULL) { 3299 if (cw == NULL) {
3299 css_put(&memcg->css); 3300 css_put(&memcg->css);
3300 return; 3301 return;
@@ -3303,17 +3304,17 @@ static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
3303 cw->memcg = memcg; 3304 cw->memcg = memcg;
3304 cw->cachep = cachep; 3305 cw->cachep = cachep;
3305 3306
3306 INIT_WORK(&cw->work, memcg_create_cache_work_func); 3307 INIT_WORK(&cw->work, memcg_register_cache_func);
3307 schedule_work(&cw->work); 3308 schedule_work(&cw->work);
3308} 3309}
3309 3310
3310static void memcg_create_cache_enqueue(struct mem_cgroup *memcg, 3311static void memcg_schedule_register_cache(struct mem_cgroup *memcg,
3311 struct kmem_cache *cachep) 3312 struct kmem_cache *cachep)
3312{ 3313{
3313 /* 3314 /*
3314 * We need to stop accounting when we kmalloc, because if the 3315 * We need to stop accounting when we kmalloc, because if the
3315 * corresponding kmalloc cache is not yet created, the first allocation 3316 * corresponding kmalloc cache is not yet created, the first allocation
3316 * in __memcg_create_cache_enqueue will recurse. 3317 * in __memcg_schedule_register_cache will recurse.
3317 * 3318 *
3318 * However, it is better to enclose the whole function. Depending on 3319 * However, it is better to enclose the whole function. Depending on
3319 * the debugging options enabled, INIT_WORK(), for instance, can 3320 * the debugging options enabled, INIT_WORK(), for instance, can
@@ -3322,7 +3323,7 @@ static void memcg_create_cache_enqueue(struct mem_cgroup *memcg,
3322 * the safest choice is to do it like this, wrapping the whole function. 3323 * the safest choice is to do it like this, wrapping the whole function.
3323 */ 3324 */
3324 memcg_stop_kmem_account(); 3325 memcg_stop_kmem_account();
3325 __memcg_create_cache_enqueue(memcg, cachep); 3326 __memcg_schedule_register_cache(memcg, cachep);
3326 memcg_resume_kmem_account(); 3327 memcg_resume_kmem_account();
3327} 3328}
3328 3329
@@ -3393,16 +3394,11 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
3393 * 3394 *
3394 * However, there are some clashes that can arrive from locking. 3395 * However, there are some clashes that can arrive from locking.
3395 * For instance, because we acquire the slab_mutex while doing 3396 * For instance, because we acquire the slab_mutex while doing
3396 * kmem_cache_dup, this means no further allocation could happen 3397 * memcg_create_kmem_cache, this means no further allocation
3397 * with the slab_mutex held. 3398 * could happen with the slab_mutex held. So it's better to
3398 * 3399 * defer everything.
3399 * Also, because cache creation issue get_online_cpus(), this
3400 * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
3401 * that ends up reversed during cpu hotplug. (cpuset allocates
3402 * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
3403 * better to defer everything.
3404 */ 3400 */
3405 memcg_create_cache_enqueue(memcg, cachep); 3401 memcg_schedule_register_cache(memcg, cachep);
3406 return cachep; 3402 return cachep;
3407out: 3403out:
3408 rcu_read_unlock(); 3404 rcu_read_unlock();
@@ -3526,7 +3522,7 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order)
3526 memcg_uncharge_kmem(memcg, PAGE_SIZE << order); 3522 memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
3527} 3523}
3528#else 3524#else
3529static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) 3525static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg)
3530{ 3526{
3531} 3527}
3532#endif /* CONFIG_MEMCG_KMEM */ 3528#endif /* CONFIG_MEMCG_KMEM */
@@ -6372,7 +6368,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
6372 css_for_each_descendant_post(iter, css) 6368 css_for_each_descendant_post(iter, css)
6373 mem_cgroup_reparent_charges(mem_cgroup_from_css(iter)); 6369 mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
6374 6370
6375 mem_cgroup_destroy_all_caches(memcg); 6371 memcg_unregister_all_caches(memcg);
6376 vmpressure_cleanup(&memcg->vmpressure); 6372 vmpressure_cleanup(&memcg->vmpressure);
6377} 6373}
6378 6374