author | Vladimir Davydov <vdavydov@parallels.com> | 2014-01-23 18:52:58 -0500
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-23 19:36:51 -0500
commit | 1aa13254259bdef0bca723849ab3bab308d2f0c3 (patch) |
tree | 4d2207788bf99f908ffed696f9d58c96eaf0b8b0 /mm |
parent | 363a044f739b0f07a8c063b838c5528d10720e02 (diff) |
memcg, slab: clean up memcg cache initialization/destruction
Currently we have a rather messy set of functions relating to per-memcg
kmem cache initialization and destruction.
Per-memcg caches are created in memcg_create_kmem_cache(). This
function calls kmem_cache_create_memcg() to allocate and initialize a
kmem cache and then "registers" the new cache in the
memcg_params::memcg_caches array of the parent cache.
In the course of its work, kmem_cache_create_memcg() calls the following
memcg-related functions:
- memcg_alloc_cache_params(), to initialize memcg_params of the newly
created cache;
- memcg_cache_list_add(), to add the new cache to the memcg_slab_caches
list.
On the other hand, kmem_cache_destroy(), called on cache destruction,
only calls memcg_release_cache(), which does all the work: it clears the
reference to the cache in its parent's memcg_params::memcg_caches array,
removes the cache from the memcg_slab_caches list, and frees
memcg_params.
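To make the asymmetry concrete, here is a condensed, compilable userspace sketch of the two paths as they stand before this patch. The function names follow the description above, but the bodies are printing stubs under heavy simplification, not the real mm/ code:

```c
/* Toy model of the pre-patch flow; not the actual kernel code. */
#include <stdio.h>

/* called from inside kmem_cache_create_memcg() */
static void memcg_alloc_cache_params(void) { puts("  init memcg_params"); }
static void memcg_cache_list_add(void)     { puts("  add to memcg_slab_caches"); }

static void kmem_cache_create_memcg(void)
{
        memcg_alloc_cache_params();
        memcg_cache_list_add();
}

static void memcg_create_kmem_cache(void)
{
        kmem_cache_create_memcg();
        /* registration in the parent's memcg_caches array happens one layer up */
        puts("  set parent memcg_caches[id]");
}

/* destruction: one function undoes all of the above by itself */
static void memcg_release_cache(void)
{
        puts("  clear memcg_caches[id], del from memcg_slab_caches, free memcg_params");
}

int main(void)
{
        puts("create:");
        memcg_create_kmem_cache();
        puts("destroy:");
        memcg_release_cache();
        return 0;
}
```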
Such an inconsistency between the destruction and initialization paths
makes the code difficult to read, so let's clean this up a bit.
This patch moves all the code relating to registration of per-memcg
caches (adding to the memcg list, setting the pointer to a cache from
its parent) into the newly created memcg_register_cache() and
memcg_unregister_cache() functions, making the initialization and
destruction paths look symmetrical.
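For comparison, a similarly reduced sketch of the post-patch shape (again printing stubs, not the real mm/ code) shows the pairing that results: memcg_alloc_cache_params()/memcg_register_cache() on the way in, memcg_unregister_cache()/memcg_free_cache_params() on the way out:

```c
/* Toy model of the post-patch symmetry; not the actual kernel code. */
#include <stdio.h>

static void memcg_alloc_cache_params(void) { puts("  alloc/init memcg_params"); }
static void memcg_free_cache_params(void)  { puts("  free memcg_params"); }

static void memcg_register_cache(void)
{
        /* add to memcg_slab_caches + set the parent's memcg_caches[id] */
        puts("  register in memcg list and parent array");
}

static void memcg_unregister_cache(void)
{
        /* clear the parent's memcg_caches[id] + del from memcg_slab_caches */
        puts("  unregister from parent array and memcg list");
}

int main(void)
{
        puts("kmem_cache_create_memcg():");
        memcg_alloc_cache_params();
        memcg_register_cache();

        puts("kmem_cache_destroy():");
        memcg_unregister_cache();
        memcg_free_cache_params();
        return 0;
}
```

Every step taken on the creation path now has an obvious counterpart on the destruction path.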
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memcontrol.c | 64 |
-rw-r--r-- | mm/slab_common.c | 5 |
2 files changed, 33 insertions, 36 deletions
```diff
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b8ebe71f872d..739383cd3f70 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3095,16 +3095,6 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
         css_put(&memcg->css);
 }
 
-void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep)
-{
-        if (!memcg)
-                return;
-
-        mutex_lock(&memcg->slab_caches_mutex);
-        list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
-        mutex_unlock(&memcg->slab_caches_mutex);
-}
-
 /*
  * helper for acessing a memcg's index. It will be used as an index in the
  * child cache array in kmem_cache, and also to derive its name. This function
@@ -3265,21 +3255,41 @@ void memcg_free_cache_params(struct kmem_cache *s)
         kfree(s->memcg_params);
 }
 
-void memcg_release_cache(struct kmem_cache *s)
+void memcg_register_cache(struct kmem_cache *s)
 {
         struct kmem_cache *root;
         struct mem_cgroup *memcg;
         int id;
 
+        if (is_root_cache(s))
+                return;
+
+        root = s->memcg_params->root_cache;
+        memcg = s->memcg_params->memcg;
+        id = memcg_cache_id(memcg);
+
+        css_get(&memcg->css);
+
+        mutex_lock(&memcg->slab_caches_mutex);
+        list_add(&s->memcg_params->list, &memcg->memcg_slab_caches);
+        mutex_unlock(&memcg->slab_caches_mutex);
+
+        root->memcg_params->memcg_caches[id] = s;
         /*
-         * This happens, for instance, when a root cache goes away before we
-         * add any memcg.
+         * the readers won't lock, make sure everybody sees the updated value,
+         * so they won't put stuff in the queue again for no reason
          */
-        if (!s->memcg_params)
-                return;
+        wmb();
+}
 
-        if (s->memcg_params->is_root_cache)
-                goto out;
+void memcg_unregister_cache(struct kmem_cache *s)
+{
+        struct kmem_cache *root;
+        struct mem_cgroup *memcg;
+        int id;
+
+        if (is_root_cache(s))
+                return;
 
         memcg = s->memcg_params->memcg;
         id = memcg_cache_id(memcg);
@@ -3292,8 +3302,6 @@ void memcg_release_cache(struct kmem_cache *s)
         mutex_unlock(&memcg->slab_caches_mutex);
 
         css_put(&memcg->css);
-out:
-        memcg_free_cache_params(s);
 }
 
 /*
@@ -3451,26 +3459,13 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 
         mutex_lock(&memcg_cache_mutex);
         new_cachep = cache_from_memcg_idx(cachep, idx);
-        if (new_cachep) {
-                css_put(&memcg->css);
+        if (new_cachep)
                 goto out;
-        }
 
         new_cachep = kmem_cache_dup(memcg, cachep);
-        if (new_cachep == NULL) {
+        if (new_cachep == NULL)
                 new_cachep = cachep;
-                css_put(&memcg->css);
-                goto out;
-        }
-
-        atomic_set(&new_cachep->memcg_params->nr_pages , 0);
 
-        cachep->memcg_params->memcg_caches[idx] = new_cachep;
-        /*
-         * the readers won't lock, make sure everybody sees the updated value,
-         * so they won't put stuff in the queue again for no reason
-         */
-        wmb();
 out:
         mutex_unlock(&memcg_cache_mutex);
         return new_cachep;
@@ -3550,6 +3545,7 @@ static void memcg_create_cache_work_func(struct work_struct *w)
 
         cw = container_of(w, struct create_work, work);
         memcg_create_kmem_cache(cw->memcg, cw->cachep);
+        css_put(&cw->memcg->css);
         kfree(cw);
 }
 
```
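The wmb() that moves into memcg_register_cache() above pairs with lock-free readers of the memcg_caches[] array: as the patch's own comment says, readers take no lock, so the write barrier is there to make the newly published pointer visible and keep them from queueing the cache for creation again. As a portable illustration of this kind of pointer-publish pattern only, using C11 atomics and hypothetical toy names rather than the kernel's wmb() and plain loads:

```c
/*
 * Illustration of the publish pattern behind the wmb() above, using
 * C11 atomics and toy names; an analogy, not the kernel's code.
 */
#include <stdatomic.h>
#include <stdio.h>

struct toy_cache { int initialized; };

#define MAX_MEMCGS 64
static _Atomic(struct toy_cache *) memcg_caches[MAX_MEMCGS];

static void publish_cache(int id, struct toy_cache *s)
{
        s->initialized = 1;                          /* set the object up first */
        atomic_store_explicit(&memcg_caches[id], s,
                              memory_order_release); /* then make it visible */
}

static struct toy_cache *lookup_cache(int id)
{
        /* readers take no lock; acquire pairs with the release above */
        return atomic_load_explicit(&memcg_caches[id], memory_order_acquire);
}

int main(void)
{
        static struct toy_cache c;

        publish_cache(0, &c);
        printf("slot 0 %s\n", lookup_cache(0) ? "populated" : "empty");
        return 0;
}
```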
```diff
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 70f9e249ac30..db24ec48b946 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -215,7 +215,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
 
         s->refcount = 1;
         list_add(&s->list, &slab_caches);
-        memcg_cache_list_add(memcg, s);
+        memcg_register_cache(s);
 
 out_unlock:
         mutex_unlock(&slab_mutex);
@@ -265,7 +265,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
                         if (s->flags & SLAB_DESTROY_BY_RCU)
                                 rcu_barrier();
 
-                        memcg_release_cache(s);
+                        memcg_unregister_cache(s);
+                        memcg_free_cache_params(s);
                         kfree(s->name);
                         kmem_cache_free(kmem_cache, s);
                 } else {
```