author	Qiang Huang <h.huangqiang@huawei.com>	2013-11-12 18:08:24 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-12 22:09:10 -0500
commit	7a67d7abcc8da30a16ed64c3909d3fea004bde93 (patch)
tree	a325a2f97b5ef0b312230e2c03d5478446afe8ef	/mm/memcontrol.c
parent	2ade4de871172b17dd81b336cf0488a83885ffde (diff)
memcg, kmem: use cache_from_memcg_idx instead of hard code
Signed-off-by: Qiang Huang <h.huangqiang@huawei.com>
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Glauber Costa <glommer@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
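For reference, the helper this patch switches to is a thin accessor around the same memcg_caches[] array. Below is a minimal sketch of cache_from_memcg_idx() and the for_each_memcg_cache_index() iterator as they look in this kernel generation; it is paraphrased from memory, not copied from this tree, so treat mm/slab.h and include/linux/memcontrol.h as the authoritative definitions.

/*
 * Approximate shape of the helpers used by this patch (sketch only;
 * the real definitions live in mm/slab.h and include/linux/memcontrol.h).
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	/* A root cache with kmem accounting never enabled has no params. */
	if (!s->memcg_params)
		return NULL;
	return s->memcg_params->memcg_caches[idx];
}

/* Walk every per-memcg index at which a root cache may have a child cache. */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)

If the helper matches the sketch above, it also centralizes the NULL check on memcg_params that the open-coded array lookups removed below did not perform.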
Diffstat (limited to 'mm/memcontrol.c')
 mm/memcontrol.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3d28d5a61efd..3d4bb07c7679 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2956,7 +2956,7 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
 
 	VM_BUG_ON(p->is_root_cache);
 	cachep = p->root_cache;
-	return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
+	return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
 }
 
 #ifdef CONFIG_SLABINFO
@@ -3393,7 +3393,7 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 	idx = memcg_cache_id(memcg);
 
 	mutex_lock(&memcg_cache_mutex);
-	new_cachep = cachep->memcg_params->memcg_caches[idx];
+	new_cachep = cache_from_memcg_idx(cachep, idx);
 	if (new_cachep) {
 		css_put(&memcg->css);
 		goto out;
@@ -3439,8 +3439,8 @@ void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
 	 * we'll take the set_limit_mutex to protect ourselves against this.
 	 */
 	mutex_lock(&set_limit_mutex);
-	for (i = 0; i < memcg_limited_groups_array_size; i++) {
-		c = s->memcg_params->memcg_caches[i];
+	for_each_memcg_cache_index(i) {
+		c = cache_from_memcg_idx(s, i);
 		if (!c)
 			continue;
 
@@ -3573,8 +3573,8 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
 	 * code updating memcg_caches will issue a write barrier to match this.
 	 */
 	read_barrier_depends();
-	if (likely(cachep->memcg_params->memcg_caches[idx])) {
-		cachep = cachep->memcg_params->memcg_caches[idx];
+	if (likely(cache_from_memcg_idx(cachep, idx))) {
+		cachep = cache_from_memcg_idx(cachep, idx);
 		goto out;
 	}
 