author:    Vladimir Davydov <vdavydov@parallels.com>  2014-10-09 18:28:45 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2014-10-09 22:25:59 -0400
commit:    f3bb3043a092368a255bca5d1c6f4352c96a3b2d
tree:      94509d92d4c0feb55db1548834e417ac43ebc81e /mm/memcontrol.c
parent:    33a690c45b202e4c6483bfd1d93ad8d0f51df2ca
memcg: don't call memcg_update_all_caches if new cache id fits
memcg_update_all_caches grows arrays of per-memcg caches, so we only
need to call it when memcg_limited_groups_array_size is increased.
However, currently we invoke it each time a new kmem-active memory
cgroup is created. Then it just iterates over all slab_caches and does
nothing (memcg_update_cache_size returns immediately).

This patch fixes this insanity. In the meantime it moves the code
dealing with id allocations to separate functions, memcg_alloc_cache_id
and memcg_free_cache_id.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
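To make the new flow concrete, here is a minimal userspace C sketch of
the id-allocation path the patch introduces. It is an illustration
under stated assumptions, not kernel code: alloc_id(), grow_arrays(),
alloc_cache_id() and the CACHES_*_SIZE constants are hypothetical
stand-ins for ida_simple_get(), memcg_update_all_caches(),
memcg_alloc_cache_id() and MEMCG_CACHES_{MIN,MAX}_SIZE.

/*
 * Sketch of the patched allocation path: grab an id first, and only
 * walk/grow the per-memcg cache arrays when the id does not fit.
 */
#include <stdio.h>

#define CACHES_MIN_SIZE 4
#define CACHES_MAX_SIZE 64

static int array_size;               /* models memcg_limited_groups_array_size */
static unsigned long long id_bitmap; /* toy stand-in for the kmem_limited_groups IDA */

/* Lowest clear bit, like ida_simple_get(..., 0, CACHES_MAX_SIZE, ...). */
static int alloc_id(void)
{
	int id;

	for (id = 0; id < CACHES_MAX_SIZE; id++) {
		if (!(id_bitmap & (1ULL << id))) {
			id_bitmap |= 1ULL << id;
			return id;
		}
	}
	return -1;
}

/* Stand-in for memcg_update_all_caches(): pretend to resize every root cache. */
static int grow_arrays(int size)
{
	printf("growing per-memcg cache arrays to %d slots\n", size);
	array_size = size;	/* the kernel does this via memcg_update_array_size */
	return 0;
}

/* The logic of the new memcg_alloc_cache_id. */
static int alloc_cache_id(void)
{
	int id, size;

	id = alloc_id();
	if (id < 0)
		return id;
	if (id < array_size)
		return id;	/* id fits: no walk over slab_caches at all */

	size = 2 * (id + 1);
	if (size < CACHES_MIN_SIZE)
		size = CACHES_MIN_SIZE;
	else if (size > CACHES_MAX_SIZE)
		size = CACHES_MAX_SIZE;

	if (grow_arrays(size)) {
		id_bitmap &= ~(1ULL << id);	/* models memcg_free_cache_id */
		return -1;
	}
	return id;
}

int main(void)
{
	int i;

	/* Only ids 0 and 4 trigger a resize; ids 1-3 and 5 fit for free. */
	for (i = 0; i < 6; i++)
		printf("got id %d\n", alloc_cache_id());
	return 0;
}

Running the sketch allocates six ids but resizes only twice (for ids 0
and 4); before the patch, every activation walked all of slab_caches
even when nothing needed to grow.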
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c | 136
1 file changed, 72 insertions(+), 64 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 865e87c014d6..ef4fbc5e4ca3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -649,11 +649,13 @@ int memcg_limited_groups_array_size;
 struct static_key memcg_kmem_enabled_key;
 EXPORT_SYMBOL(memcg_kmem_enabled_key);
 
+static void memcg_free_cache_id(int id);
+
 static void disarm_kmem_keys(struct mem_cgroup *memcg)
 {
 	if (memcg_kmem_is_active(memcg)) {
 		static_key_slow_dec(&memcg_kmem_enabled_key);
-		ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id);
+		memcg_free_cache_id(memcg->kmemcg_id);
 	}
 	/*
 	 * This check can't live in kmem destruction function,
@@ -2906,19 +2908,44 @@ int memcg_cache_id(struct mem_cgroup *memcg)
 	return memcg ? memcg->kmemcg_id : -1;
 }
 
-static size_t memcg_caches_array_size(int num_groups)
+static int memcg_alloc_cache_id(void)
 {
-	ssize_t size;
-	if (num_groups <= 0)
-		return 0;
+	int id, size;
+	int err;
+
+	id = ida_simple_get(&kmem_limited_groups,
+			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
+	if (id < 0)
+		return id;
 
-	size = 2 * num_groups;
+	if (id < memcg_limited_groups_array_size)
+		return id;
+
+	/*
+	 * There's no space for the new id in memcg_caches arrays,
+	 * so we have to grow them.
+	 */
+
+	size = 2 * (id + 1);
 	if (size < MEMCG_CACHES_MIN_SIZE)
 		size = MEMCG_CACHES_MIN_SIZE;
 	else if (size > MEMCG_CACHES_MAX_SIZE)
 		size = MEMCG_CACHES_MAX_SIZE;
 
-	return size;
+	mutex_lock(&memcg_slab_mutex);
+	err = memcg_update_all_caches(size);
+	mutex_unlock(&memcg_slab_mutex);
+
+	if (err) {
+		ida_simple_remove(&kmem_limited_groups, id);
+		return err;
+	}
+	return id;
+}
+
+static void memcg_free_cache_id(int id)
+{
+	ida_simple_remove(&kmem_limited_groups, id);
 }
 
 /*
@@ -2928,59 +2955,55 @@ static size_t memcg_caches_array_size(int num_groups)
  */
 void memcg_update_array_size(int num)
 {
-	if (num > memcg_limited_groups_array_size)
-		memcg_limited_groups_array_size = memcg_caches_array_size(num);
+	memcg_limited_groups_array_size = num;
 }
 
 int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 {
 	struct memcg_cache_params *cur_params = s->memcg_params;
+	struct memcg_cache_params *new_params;
+	size_t size;
+	int i;
 
 	VM_BUG_ON(!is_root_cache(s));
 
-	if (num_groups > memcg_limited_groups_array_size) {
-		int i;
-		struct memcg_cache_params *new_params;
-		ssize_t size = memcg_caches_array_size(num_groups);
+	size = num_groups * sizeof(void *);
+	size += offsetof(struct memcg_cache_params, memcg_caches);
 
-		size *= sizeof(void *);
-		size += offsetof(struct memcg_cache_params, memcg_caches);
-
-		new_params = kzalloc(size, GFP_KERNEL);
-		if (!new_params)
-			return -ENOMEM;
-
-		new_params->is_root_cache = true;
+	new_params = kzalloc(size, GFP_KERNEL);
+	if (!new_params)
+		return -ENOMEM;
 
-		/*
-		 * There is the chance it will be bigger than
-		 * memcg_limited_groups_array_size, if we failed an allocation
-		 * in a cache, in which case all caches updated before it, will
-		 * have a bigger array.
-		 *
-		 * But if that is the case, the data after
-		 * memcg_limited_groups_array_size is certainly unused
-		 */
-		for (i = 0; i < memcg_limited_groups_array_size; i++) {
-			if (!cur_params->memcg_caches[i])
-				continue;
-			new_params->memcg_caches[i] =
-						cur_params->memcg_caches[i];
-		}
+	new_params->is_root_cache = true;
 
-		/*
-		 * Ideally, we would wait until all caches succeed, and only
-		 * then free the old one. But this is not worth the extra
-		 * pointer per-cache we'd have to have for this.
-		 *
-		 * It is not a big deal if some caches are left with a size
-		 * bigger than the others. And all updates will reset this
-		 * anyway.
-		 */
-		rcu_assign_pointer(s->memcg_params, new_params);
-		if (cur_params)
-			kfree_rcu(cur_params, rcu_head);
+	/*
+	 * There is the chance it will be bigger than
+	 * memcg_limited_groups_array_size, if we failed an allocation
+	 * in a cache, in which case all caches updated before it, will
+	 * have a bigger array.
+	 *
+	 * But if that is the case, the data after
+	 * memcg_limited_groups_array_size is certainly unused
+	 */
+	for (i = 0; i < memcg_limited_groups_array_size; i++) {
+		if (!cur_params->memcg_caches[i])
+			continue;
+		new_params->memcg_caches[i] =
+					cur_params->memcg_caches[i];
 	}
+
+	/*
+	 * Ideally, we would wait until all caches succeed, and only
+	 * then free the old one. But this is not worth the extra
+	 * pointer per-cache we'd have to have for this.
+	 *
+	 * It is not a big deal if some caches are left with a size
+	 * bigger than the others. And all updates will reset this
+	 * anyway.
+	 */
+	rcu_assign_pointer(s->memcg_params, new_params);
+	if (cur_params)
+		kfree_rcu(cur_params, rcu_head);
 	return 0;
 }
 
@@ -4181,23 +4204,12 @@ static int __memcg_activate_kmem(struct mem_cgroup *memcg,
 	if (err)
 		goto out;
 
-	memcg_id = ida_simple_get(&kmem_limited_groups,
-				  0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
+	memcg_id = memcg_alloc_cache_id();
 	if (memcg_id < 0) {
 		err = memcg_id;
 		goto out;
 	}
 
-	/*
-	 * Make sure we have enough space for this cgroup in each root cache's
-	 * memcg_params.
-	 */
-	mutex_lock(&memcg_slab_mutex);
-	err = memcg_update_all_caches(memcg_id + 1);
-	mutex_unlock(&memcg_slab_mutex);
-	if (err)
-		goto out_rmid;
-
 	memcg->kmemcg_id = memcg_id;
 	INIT_LIST_HEAD(&memcg->memcg_slab_caches);
 
@@ -4218,10 +4230,6 @@ static int __memcg_activate_kmem(struct mem_cgroup *memcg,
 out:
 	memcg_resume_kmem_account();
 	return err;
-
-out_rmid:
-	ida_simple_remove(&kmem_limited_groups, memcg_id);
-	goto out;
 }
 
 static int memcg_activate_kmem(struct mem_cgroup *memcg,