about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Vladimir Davydov <vdavydov@parallels.com>  2015-02-12 17:59:32 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-02-12 21:54:10 -0500
commit2a4db7eb9391a544ff58f4fa11d35246e87c87af (patch)
tree3bbd57297a8303ffa227d6ea5600d2593a0302f4
parentf1008365bbe4931d6a94dcfc11cf4cdada359664 (diff)
memcg: free memcg_caches slot on css offline
We need to look up a kmem_cache in ->memcg_params.memcg_caches arrays only on allocations, so there is no need to have the array entries set until css free - we can clear them on css offline. This will allow us to reuse array entries more efficiently and avoid costly array relocations.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Chinner <david@fromorbit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/slab.h  10
-rw-r--r--  mm/memcontrol.c       38
-rw-r--r--  mm/slab_common.c      39
3 files changed, 65 insertions(+), 22 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 26d99f41b410..ed2ffaab59ea 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -115,13 +115,12 @@ int slab_is_available(void);
115struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, 115struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
116 unsigned long, 116 unsigned long,
117 void (*)(void *)); 117 void (*)(void *));
118#ifdef CONFIG_MEMCG_KMEM
119void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
120void memcg_destroy_kmem_caches(struct mem_cgroup *);
121#endif
122void kmem_cache_destroy(struct kmem_cache *); 118void kmem_cache_destroy(struct kmem_cache *);
123int kmem_cache_shrink(struct kmem_cache *); 119int kmem_cache_shrink(struct kmem_cache *);
124void kmem_cache_free(struct kmem_cache *, void *); 120
121void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
122void memcg_deactivate_kmem_caches(struct mem_cgroup *);
123void memcg_destroy_kmem_caches(struct mem_cgroup *);
125 124
126/* 125/*
127 * Please use this macro to create slab caches. Simply specify the 126 * Please use this macro to create slab caches. Simply specify the
@@ -288,6 +287,7 @@ static __always_inline int kmalloc_index(size_t size)
288 287
289void *__kmalloc(size_t size, gfp_t flags); 288void *__kmalloc(size_t size, gfp_t flags);
290void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags); 289void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
290void kmem_cache_free(struct kmem_cache *, void *);
291 291
292#ifdef CONFIG_NUMA 292#ifdef CONFIG_NUMA
293void *__kmalloc_node(size_t size, gfp_t flags, int node); 293void *__kmalloc_node(size_t size, gfp_t flags, int node);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6f3c0fcd7a2d..abfe0135bfdc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -334,6 +334,7 @@ struct mem_cgroup {
334#if defined(CONFIG_MEMCG_KMEM) 334#if defined(CONFIG_MEMCG_KMEM)
335 /* Index in the kmem_cache->memcg_params.memcg_caches array */ 335 /* Index in the kmem_cache->memcg_params.memcg_caches array */
336 int kmemcg_id; 336 int kmemcg_id;
337 bool kmem_acct_active;
337#endif 338#endif
338 339
339 int last_scanned_node; 340 int last_scanned_node;
@@ -354,7 +355,7 @@ struct mem_cgroup {
354#ifdef CONFIG_MEMCG_KMEM 355#ifdef CONFIG_MEMCG_KMEM
355bool memcg_kmem_is_active(struct mem_cgroup *memcg) 356bool memcg_kmem_is_active(struct mem_cgroup *memcg)
356{ 357{
357 return memcg->kmemcg_id >= 0; 358 return memcg->kmem_acct_active;
358} 359}
359#endif 360#endif
360 361
@@ -585,7 +586,7 @@ static void memcg_free_cache_id(int id);
585 586
586static void disarm_kmem_keys(struct mem_cgroup *memcg) 587static void disarm_kmem_keys(struct mem_cgroup *memcg)
587{ 588{
588 if (memcg_kmem_is_active(memcg)) { 589 if (memcg->kmemcg_id >= 0) {
589 static_key_slow_dec(&memcg_kmem_enabled_key); 590 static_key_slow_dec(&memcg_kmem_enabled_key);
590 memcg_free_cache_id(memcg->kmemcg_id); 591 memcg_free_cache_id(memcg->kmemcg_id);
591 } 592 }
@@ -2666,6 +2667,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
2666{ 2667{
2667 struct mem_cgroup *memcg; 2668 struct mem_cgroup *memcg;
2668 struct kmem_cache *memcg_cachep; 2669 struct kmem_cache *memcg_cachep;
2670 int kmemcg_id;
2669 2671
2670 VM_BUG_ON(!is_root_cache(cachep)); 2672 VM_BUG_ON(!is_root_cache(cachep));
2671 2673
@@ -2673,10 +2675,11 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
2673 return cachep; 2675 return cachep;
2674 2676
2675 memcg = get_mem_cgroup_from_mm(current->mm); 2677 memcg = get_mem_cgroup_from_mm(current->mm);
2676 if (!memcg_kmem_is_active(memcg)) 2678 kmemcg_id = ACCESS_ONCE(memcg->kmemcg_id);
2679 if (kmemcg_id < 0)
2677 goto out; 2680 goto out;
2678 2681
2679 memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg)); 2682 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2680 if (likely(memcg_cachep)) 2683 if (likely(memcg_cachep))
2681 return memcg_cachep; 2684 return memcg_cachep;
2682 2685
@@ -3318,8 +3321,8 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
3318 int err = 0; 3321 int err = 0;
3319 int memcg_id; 3322 int memcg_id;
3320 3323
3321 if (memcg_kmem_is_active(memcg)) 3324 BUG_ON(memcg->kmemcg_id >= 0);
3322 return 0; 3325 BUG_ON(memcg->kmem_acct_active);
3323 3326
3324 /* 3327 /*
3325 * For simplicity, we won't allow this to be disabled. It also can't 3328 * For simplicity, we won't allow this to be disabled. It also can't
@@ -3362,6 +3365,7 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
3362 * patched. 3365 * patched.
3363 */ 3366 */
3364 memcg->kmemcg_id = memcg_id; 3367 memcg->kmemcg_id = memcg_id;
3368 memcg->kmem_acct_active = true;
3365out: 3369out:
3366 return err; 3370 return err;
3367} 3371}
@@ -4041,6 +4045,22 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
4041 return mem_cgroup_sockets_init(memcg, ss); 4045 return mem_cgroup_sockets_init(memcg, ss);
4042} 4046}
4043 4047
4048static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
4049{
4050 if (!memcg->kmem_acct_active)
4051 return;
4052
4053 /*
4054 * Clear the 'active' flag before clearing memcg_caches arrays entries.
4055 * Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it
4056 * guarantees no cache will be created for this cgroup after we are
4057 * done (see memcg_create_kmem_cache()).
4058 */
4059 memcg->kmem_acct_active = false;
4060
4061 memcg_deactivate_kmem_caches(memcg);
4062}
4063
4044static void memcg_destroy_kmem(struct mem_cgroup *memcg) 4064static void memcg_destroy_kmem(struct mem_cgroup *memcg)
4045{ 4065{
4046 memcg_destroy_kmem_caches(memcg); 4066 memcg_destroy_kmem_caches(memcg);
@@ -4052,6 +4072,10 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
4052 return 0; 4072 return 0;
4053} 4073}
4054 4074
4075static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
4076{
4077}
4078
4055static void memcg_destroy_kmem(struct mem_cgroup *memcg) 4079static void memcg_destroy_kmem(struct mem_cgroup *memcg)
4056{ 4080{
4057} 4081}
@@ -4608,6 +4632,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4608 spin_unlock(&memcg->event_list_lock); 4632 spin_unlock(&memcg->event_list_lock);
4609 4633
4610 vmpressure_cleanup(&memcg->vmpressure); 4634 vmpressure_cleanup(&memcg->vmpressure);
4635
4636 memcg_deactivate_kmem(memcg);
4611} 4637}
4612 4638
4613static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 4639static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 6087b1f9a385..0873bcc61c7a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -440,18 +440,8 @@ static int do_kmem_cache_shutdown(struct kmem_cache *s,
440 *need_rcu_barrier = true; 440 *need_rcu_barrier = true;
441 441
442#ifdef CONFIG_MEMCG_KMEM 442#ifdef CONFIG_MEMCG_KMEM
443 if (!is_root_cache(s)) { 443 if (!is_root_cache(s))
444 int idx;
445 struct memcg_cache_array *arr;
446
447 idx = memcg_cache_id(s->memcg_params.memcg);
448 arr = rcu_dereference_protected(s->memcg_params.root_cache->
449 memcg_params.memcg_caches,
450 lockdep_is_held(&slab_mutex));
451 BUG_ON(arr->entries[idx] != s);
452 arr->entries[idx] = NULL;
453 list_del(&s->memcg_params.list); 444 list_del(&s->memcg_params.list);
454 }
455#endif 445#endif
456 list_move(&s->list, release); 446 list_move(&s->list, release);
457 return 0; 447 return 0;
@@ -499,6 +489,13 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
499 489
500 mutex_lock(&slab_mutex); 490 mutex_lock(&slab_mutex);
501 491
492 /*
493 * The memory cgroup could have been deactivated while the cache
494 * creation work was pending.
495 */
496 if (!memcg_kmem_is_active(memcg))
497 goto out_unlock;
498
502 idx = memcg_cache_id(memcg); 499 idx = memcg_cache_id(memcg);
503 arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches, 500 arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
504 lockdep_is_held(&slab_mutex)); 501 lockdep_is_held(&slab_mutex));
@@ -548,6 +545,26 @@ out_unlock:
548 put_online_cpus(); 545 put_online_cpus();
549} 546}
550 547
548void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
549{
550 int idx;
551 struct memcg_cache_array *arr;
552 struct kmem_cache *s;
553
554 idx = memcg_cache_id(memcg);
555
556 mutex_lock(&slab_mutex);
557 list_for_each_entry(s, &slab_caches, list) {
558 if (!is_root_cache(s))
559 continue;
560
561 arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
562 lockdep_is_held(&slab_mutex));
563 arr->entries[idx] = NULL;
564 }
565 mutex_unlock(&slab_mutex);
566}
567
551void memcg_destroy_kmem_caches(struct mem_cgroup *memcg) 568void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
552{ 569{
553 LIST_HEAD(release); 570 LIST_HEAD(release);