author     Vladimir Davydov <vdavydov@parallels.com>      2015-02-12 17:59:23 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org> 2015-02-12 21:54:09 -0500
commit     426589f571f7d6d5ab2ca33ece73164149279ca1
tree       9b9c94fad5bd7bb8330d8c9b4c510479b75c5588 /mm/slab_common.c
parent     f7ce3190c4a35bf887adb7a1aa1ba899b679872d
slab: link memcg caches of the same kind into a list
Sometimes we need to iterate over all memcg copies of a particular root
kmem cache. Currently we use the memcg_cache_params->memcg_caches array
for that, because it contains all existing memcg caches. However, keeping
every cache in that array, including those that belong to offline cgroups,
is bad practice, because the array would then grow without bound. I'm
going to wipe dead caches from it to save space. To still be able to
iterate over all memcg caches of the same kind, let us link them into a
list.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Chinner <david@fromorbit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
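The change itself is the classic intrusive-list pattern from <linux/list.h>: each
per-memcg copy of a root cache embeds a list node in its memcg_cache_params and is
linked onto a list head anchored in the root cache. The following is a rough,
self-contained userspace sketch of that linking and iteration scheme only; the struct
layouts and list helpers below are simplified stand-ins written for illustration, not
the real kernel definitions.

/* Minimal sketch: a root cache anchors a list, each child cache embeds a node. */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *new, struct list_head *head)
{
        /* insert 'new' right after 'head' */
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
}

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct memcg_cache_params {
        int is_root_cache;
        struct list_head list;  /* node in the root cache's child list */
};

struct kmem_cache {
        const char *name;
        struct memcg_cache_params memcg_params;
};

int main(void)
{
        struct kmem_cache root   = { .name = "dentry" };
        struct kmem_cache child1 = { .name = "dentry(1:foo)" };
        struct kmem_cache child2 = { .name = "dentry(2:bar)" };
        struct list_head *pos;

        root.memcg_params.is_root_cache = 1;
        INIT_LIST_HEAD(&root.memcg_params.list);          /* slab_init_memcg_params() */

        /* memcg_create_kmem_cache(): link each new child to its root */
        list_add(&child1.memcg_params.list, &root.memcg_params.list);
        list_add(&child2.memcg_params.list, &root.memcg_params.list);

        /* iterate over all memcg copies of the root cache */
        for (pos = root.memcg_params.list.next;
             pos != &root.memcg_params.list; pos = pos->next) {
                struct kmem_cache *c =
                        container_of(pos, struct kmem_cache, memcg_params.list);
                printf("child cache: %s\n", c->name);
        }

        /* do_kmem_cache_shutdown(): unlink a child when it goes away */
        list_del(&child1.memcg_params.list);
        return 0;
}

Because the nodes are embedded in the caches themselves, unlinking a dead cache is O(1)
and needs no reallocation, which is what makes shrinking the memcg_caches array safe.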
Diffstat (limited to 'mm/slab_common.c')
 -rw-r--r--  mm/slab_common.c  21
 1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 7cc32cf126ef..989784bd88be 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -109,6 +109,7 @@ static inline int kmem_cache_sanity_check(const char *name, size_t size)
 void slab_init_memcg_params(struct kmem_cache *s)
 {
         s->memcg_params.is_root_cache = true;
+        INIT_LIST_HEAD(&s->memcg_params.list);
         RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
 }
 
@@ -449,6 +450,7 @@ static int do_kmem_cache_shutdown(struct kmem_cache *s,
                                            lockdep_is_held(&slab_mutex));
                 BUG_ON(arr->entries[idx] != s);
                 arr->entries[idx] = NULL;
+                list_del(&s->memcg_params.list);
         }
 #endif
         list_move(&s->list, release);
@@ -529,6 +531,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
                 goto out_unlock;
         }
 
+        list_add(&s->memcg_params.list, &root_cache->memcg_params.list);
+
         /*
          * Since readers won't lock (see cache_from_memcg_idx()), we need a
          * barrier here to ensure nobody will see the kmem_cache partially
@@ -581,11 +585,13 @@ void slab_kmem_cache_release(struct kmem_cache *s)
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-        int i;
+        struct kmem_cache *c, *c2;
         LIST_HEAD(release);
         bool need_rcu_barrier = false;
         bool busy = false;
 
+        BUG_ON(!is_root_cache(s));
+
         get_online_cpus();
         get_online_mems();
 
@@ -595,10 +601,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
         if (s->refcount)
                 goto out_unlock;
 
-        for_each_memcg_cache_index(i) {
-                struct kmem_cache *c = cache_from_memcg_idx(s, i);
-
-                if (c && do_kmem_cache_shutdown(c, &release, &need_rcu_barrier))
+        for_each_memcg_cache_safe(c, c2, s) {
+                if (do_kmem_cache_shutdown(c, &release, &need_rcu_barrier))
                         busy = true;
         }
 
@@ -932,16 +936,11 @@ memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
 {
         struct kmem_cache *c;
         struct slabinfo sinfo;
-        int i;
 
         if (!is_root_cache(s))
                 return;
 
-        for_each_memcg_cache_index(i) {
-                c = cache_from_memcg_idx(s, i);
-                if (!c)
-                        continue;
-
+        for_each_memcg_cache(c, s) {
                 memset(&sinfo, 0, sizeof(sinfo));
                 get_slabinfo(c, &sinfo);
 
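The loops above rely on for_each_memcg_cache() and for_each_memcg_cache_safe(), which
are not defined in mm/slab_common.c but in the memcg header touched by the same patch.
Roughly, they are thin wrappers around list_for_each_entry()/list_for_each_entry_safe()
over the new memcg_params.list; the sketch below is an approximation written from the
usage in this diff, not the verbatim upstream definitions.

/* Iterate all per-memcg children of a root cache (approximate sketch). */
#define for_each_memcg_cache(iter, root)                                \
        list_for_each_entry(iter, &(root)->memcg_params.list,           \
                            memcg_params.list)

/* Deletion-safe variant: 'tmp' caches the next entry before the body runs. */
#define for_each_memcg_cache_safe(iter, tmp, root)                      \
        list_for_each_entry_safe(iter, tmp, &(root)->memcg_params.list, \
                                 memcg_params.list)

The _safe variant is what kmem_cache_destroy() needs, because do_kmem_cache_shutdown()
unlinks the entry currently being visited via list_del(&s->memcg_params.list).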