author		Vladimir Davydov <vdavydov@parallels.com>	2015-02-12 17:59:23 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-12 21:54:09 -0500
commit		426589f571f7d6d5ab2ca33ece73164149279ca1 (patch)
tree		9b9c94fad5bd7bb8330d8c9b4c510479b75c5588
parent		f7ce3190c4a35bf887adb7a1aa1ba899b679872d (diff)
slab: link memcg caches of the same kind into a list
Sometimes, we need to iterate over all memcg copies of a particular root
kmem cache.  Currently, we use the memcg_cache_params->memcg_caches array
for that, because it contains all existing memcg caches.  However, it is
bad practice to keep all caches, including those that belong to offline
cgroups, in this array, because it will then grow without bound.  I'm
going to wipe away dead caches from it to save space.  To still be able
to iterate over all memcg caches of the same kind, let us link them into
a list.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Chinner <david@fromorbit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
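To illustrate the iteration pattern the patch introduces, here is a minimal userspace sketch (not kernel code): child caches are chained through a list head embedded in the root cache and walked directly, instead of scanning an index array and skipping NULL slots. The names struct cache, children, node, list_init and for_each_child_cache are simplified stand-ins for struct kmem_cache, memcg_params.list and for_each_memcg_cache(); for clarity the sketch keeps separate head and link members, whereas the patch reuses a single memcg_params.list member for both.

/* Minimal sketch of list-based iteration over per-memcg child caches.
 * Names are illustrative stand-ins, not the kernel's own API. */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

struct cache {
	const char *name;
	struct list_head children;	/* list head embedded in the root cache */
	struct list_head node;		/* link in the root's children list */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Walk every child cache of @root, analogous to for_each_memcg_cache(c, root). */
#define for_each_child_cache(c, root) \
	for ((c) = container_of((root)->children.next, struct cache, node); \
	     &(c)->node != &(root)->children; \
	     (c) = container_of((c)->node.next, struct cache, node))

int main(void)
{
	struct cache root = { .name = "kmalloc-64" };
	struct cache a = { .name = "kmalloc-64(1:grp-a)" };
	struct cache b = { .name = "kmalloc-64(2:grp-b)" };
	struct cache *c;

	list_init(&root.children);
	list_add(&a.node, &root.children);	/* done when a child cache is created */
	list_add(&b.node, &root.children);

	for_each_child_cache(c, &root)		/* no NULL checks needed */
		printf("child cache: %s\n", c->name);
	return 0;
}

Compared with the old memcg_caches array walk, such a list never contains empty slots, which is why the callers converted in this patch can drop their "if (c)" / "continue" checks.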
-rw-r--r--	include/linux/slab.h	 4
-rw-r--r--	mm/slab.c	13
-rw-r--r--	mm/slab.h	17
-rw-r--r--	mm/slab_common.c	21
-rw-r--r--	mm/slub.c	19
5 files changed, 41 insertions(+), 33 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1e03c11bbfbd..26d99f41b410 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -491,9 +491,13 @@ struct memcg_cache_array {
  *
  * @memcg: pointer to the memcg this cache belongs to
  * @root_cache: pointer to the global, root cache, this cache was derived from
+ *
+ * Both root and child caches of the same kind are linked into a list chained
+ * through @list.
  */
 struct memcg_cache_params {
 	bool is_root_cache;
+	struct list_head list;
 	union {
 		struct memcg_cache_array __rcu *memcg_caches;
 		struct {
diff --git a/mm/slab.c b/mm/slab.c
index 65b5dcb6f671..7894017bc160 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3708,8 +3708,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 				int batchcount, int shared, gfp_t gfp)
 {
 	int ret;
-	struct kmem_cache *c = NULL;
-	int i = 0;
+	struct kmem_cache *c;
 
 	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
 
@@ -3719,12 +3718,10 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 	if ((ret < 0) || !is_root_cache(cachep))
 		return ret;
 
-	VM_BUG_ON(!mutex_is_locked(&slab_mutex));
-	for_each_memcg_cache_index(i) {
-		c = cache_from_memcg_idx(cachep, i);
-		if (c)
-			/* return value determined by the parent cache only */
-			__do_tune_cpucache(c, limit, batchcount, shared, gfp);
+	lockdep_assert_held(&slab_mutex);
+	for_each_memcg_cache(c, cachep) {
+		/* return value determined by the root cache only */
+		__do_tune_cpucache(c, limit, batchcount, shared, gfp);
 	}
 
 	return ret;
diff --git a/mm/slab.h b/mm/slab.h
index 53a623f85931..0a56d76ac0e9 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -163,6 +163,18 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 			size_t count, loff_t *ppos);
 
 #ifdef CONFIG_MEMCG_KMEM
+/*
+ * Iterate over all memcg caches of the given root cache. The caller must hold
+ * slab_mutex.
+ */
+#define for_each_memcg_cache(iter, root) \
+	list_for_each_entry(iter, &(root)->memcg_params.list, \
+			    memcg_params.list)
+
+#define for_each_memcg_cache_safe(iter, tmp, root) \
+	list_for_each_entry_safe(iter, tmp, &(root)->memcg_params.list, \
+				 memcg_params.list)
+
 static inline bool is_root_cache(struct kmem_cache *s)
 {
 	return s->memcg_params.is_root_cache;
@@ -241,6 +253,11 @@ extern void slab_init_memcg_params(struct kmem_cache *);
 
 #else /* !CONFIG_MEMCG_KMEM */
 
+#define for_each_memcg_cache(iter, root) \
+	for ((void)(iter), (void)(root); 0; )
+#define for_each_memcg_cache_safe(iter, tmp, root) \
+	for ((void)(iter), (void)(tmp), (void)(root); 0; )
+
 static inline bool is_root_cache(struct kmem_cache *s)
 {
 	return true;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 7cc32cf126ef..989784bd88be 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -109,6 +109,7 @@ static inline int kmem_cache_sanity_check(const char *name, size_t size)
 void slab_init_memcg_params(struct kmem_cache *s)
 {
 	s->memcg_params.is_root_cache = true;
+	INIT_LIST_HEAD(&s->memcg_params.list);
 	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
 }
 
@@ -449,6 +450,7 @@ static int do_kmem_cache_shutdown(struct kmem_cache *s,
 						lockdep_is_held(&slab_mutex));
 		BUG_ON(arr->entries[idx] != s);
 		arr->entries[idx] = NULL;
+		list_del(&s->memcg_params.list);
 	}
 #endif
 	list_move(&s->list, release);
@@ -529,6 +531,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
 		goto out_unlock;
 	}
 
+	list_add(&s->memcg_params.list, &root_cache->memcg_params.list);
+
 	/*
 	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
 	 * barrier here to ensure nobody will see the kmem_cache partially
@@ -581,11 +585,13 @@ void slab_kmem_cache_release(struct kmem_cache *s)
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-	int i;
+	struct kmem_cache *c, *c2;
 	LIST_HEAD(release);
 	bool need_rcu_barrier = false;
 	bool busy = false;
 
+	BUG_ON(!is_root_cache(s));
+
 	get_online_cpus();
 	get_online_mems();
 
@@ -595,10 +601,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	if (s->refcount)
 		goto out_unlock;
 
-	for_each_memcg_cache_index(i) {
-		struct kmem_cache *c = cache_from_memcg_idx(s, i);
-
-		if (c && do_kmem_cache_shutdown(c, &release, &need_rcu_barrier))
+	for_each_memcg_cache_safe(c, c2, s) {
+		if (do_kmem_cache_shutdown(c, &release, &need_rcu_barrier))
 			busy = true;
 	}
 
@@ -932,16 +936,11 @@ memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
 {
 	struct kmem_cache *c;
 	struct slabinfo sinfo;
-	int i;
 
 	if (!is_root_cache(s))
 		return;
 
-	for_each_memcg_cache_index(i) {
-		c = cache_from_memcg_idx(s, i);
-		if (!c)
-			continue;
-
+	for_each_memcg_cache(c, s) {
 		memset(&sinfo, 0, sizeof(sinfo));
 		get_slabinfo(c, &sinfo);
 
diff --git a/mm/slub.c b/mm/slub.c
index 75d55fdfe3a1..1e5a4636cb23 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3636,13 +3636,10 @@ struct kmem_cache *
 __kmem_cache_alias(const char *name, size_t size, size_t align,
 		   unsigned long flags, void (*ctor)(void *))
 {
-	struct kmem_cache *s;
+	struct kmem_cache *s, *c;
 
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
-		int i;
-		struct kmem_cache *c;
-
 		s->refcount++;
 
 		/*
@@ -3652,10 +3649,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
 		s->object_size = max(s->object_size, (int)size);
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 
-		for_each_memcg_cache_index(i) {
-			c = cache_from_memcg_idx(s, i);
-			if (!c)
-				continue;
+		for_each_memcg_cache(c, s) {
 			c->object_size = s->object_size;
 			c->inuse = max_t(int, c->inuse,
 					 ALIGN(size, sizeof(void *)));
@@ -4921,7 +4915,7 @@ static ssize_t slab_attr_store(struct kobject *kobj,
 	err = attribute->store(s, buf, len);
 #ifdef CONFIG_MEMCG_KMEM
 	if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
-		int i;
+		struct kmem_cache *c;
 
 		mutex_lock(&slab_mutex);
 		if (s->max_attr_size < len)
@@ -4944,11 +4938,8 @@ static ssize_t slab_attr_store(struct kobject *kobj,
 		 * directly either failed or succeeded, in which case we loop
 		 * through the descendants with best-effort propagation.
 		 */
-		for_each_memcg_cache_index(i) {
-			struct kmem_cache *c = cache_from_memcg_idx(s, i);
-			if (c)
-				attribute->store(c, buf, len);
-		}
+		for_each_memcg_cache(c, s)
+			attribute->store(c, buf, len);
 		mutex_unlock(&slab_mutex);
 	}
 #endif