about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorVladimir Davydov <vdavydov@parallels.com>2015-02-12 17:59:38 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-02-12 21:54:10 -0500
commit2788cf0c401c268b4819c5407493a8769b7007aa (patch)
tree863ea244d6908bd6e8149e6cd81270389a9426a8 /mm
parent3f97b163207c67a3b35931494ad3db1de66356f0 (diff)
memcg: reparent list_lrus and free kmemcg_id on css offline
Now, the only reason to keep kmemcg_id till css free is list_lru, which uses it to distribute elements between per-memcg lists. However, it can be easily sorted out - we only need to change kmemcg_id of an offline cgroup to its parent's id, making further list_lru_add()'s add elements to the parent's list, and then move all elements from the offline cgroup's list to the one of its parent. It will work, because a racing list_lru_del() does not need to know the list it is deleting the element from. It can decrement the wrong nr_items counter though, but the ongoing reparenting will fix it.

After list_lru reparenting is done we are free to release kmemcg_id saving a valuable slot in a per-memcg array for new cgroups.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Chinner <david@fromorbit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/list_lru.c46
-rw-r--r--mm/memcontrol.c39
2 files changed, 77 insertions, 8 deletions
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 8d9d168c6c38..909eca2c820e 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -100,7 +100,6 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
100 100
101 spin_lock(&nlru->lock); 101 spin_lock(&nlru->lock);
102 l = list_lru_from_kmem(nlru, item); 102 l = list_lru_from_kmem(nlru, item);
103 WARN_ON_ONCE(l->nr_items < 0);
104 if (list_empty(item)) { 103 if (list_empty(item)) {
105 list_add_tail(item, &l->list); 104 list_add_tail(item, &l->list);
106 l->nr_items++; 105 l->nr_items++;
@@ -123,7 +122,6 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
123 if (!list_empty(item)) { 122 if (!list_empty(item)) {
124 list_del_init(item); 123 list_del_init(item);
125 l->nr_items--; 124 l->nr_items--;
126 WARN_ON_ONCE(l->nr_items < 0);
127 spin_unlock(&nlru->lock); 125 spin_unlock(&nlru->lock);
128 return true; 126 return true;
129 } 127 }
@@ -156,7 +154,6 @@ static unsigned long __list_lru_count_one(struct list_lru *lru,
156 154
157 spin_lock(&nlru->lock); 155 spin_lock(&nlru->lock);
158 l = list_lru_from_memcg_idx(nlru, memcg_idx); 156 l = list_lru_from_memcg_idx(nlru, memcg_idx);
159 WARN_ON_ONCE(l->nr_items < 0);
160 count = l->nr_items; 157 count = l->nr_items;
161 spin_unlock(&nlru->lock); 158 spin_unlock(&nlru->lock);
162 159
@@ -458,6 +455,49 @@ fail:
458 memcg_cancel_update_list_lru(lru, old_size, new_size); 455 memcg_cancel_update_list_lru(lru, old_size, new_size);
459 goto out; 456 goto out;
460} 457}
458
459static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
460 int src_idx, int dst_idx)
461{
462 struct list_lru_one *src, *dst;
463
464 /*
465 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
466 * we have to use IRQ-safe primitives here to avoid deadlock.
467 */
468 spin_lock_irq(&nlru->lock);
469
470 src = list_lru_from_memcg_idx(nlru, src_idx);
471 dst = list_lru_from_memcg_idx(nlru, dst_idx);
472
473 list_splice_init(&src->list, &dst->list);
474 dst->nr_items += src->nr_items;
475 src->nr_items = 0;
476
477 spin_unlock_irq(&nlru->lock);
478}
479
480static void memcg_drain_list_lru(struct list_lru *lru,
481 int src_idx, int dst_idx)
482{
483 int i;
484
485 if (!list_lru_memcg_aware(lru))
486 return;
487
488 for (i = 0; i < nr_node_ids; i++)
489 memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
490}
491
492void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
493{
494 struct list_lru *lru;
495
496 mutex_lock(&list_lrus_mutex);
497 list_for_each_entry(lru, &list_lrus, list)
498 memcg_drain_list_lru(lru, src_idx, dst_idx);
499 mutex_unlock(&list_lrus_mutex);
500}
461#else 501#else
462static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) 502static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
463{ 503{
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index abfe0135bfdc..419c06b1794a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -334,6 +334,7 @@ struct mem_cgroup {
334#if defined(CONFIG_MEMCG_KMEM) 334#if defined(CONFIG_MEMCG_KMEM)
335 /* Index in the kmem_cache->memcg_params.memcg_caches array */ 335 /* Index in the kmem_cache->memcg_params.memcg_caches array */
336 int kmemcg_id; 336 int kmemcg_id;
337 bool kmem_acct_activated;
337 bool kmem_acct_active; 338 bool kmem_acct_active;
338#endif 339#endif
339 340
@@ -582,14 +583,10 @@ void memcg_put_cache_ids(void)
582struct static_key memcg_kmem_enabled_key; 583struct static_key memcg_kmem_enabled_key;
583EXPORT_SYMBOL(memcg_kmem_enabled_key); 584EXPORT_SYMBOL(memcg_kmem_enabled_key);
584 585
585static void memcg_free_cache_id(int id);
586
587static void disarm_kmem_keys(struct mem_cgroup *memcg) 586static void disarm_kmem_keys(struct mem_cgroup *memcg)
588{ 587{
589 if (memcg->kmemcg_id >= 0) { 588 if (memcg->kmem_acct_activated)
590 static_key_slow_dec(&memcg_kmem_enabled_key); 589 static_key_slow_dec(&memcg_kmem_enabled_key);
591 memcg_free_cache_id(memcg->kmemcg_id);
592 }
593 /* 590 /*
594 * This check can't live in kmem destruction function, 591 * This check can't live in kmem destruction function,
595 * since the charges will outlive the cgroup 592 * since the charges will outlive the cgroup
@@ -3322,6 +3319,7 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
3322 int memcg_id; 3319 int memcg_id;
3323 3320
3324 BUG_ON(memcg->kmemcg_id >= 0); 3321 BUG_ON(memcg->kmemcg_id >= 0);
3322 BUG_ON(memcg->kmem_acct_activated);
3325 BUG_ON(memcg->kmem_acct_active); 3323 BUG_ON(memcg->kmem_acct_active);
3326 3324
3327 /* 3325 /*
@@ -3365,6 +3363,7 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
3365 * patched. 3363 * patched.
3366 */ 3364 */
3367 memcg->kmemcg_id = memcg_id; 3365 memcg->kmemcg_id = memcg_id;
3366 memcg->kmem_acct_activated = true;
3368 memcg->kmem_acct_active = true; 3367 memcg->kmem_acct_active = true;
3369out: 3368out:
3370 return err; 3369 return err;
@@ -4047,6 +4046,10 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
4047 4046
4048static void memcg_deactivate_kmem(struct mem_cgroup *memcg) 4047static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
4049{ 4048{
4049 struct cgroup_subsys_state *css;
4050 struct mem_cgroup *parent, *child;
4051 int kmemcg_id;
4052
4050 if (!memcg->kmem_acct_active) 4053 if (!memcg->kmem_acct_active)
4051 return; 4054 return;
4052 4055
@@ -4059,6 +4062,32 @@ static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
4059 memcg->kmem_acct_active = false; 4062 memcg->kmem_acct_active = false;
4060 4063
4061 memcg_deactivate_kmem_caches(memcg); 4064 memcg_deactivate_kmem_caches(memcg);
4065
4066 kmemcg_id = memcg->kmemcg_id;
4067 BUG_ON(kmemcg_id < 0);
4068
4069 parent = parent_mem_cgroup(memcg);
4070 if (!parent)
4071 parent = root_mem_cgroup;
4072
4073 /*
4074 * Change kmemcg_id of this cgroup and all its descendants to the
4075 * parent's id, and then move all entries from this cgroup's list_lrus
4076 * to ones of the parent. After we have finished, all list_lrus
4077 * corresponding to this cgroup are guaranteed to remain empty. The
4078 * ordering is imposed by list_lru_node->lock taken by
4079 * memcg_drain_all_list_lrus().
4080 */
4081 css_for_each_descendant_pre(css, &memcg->css) {
4082 child = mem_cgroup_from_css(css);
4083 BUG_ON(child->kmemcg_id != kmemcg_id);
4084 child->kmemcg_id = parent->kmemcg_id;
4085 if (!memcg->use_hierarchy)
4086 break;
4087 }
4088 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
4089
4090 memcg_free_cache_id(kmemcg_id);
4062} 4091}
4063 4092
4064static void memcg_destroy_kmem(struct mem_cgroup *memcg) 4093static void memcg_destroy_kmem(struct mem_cgroup *memcg)