| author | Filipe Brandenburger <filbranden@google.com> | 2014-03-03 18:38:25 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-03-04 10:55:48 -0500 |
| commit | 4fb1a86fb5e4209a7d4426d4e586c58e9edc74ac | |
| tree | 3afeae40bb074a8670a83babc1ce76cf8def31fa /mm/memcontrol.c | |
| parent | ce48225fe3b1b0d1fc9fceb96ac3d8a879e45114 | |
memcg: reparent charges of children before processing parent
Sometimes the cleanup after memcg hierarchy testing gets stuck in
mem_cgroup_reparent_charges(), unable to bring non-kmem usage down to 0.
There may turn out to be several causes, but a major cause is this: the
workitem to offline the parent can get run before the workitem to offline
the child; the parent's mem_cgroup_reparent_charges() then circles around
waiting for the child's pages to be reparented to its LRUs, but it is
holding cgroup_mutex, which prevents the child from ever reaching its own
mem_cgroup_reparent_charges().
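To make the stall pattern concrete: below is an illustrative userspace analogue, not kernel code, and every name in it is made up. The "parent" worker holds a lock standing in for cgroup_mutex while polling for progress that only the "child" worker can make, and the child cannot start because it needs that same lock.

```c
/* Hypothetical userspace analogue of the stall described above. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t demo_cgroup_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool child_charges_reparented;

static void *child_offline_work(void *arg)
{
	(void)arg;
	/* The child's offline work cannot run until the mutex is free. */
	pthread_mutex_lock(&demo_cgroup_mutex);
	atomic_store(&child_charges_reparented, true);
	printf("child: reparented its charges\n");
	pthread_mutex_unlock(&demo_cgroup_mutex);
	return NULL;
}

int main(void)
{
	pthread_t child;

	/* The parent's offline work happens to run first and takes the mutex. */
	pthread_mutex_lock(&demo_cgroup_mutex);
	pthread_create(&child, NULL, child_offline_work, NULL);

	/* The parent waits for the child's charges to move over; in the
	 * kernel this loop spins indefinitely, here it is bounded so the
	 * demo exits. */
	for (int i = 0; i < 5 && !atomic_load(&child_charges_reparented); i++) {
		printf("parent: waiting, but child is blocked on the mutex\n");
		usleep(100 * 1000);
	}

	/* Only after the parent drops the mutex can the child proceed. */
	pthread_mutex_unlock(&demo_cgroup_mutex);
	pthread_join(child, NULL);
	return 0;
}
```

Build with `cc demo.c -pthread`; the bounded polling loop exists only so the demo terminates instead of spinning forever the way the kernel loop does.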
Further testing showed that an ordered workqueue for cgroup_destroy_wq
is not always good enough: the call_rcu_sched stage inside
percpu_ref_kill_and_confirm() can still reorder the offline work items
before they ever reach the workqueue.
Instead, when offlining a memcg, call mem_cgroup_reparent_charges() on
all its children (and grandchildren, in the correct order) to have their
charges reparented first.
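As an aside (not part of the commit), the guarantee the fix relies on is just post-order traversal: css_for_each_descendant_post() visits every descendant before its ancestor, with the origin css itself visited last. Below is a minimal, self-contained C sketch of that ordering; struct node and for_each_descendant_post() are hypothetical stand-ins for the real css hierarchy and iterator.

```c
#include <stdio.h>

/* Hypothetical stand-in for a piece of the cgroup hierarchy; the kernel
 * walks the real hierarchy with css_for_each_descendant_post(). */
struct node {
	const char *name;
	struct node *children[4];	/* NULL-terminated */
};

/* Post-order: recurse into every child before handling the node itself,
 * so a node is always "reparented" after all of its descendants. */
static void for_each_descendant_post(struct node *n)
{
	for (int i = 0; n->children[i]; i++)
		for_each_descendant_post(n->children[i]);
	printf("reparent charges of %s\n", n->name);
}

int main(void)
{
	struct node grandchild = { "grandchild", { NULL } };
	struct node child      = { "child",      { &grandchild, NULL } };
	struct node parent     = { "parent",     { &child, NULL } };

	/* Prints grandchild, then child, then parent (the origin last). */
	for_each_descendant_post(&parent);
	return 0;
}
```

Because the origin css is the final stop of the walk, the loop added in the hunk below also covers the memcg being offlined itself, which is why the old direct mem_cgroup_reparent_charges(memcg) call is replaced rather than kept alongside it.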
Fixes: e5fca243abae ("cgroup: use a dedicated workqueue for cgroup destruction")
Signed-off-by: Filipe Brandenburger <filbranden@google.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Tejun Heo <tj@kernel.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: <stable@vger.kernel.org> [v3.10+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r-- | mm/memcontrol.c | 10 |
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9d17310be470..5b6b0039f725 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6595,6 +6595,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 	struct mem_cgroup_event *event, *tmp;
+	struct cgroup_subsys_state *iter;
 
 	/*
 	 * Unregister events and notify userspace.
@@ -6611,7 +6612,14 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 	kmem_cgroup_css_offline(memcg);
 
 	mem_cgroup_invalidate_reclaim_iterators(memcg);
-	mem_cgroup_reparent_charges(memcg);
+
+	/*
+	 * This requires that offlining is serialized. Right now that is
+	 * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
+	 */
+	css_for_each_descendant_post(iter, css)
+		mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
+
 	mem_cgroup_destroy_all_caches(memcg);
 	vmpressure_cleanup(&memcg->vmpressure);
 }