author | Glauber Costa <glommer@parallels.com> | 2012-05-29 18:07:10 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-29 19:22:28 -0400
commit | 3afe36b1fe7d1e3f66752bb9548a763942f3a104 (patch)
tree | 7cb138ee742517f2924c1cbc1ae18c4d6afab12b /mm/memcontrol.c
parent | fa9add641b1b1c564db916accac1db346e7a2759 (diff)
memcg: always free struct memcg through schedule_work()
Right now we free struct memcg with kfree() right after an RCU grace period,
but defer it when we need vfree() to get rid of that memory area. We do that
out of necessity, because vfree() must be called from process context. This
patch unifies the behavior by ensuring that even the kfree() happens in a
separate thread. The goal is to have a stable place to call the upcoming jump
label destruction function, outside the realm of the complicated and quite
far-reaching cgroup lock (which can't be held while holding either
cpu_hotplug.lock or jump_label_mutex).
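
For orientation, here is a minimal sketch of the memory layout this relies on: the RCU head used for the grace period and the work item used for the deferred free share storage in a union overlaid on the memsw counter, as the comment touched by the diff below spells out. The union members are the ones named in the patch; the surrounding fields and their order are illustrative only.

```c
/* Minimal sketch of the overlay in struct mem_cgroup (abridged). */
struct mem_cgroup {
        struct cgroup_subsys_state css;
        struct res_counter res;                  /* memory usage counter */
        union {
                struct res_counter memsw;        /* mem+swap usage counter */
                /* Used only while tearing the memcg down: */
                struct rcu_head rcu_freeing;     /* queued via call_rcu() */
                struct work_struct work_freeing; /* queued from the RCU callback */
        };
        /* ... many other fields omitted ... */
};
```

Because the three union members are never live at the same time, the deferred-free plumbing costs no extra space in the struct.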
[akpm@linux-foundation.org: tweak comment]
Signed-off-by: Glauber Costa <glommer@parallels.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Li Zefan <lizefan@huawei.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r-- | mm/memcontrol.c | 24
1 file changed, 13 insertions, 11 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index bb8d7d3cf302..6fbf50977f77 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -258,8 +258,8 @@ struct mem_cgroup {
                  */
                 struct rcu_head rcu_freeing;
                 /*
-                * But when using vfree(), that cannot be done at
-                * interrupt time, so we must then queue the work.
+                * We also need some space for a worker in deferred freeing.
+                * By the time we call it, rcu_freeing is no longer in use.
                  */
                 struct work_struct work_freeing;
         };
@@ -4702,23 +4702,28 @@ out_free:
 }
 
 /*
- * Helpers for freeing a vzalloc()ed mem_cgroup by RCU,
+ * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU,
  * but in process context. The work_freeing structure is overlaid
  * on the rcu_freeing structure, which itself is overlaid on memsw.
  */
-static void vfree_work(struct work_struct *work)
+static void free_work(struct work_struct *work)
 {
         struct mem_cgroup *memcg;
+        int size = sizeof(struct mem_cgroup);
 
         memcg = container_of(work, struct mem_cgroup, work_freeing);
-        vfree(memcg);
+        if (size < PAGE_SIZE)
+                kfree(memcg);
+        else
+                vfree(memcg);
 }
-static void vfree_rcu(struct rcu_head *rcu_head)
+
+static void free_rcu(struct rcu_head *rcu_head)
 {
         struct mem_cgroup *memcg;
 
         memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
-        INIT_WORK(&memcg->work_freeing, vfree_work);
+        INIT_WORK(&memcg->work_freeing, free_work);
         schedule_work(&memcg->work_freeing);
 }
 
@@ -4744,10 +4749,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
                 free_mem_cgroup_per_zone_info(memcg, node);
 
         free_percpu(memcg->stat);
-        if (sizeof(struct mem_cgroup) < PAGE_SIZE)
-                kfree_rcu(memcg, rcu_freeing);
-        else
-                call_rcu(&memcg->rcu_freeing, vfree_rcu);
+        call_rcu(&memcg->rcu_freeing, free_rcu);
 }
 
 static void mem_cgroup_get(struct mem_cgroup *memcg)
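
Read together, the "+" side of the hunks above gives the following freeing path; this is just the patched helpers assembled in one place for readability, with comments added to flag the execution context at each step.

```c
/* The helpers as they read with this patch applied (from the hunks above). */
static void free_work(struct work_struct *work)
{
        struct mem_cgroup *memcg;
        int size = sizeof(struct mem_cgroup);

        /* Workqueue, i.e. process context: either kfree() or vfree() is safe. */
        memcg = container_of(work, struct mem_cgroup, work_freeing);
        if (size < PAGE_SIZE)
                kfree(memcg);
        else
                vfree(memcg);
}

static void free_rcu(struct rcu_head *rcu_head)
{
        struct mem_cgroup *memcg;

        /* RCU callback (softirq context): only queue the work, never free here. */
        memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
        INIT_WORK(&memcg->work_freeing, free_work);
        schedule_work(&memcg->work_freeing);
}
```

__mem_cgroup_free() now ends unconditionally with call_rcu(&memcg->rcu_freeing, free_rcu), so every struct memcg, whether kmalloc()ed or vzalloc()ed, is released from a workqueue; that workqueue context is the stable place the changelog wants for the upcoming jump label teardown.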