aboutsummaryrefslogtreecommitdiffstats
path: root/mm/memcontrol.c
diff options
context:
space:
mode:
authorLi Zefan <lizefan@huawei.com>2013-07-08 19:00:34 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-07-09 13:33:24 -0400
commit4050377b509b326c14b275fedb2f69b46f37a7a9 (patch)
treedda709eadbb2f0bd388811c6efaaeff65349b824 /mm/memcontrol.c
parent10d5ebf40ff09db03b97cb177f24b9c7c8b4bb52 (diff)
memcg: use css_get/put for swap memcg
Use css_get/put instead of mem_cgroup_get/put. A simple replacement will do. The historical reason that memcg has its own refcnt instead of always using css_get/put, is that cgroup couldn't be removed if there're still css refs, so css refs can't be used as long-lived reference. The situation has changed so that rmdir a cgroup will succeed regardless css refs, but won't be freed until css refs goes down to 0. Signed-off-by: Li Zefan <lizefan@huawei.com> Acked-by: Michal Hocko <mhocko@suse.cz> Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Hugh Dickins <hughd@google.com> Cc: Tejun Heo <tj@kernel.org> Cc: Glauber Costa <glommer@openvz.org> Cc: Johannes Weiner <hannes@cmpxchg.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--mm/memcontrol.c26
1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index bdc9582585af..76c0c99b002f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4231,12 +4231,12 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
4231 unlock_page_cgroup(pc); 4231 unlock_page_cgroup(pc);
4232 /* 4232 /*
4233 * even after unlock, we have memcg->res.usage here and this memcg 4233 * even after unlock, we have memcg->res.usage here and this memcg
4234 * will never be freed. 4234 * will never be freed, so it's safe to call css_get().
4235 */ 4235 */
4236 memcg_check_events(memcg, page); 4236 memcg_check_events(memcg, page);
4237 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) { 4237 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
4238 mem_cgroup_swap_statistics(memcg, true); 4238 mem_cgroup_swap_statistics(memcg, true);
4239 mem_cgroup_get(memcg); 4239 css_get(&memcg->css);
4240 } 4240 }
4241 /* 4241 /*
4242 * Migration does not charge the res_counter for the 4242 * Migration does not charge the res_counter for the
@@ -4348,7 +4348,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
4348 4348
4349 /* 4349 /*
4350 * record memcg information, if swapout && memcg != NULL, 4350 * record memcg information, if swapout && memcg != NULL,
4351 * mem_cgroup_get() was called in uncharge(). 4351 * css_get() was called in uncharge().
4352 */ 4352 */
4353 if (do_swap_account && swapout && memcg) 4353 if (do_swap_account && swapout && memcg)
4354 swap_cgroup_record(ent, css_id(&memcg->css)); 4354 swap_cgroup_record(ent, css_id(&memcg->css));
@@ -4379,7 +4379,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
4379 if (!mem_cgroup_is_root(memcg)) 4379 if (!mem_cgroup_is_root(memcg))
4380 res_counter_uncharge(&memcg->memsw, PAGE_SIZE); 4380 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
4381 mem_cgroup_swap_statistics(memcg, false); 4381 mem_cgroup_swap_statistics(memcg, false);
4382 mem_cgroup_put(memcg); 4382 css_put(&memcg->css);
4383 } 4383 }
4384 rcu_read_unlock(); 4384 rcu_read_unlock();
4385} 4385}
@@ -4413,11 +4413,14 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
4413 * This function is only called from task migration context now. 4413 * This function is only called from task migration context now.
4414 * It postpones res_counter and refcount handling till the end 4414 * It postpones res_counter and refcount handling till the end
4415 * of task migration(mem_cgroup_clear_mc()) for performance 4415 * of task migration(mem_cgroup_clear_mc()) for performance
4416 * improvement. But we cannot postpone mem_cgroup_get(to) 4416 * improvement. But we cannot postpone css_get(to) because if
4417 * because if the process that has been moved to @to does 4417 * the process that has been moved to @to does swap-in, the
4418 * swap-in, the refcount of @to might be decreased to 0. 4418 * refcount of @to might be decreased to 0.
4419 *
4420 * We are in attach() phase, so the cgroup is guaranteed to be
4421 * alive, so we can just call css_get().
4419 */ 4422 */
4420 mem_cgroup_get(to); 4423 css_get(&to->css);
4421 return 0; 4424 return 0;
4422 } 4425 }
4423 return -EINVAL; 4426 return -EINVAL;
@@ -6718,6 +6721,7 @@ static void __mem_cgroup_clear_mc(void)
6718{ 6721{
6719 struct mem_cgroup *from = mc.from; 6722 struct mem_cgroup *from = mc.from;
6720 struct mem_cgroup *to = mc.to; 6723 struct mem_cgroup *to = mc.to;
6724 int i;
6721 6725
6722 /* we must uncharge all the leftover precharges from mc.to */ 6726 /* we must uncharge all the leftover precharges from mc.to */
6723 if (mc.precharge) { 6727 if (mc.precharge) {
@@ -6738,7 +6742,9 @@ static void __mem_cgroup_clear_mc(void)
6738 if (!mem_cgroup_is_root(mc.from)) 6742 if (!mem_cgroup_is_root(mc.from))
6739 res_counter_uncharge(&mc.from->memsw, 6743 res_counter_uncharge(&mc.from->memsw,
6740 PAGE_SIZE * mc.moved_swap); 6744 PAGE_SIZE * mc.moved_swap);
6741 __mem_cgroup_put(mc.from, mc.moved_swap); 6745
6746 for (i = 0; i < mc.moved_swap; i++)
6747 css_put(&mc.from->css);
6742 6748
6743 if (!mem_cgroup_is_root(mc.to)) { 6749 if (!mem_cgroup_is_root(mc.to)) {
6744 /* 6750 /*
@@ -6748,7 +6754,7 @@ static void __mem_cgroup_clear_mc(void)
6748 res_counter_uncharge(&mc.to->res, 6754 res_counter_uncharge(&mc.to->res,
6749 PAGE_SIZE * mc.moved_swap); 6755 PAGE_SIZE * mc.moved_swap);
6750 } 6756 }
6751 /* we've already done mem_cgroup_get(mc.to) */ 6757 /* we've already done css_get(mc.to) */
6752 mc.moved_swap = 0; 6758 mc.moved_swap = 0;
6753 } 6759 }
6754 memcg_oom_recover(from); 6760 memcg_oom_recover(from);