path: root/mm/memcontrol.c
author	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2010-08-10 21:02:58 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-11 11:59:18 -0400
commit	2bd9bb206b338888b226e70139a25a67d10007f0 (patch)
tree	ac556b1bfa52f06fe2998371d10edec38d5cf2a9 /mm/memcontrol.c
parent	4b53433468c87794b523e4683fbd4e8e8aca1f63 (diff)
memcg: clean up waiting move acct
Currently, to check whether a memcg is under task-account-moving, we do css_tryget() against mc.to and mc.from, but this just complicates things. This patch makes the check simpler.

The patch adds a spinlock to move_charge_struct and guards modification of mc.to and mc.from with it. With the lock held, we no longer have to reason about complicated races around this non-critical path.

[balbir@linux.vnet.ibm.com: don't crash on a null memcg being passed]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
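As an editor's illustration of the pattern (not part of the commit): the patch replaces per-check css_tryget()/css_put() reference juggling with a single spinlock that guards the from/to pair, so readers and writers simply serialize on the lock. Below is a minimal user-space C sketch of that pattern, with pthread spinlocks standing in for the kernel's spinlock_t; all names here (move_state, start_move, under_move, ...) are hypothetical, not kernel identifiers.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct memcg;				/* stands in for struct mem_cgroup */

static struct move_state {
	pthread_spinlock_t lock;	/* guards from and to */
	struct memcg *from;
	struct memcg *to;
} ms;

/* pthread spinlocks have no static initializer; call once at startup. */
static void move_state_init(void)
{
	pthread_spin_init(&ms.lock, PTHREAD_PROCESS_PRIVATE);
}

/* Writer side: publish or clear the pair only while holding the lock. */
static void start_move(struct memcg *from, struct memcg *to)
{
	pthread_spin_lock(&ms.lock);
	ms.from = from;
	ms.to = to;
	pthread_spin_unlock(&ms.lock);
}

static void clear_move(void)
{
	pthread_spin_lock(&ms.lock);
	ms.from = NULL;
	ms.to = NULL;
	pthread_spin_unlock(&ms.lock);
}

/*
 * Reader side: instead of taking and dropping a reference on each
 * pointer just to test it (the css_tryget()/css_put() dance the patch
 * removes), read both pointers under the same lock; the writer cannot
 * change them while we hold it.
 */
static bool under_move(struct memcg *g)
{
	bool ret;

	pthread_spin_lock(&ms.lock);
	ret = ms.from && (ms.from == g || ms.to == g);
	pthread_spin_unlock(&ms.lock);
	return ret;
}

In the kernel, the lock only has to guarantee that the pair is read consistently; the memcgs themselves stay alive while mc.from/mc.to are set, since the move path holds them for the duration, which is what lets the patch drop the tryget-based validation.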
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	51
1 file changed, 29 insertions(+), 22 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 991860e6e0a7..27981e705b0a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -268,6 +268,7 @@ enum move_type {
 
 /* "mc" and its members are protected by cgroup_mutex */
 static struct move_charge_struct {
+	spinlock_t	  lock; /* for from, to, moving_task */
 	struct mem_cgroup *from;
 	struct mem_cgroup *to;
 	unsigned long precharge;
@@ -276,6 +277,7 @@ static struct move_charge_struct {
 	struct task_struct *moving_task;	/* a task moving charges */
 	wait_queue_head_t waitq;		/* a waitq for other context */
 } mc = {
+	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 };
 
@@ -1051,26 +1053,24 @@ static unsigned int get_swappiness(struct mem_cgroup *memcg)
 
 static bool mem_cgroup_under_move(struct mem_cgroup *mem)
 {
-	struct mem_cgroup *from = mc.from;
-	struct mem_cgroup *to = mc.to;
+	struct mem_cgroup *from;
+	struct mem_cgroup *to;
 	bool ret = false;
-
-	if (from == mem || to == mem)
-		return true;
-
-	if (!from || !to || !mem->use_hierarchy)
-		return false;
-
-	rcu_read_lock();
-	if (css_tryget(&from->css)) {
-		ret = css_is_ancestor(&from->css, &mem->css);
-		css_put(&from->css);
-	}
-	if (!ret && css_tryget(&to->css)) {
-		ret = css_is_ancestor(&to->css, &mem->css);
-		css_put(&to->css);
-	}
-	rcu_read_unlock();
+	/*
+	 * Unlike task_move routines, we access mc.to, mc.from not under
+	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
+	 */
+	spin_lock(&mc.lock);
+	from = mc.from;
+	to = mc.to;
+	if (!from)
+		goto unlock;
+	if (from == mem || to == mem
+	    || (mem->use_hierarchy && css_is_ancestor(&from->css, &mem->css))
+	    || (mem->use_hierarchy && css_is_ancestor(&to->css, &mem->css)))
+		ret = true;
+unlock:
+	spin_unlock(&mc.lock);
 	return ret;
 }
 
@@ -1406,7 +1406,7 @@ static void memcg_wakeup_oom(struct mem_cgroup *mem)
 
 static void memcg_oom_recover(struct mem_cgroup *mem)
 {
-	if (atomic_read(&mem->oom_lock))
+	if (mem && atomic_read(&mem->oom_lock))
 		memcg_wakeup_oom(mem);
 }
 
@@ -4441,11 +4441,13 @@ static int mem_cgroup_precharge_mc(struct mm_struct *mm)
 
 static void mem_cgroup_clear_mc(void)
 {
+	struct mem_cgroup *from = mc.from;
+	struct mem_cgroup *to = mc.to;
+
 	/* we must uncharge all the leftover precharges from mc.to */
 	if (mc.precharge) {
 		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
 		mc.precharge = 0;
-		memcg_oom_recover(mc.to);
 	}
 	/*
 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
@@ -4454,7 +4456,6 @@ static void mem_cgroup_clear_mc(void)
 	if (mc.moved_charge) {
 		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
 		mc.moved_charge = 0;
-		memcg_oom_recover(mc.from);
 	}
 	/* we must fixup refcnts and charges */
 	if (mc.moved_swap) {
@@ -4479,9 +4480,13 @@ static void mem_cgroup_clear_mc(void)
 
 		mc.moved_swap = 0;
 	}
+	spin_lock(&mc.lock);
 	mc.from = NULL;
 	mc.to = NULL;
 	mc.moving_task = NULL;
+	spin_unlock(&mc.lock);
+	memcg_oom_recover(from);
+	memcg_oom_recover(to);
 	wake_up_all(&mc.waitq);
 }
 
@@ -4510,12 +4515,14 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
 			VM_BUG_ON(mc.moved_charge);
 			VM_BUG_ON(mc.moved_swap);
 			VM_BUG_ON(mc.moving_task);
+			spin_lock(&mc.lock);
 			mc.from = from;
 			mc.to = mem;
 			mc.precharge = 0;
 			mc.moved_charge = 0;
 			mc.moved_swap = 0;
 			mc.moving_task = current;
+			spin_unlock(&mc.lock);
 
 			ret = mem_cgroup_precharge_mc(mm);
 			if (ret)