aboutsummaryrefslogtreecommitdiffstats
path: root/mm/memcontrol.c
diff options
context:
space:
mode:
authorJohannes Weiner <hannes@cmpxchg.org>2014-12-10 18:44:25 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-12-10 20:41:08 -0500
commit312722cbb2a6e12b74177f025a8ee7189816b04b (patch)
tree12d1d918accab6dd2b7f94a058071b2372f33352 /mm/memcontrol.c
parente544a4e74e02108035de69f97fde7bdf19dba978 (diff)
mm: memcontrol: shorten the page statistics update slowpath
While moving charges from one memcg to another, page stat updates must acquire the old memcg's move_lock to prevent double accounting. That situation is denoted by an increased memcg->moving_account. However, the charge moving code declares this way too early, even before summing up the RSS and pre-allocating destination charges. Shorten this slowpath mode by increasing memcg->moving_account only right before walking the task's address space with the intention of actually moving the pages. Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Michal Hocko <mhocko@suse.cz> Reviewed-by: Vladimir Davydov <vdavydov@parallels.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--mm/memcontrol.c21
1 files changed, 8 insertions, 13 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9d30129b0d4a..9073d07c1149 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5333,8 +5333,6 @@ static void __mem_cgroup_clear_mc(void)
5333 5333
5334static void mem_cgroup_clear_mc(void) 5334static void mem_cgroup_clear_mc(void)
5335{ 5335{
5336 struct mem_cgroup *from = mc.from;
5337
5338 /* 5336 /*
5339 * we must clear moving_task before waking up waiters at the end of 5337 * we must clear moving_task before waking up waiters at the end of
5340 * task migration. 5338 * task migration.
@@ -5345,8 +5343,6 @@ static void mem_cgroup_clear_mc(void)
5345 mc.from = NULL; 5343 mc.from = NULL;
5346 mc.to = NULL; 5344 mc.to = NULL;
5347 spin_unlock(&mc.lock); 5345 spin_unlock(&mc.lock);
5348
5349 atomic_dec(&from->moving_account);
5350} 5346}
5351 5347
5352static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, 5348static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
@@ -5380,15 +5376,6 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
5380 VM_BUG_ON(mc.moved_charge); 5376 VM_BUG_ON(mc.moved_charge);
5381 VM_BUG_ON(mc.moved_swap); 5377 VM_BUG_ON(mc.moved_swap);
5382 5378
5383 /*
5384 * Signal mem_cgroup_begin_page_stat() to take
5385 * the memcg's move_lock while we're moving
5386 * its pages to another memcg. Then wait for
5387 * already started RCU-only updates to finish.
5388 */
5389 atomic_inc(&from->moving_account);
5390 synchronize_rcu();
5391
5392 spin_lock(&mc.lock); 5379 spin_lock(&mc.lock);
5393 mc.from = from; 5380 mc.from = from;
5394 mc.to = memcg; 5381 mc.to = memcg;
@@ -5520,6 +5507,13 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
5520 struct vm_area_struct *vma; 5507 struct vm_area_struct *vma;
5521 5508
5522 lru_add_drain_all(); 5509 lru_add_drain_all();
5510 /*
5511 * Signal mem_cgroup_begin_page_stat() to take the memcg's
5512 * move_lock while we're moving its pages to another memcg.
5513 * Then wait for already started RCU-only updates to finish.
5514 */
5515 atomic_inc(&mc.from->moving_account);
5516 synchronize_rcu();
5523retry: 5517retry:
5524 if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 5518 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5525 /* 5519 /*
@@ -5552,6 +5546,7 @@ retry:
5552 break; 5546 break;
5553 } 5547 }
5554 up_read(&mm->mmap_sem); 5548 up_read(&mm->mmap_sem);
5549 atomic_dec(&mc.from->moving_account);
5555} 5550}
5556 5551
5557static void mem_cgroup_move_task(struct cgroup_subsys_state *css, 5552static void mem_cgroup_move_task(struct cgroup_subsys_state *css,