path: root/mm/memcontrol.c
author	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2010-03-10 18:22:30 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-12 18:52:37 -0500
commit	430e48631e72aeab74d844c57b441f98a2e36eee (patch)
tree	e1b3eadc1b5c1871db0bf1247af8684b53cae12a /mm/memcontrol.c
parent	c62b1a3b31b5e27a6c5c2e91cc5ce05fdb6344d0 (diff)
memcg: update threshold and softlimit at commit
Presently, move_task does "batched" precharge. Because res_counter and css refcount manipulation are not scalable operations for memcg, try_charge() tends to be done in a batched manner when allowed.

Now, softlimit and threshold check their event counter in try_charge, but the charge is not a per-page event, and the event counter is not updated at charge(). Moreover, precharge doesn't pass a "page" to try_charge(), so the softlimit tree is never updated until uncharge() causes an event.

So the best place to check the event counter is commit_charge(), which is a per-page event by its nature. This patch moves the checks there.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
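[Editor's illustration] The core of the change: charging may be batched (one try_charge covers many pages), but softlimit/threshold events are per-page, so the event check belongs in the per-page commit step. Below is a minimal, self-contained userspace sketch of that pattern; the names (try_charge, commit_charge, THRESHOLD_EVENTS) are illustrative stand-ins, not the real memcg API.

#include <stdio.h>

#define THRESHOLD_EVENTS 4	/* run an event check every 4 pages */

struct memcg {
	long charged;	/* batched resource usage (res_counter-like) */
	long events;	/* per-page event counter */
};

/*
 * Batched: may account many pages at once. There is no single page
 * at hand here, so per-page event checks cannot correctly live here.
 */
static void try_charge(struct memcg *m, long npages)
{
	m->charged += npages;
}

/*
 * Per-page by nature: the natural place to bump the event counter
 * and run the softlimit/threshold checks.
 */
static void commit_charge(struct memcg *m, int page_id)
{
	if (++m->events % THRESHOLD_EVENTS == 0)
		printf("page %d: softlimit/threshold check fires\n", page_id);
}

int main(void)
{
	struct memcg m = { 0, 0 };
	int i;

	try_charge(&m, 8);		/* one batched precharge */
	for (i = 0; i < 8; i++)		/* eight per-page commits */
		commit_charge(&m, i);
	return 0;
}

With the checks in try_charge(), a batched precharge of 8 pages would trigger the check at most once; with them in commit_charge(), the counter advances once per committed page, which is what the patch intends.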
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	38
1 file changed, 18 insertions(+), 20 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9c9dfcf7a6d1..006fe142d4ba 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1424,8 +1424,7 @@ static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
  * oom-killer can be invoked.
  */
 static int __mem_cgroup_try_charge(struct mm_struct *mm,
-			gfp_t gfp_mask, struct mem_cgroup **memcg,
-			bool oom, struct page *page)
+			gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
 {
 	struct mem_cgroup *mem, *mem_over_limit;
 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
@@ -1463,7 +1462,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 		unsigned long flags = 0;
 
 		if (consume_stock(mem))
-			goto charged;
+			goto done;
 
 		ret = res_counter_charge(&mem->res, csize, &fail_res);
 		if (likely(!ret)) {
@@ -1558,16 +1557,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 	}
 	if (csize > PAGE_SIZE)
 		refill_stock(mem, csize - PAGE_SIZE);
-charged:
-	/*
-	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
-	 * if they exceeds softlimit.
-	 */
-	if (page && mem_cgroup_soft_limit_check(mem))
-		mem_cgroup_update_tree(mem, page);
 done:
-	if (mem_cgroup_threshold_check(mem))
-		mem_cgroup_threshold(mem);
 	return 0;
 nomem:
 	css_put(&mem->css);
@@ -1691,6 +1681,16 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
 	mem_cgroup_charge_statistics(mem, pc, true);
 
 	unlock_page_cgroup(pc);
+	/*
+	 * "charge_statistics" updated event counter. Then, check it.
+	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
+	 * if they exceeds softlimit.
+	 */
+	if (mem_cgroup_soft_limit_check(mem))
+		mem_cgroup_update_tree(mem, pc->page);
+	if (mem_cgroup_threshold_check(mem))
+		mem_cgroup_threshold(mem);
+
 }
 
 /**
@@ -1788,7 +1788,7 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
 		goto put;
 
 	parent = mem_cgroup_from_cont(pcg);
-	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, page);
+	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
 	if (ret || !parent)
 		goto put_back;
 
@@ -1824,7 +1824,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	prefetchw(pc);
 
 	mem = memcg;
-	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page);
+	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
 	if (ret || !mem)
 		return ret;
 
@@ -1944,14 +1944,14 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
 	if (!mem)
 		goto charge_cur_mm;
 	*ptr = mem;
-	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, page);
+	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
 	/* drop extra refcnt from tryget */
 	css_put(&mem->css);
 	return ret;
 charge_cur_mm:
 	if (unlikely(!mm))
 		mm = &init_mm;
-	return __mem_cgroup_try_charge(mm, mask, ptr, true, page);
+	return __mem_cgroup_try_charge(mm, mask, ptr, true);
 }
 
 static void
@@ -2340,8 +2340,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
 	unlock_page_cgroup(pc);
 
 	if (mem) {
-		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
-						page);
+		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
 		css_put(&mem->css);
 	}
 	*ptr = mem;
@@ -3872,8 +3871,7 @@ one_by_one:
 		batch_count = PRECHARGE_COUNT_AT_ONCE;
 		cond_resched();
 	}
-	ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem,
-					false, NULL);
+	ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
 	if (ret || !mem)
 		/* mem_cgroup_clear_mc() will do uncharge later */
 		return -ENOMEM;