author     Michal Hocko <mhocko@suse.cz>                    2014-08-06 19:05:53 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-06 21:01:17 -0400
commit     0029e19ebf84dcd70b226820daa7747b28d5956d
tree       d643d76a8a45b75a5abd80e2731d799e2e1e7c3b  /mm/memcontrol.c
parent     9b1306192d335759a6cf2f3b404c49e811e5f953
mm: memcontrol: remove explicit OOM parameter in charge path
For the page allocator, __GFP_NORETRY implies that no OOM should be
triggered, whereas memcg has an explicit parameter to disable OOM.

The only callsites that want OOM disabled are THP charges and charge
moving. THP already uses __GFP_NORETRY and charge moving can use it as
well - one full reclaim cycle should be plenty. Switch it over, then
remove the OOM parameter.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vladimir Davydov <vdavydov@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
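The patch folds the boolean into the gfp mask. Below is a minimal,
userspace-compilable sketch of the OOM decision inside
mem_cgroup_try_charge() before and after the change; __GFP_NORETRY and
MEM_CGROUP_RECLAIM_RETRIES are the real kernel names, while the flag
value, should_oom_old(), should_oom_new() and main() are illustrative
stand-ins rather than kernel code:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_NORETRY			0x1000u	/* v3.16-era value from gfp.h */
#define MEM_CGROUP_RECLAIM_RETRIES	5	/* as in mm/memcontrol.c */

/* Before the patch: callers pass an explicit 'oom' boolean. */
static bool should_oom_old(gfp_t gfp_mask, bool oom, int retries_left)
{
	if (gfp_mask & __GFP_NORETRY)	/* one reclaim cycle, then give up */
		return false;
	if (retries_left > 0)		/* reclaim again instead of OOMing */
		return false;
	return oom;			/* the parameter this patch removes */
}

/* After the patch: __GFP_NORETRY alone suppresses the memcg OOM killer. */
static bool should_oom_new(gfp_t gfp_mask, int retries_left)
{
	if (gfp_mask & __GFP_NORETRY)
		return false;
	return retries_left == 0;
}

int main(void)
{
	/* Old API: THP charges suppressed the OOM killer explicitly. */
	printf("old, oom=false:      %d\n", should_oom_old(0, false, 0));
	/* New API: the same request is encoded in the gfp mask. */
	printf("new, __GFP_NORETRY:  %d\n", should_oom_new(__GFP_NORETRY, 0));
	/* Ordinary charges still OOM once reclaim retries are exhausted. */
	printf("new, plain mask:     %d\n", should_oom_new(0, 0));
	/* With retries remaining, reclaim runs again before any OOM. */
	printf("new, retries left:   %d\n",
	       should_oom_new(0, MEM_CGROUP_RECLAIM_RETRIES));
	return 0;
}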
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  32
1 file changed, 10 insertions(+), 22 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3069d6420b0e..8aaca8267dfe 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2555,15 +2555,13 @@ static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
  * mem_cgroup_try_charge - try charging a memcg
  * @memcg: memcg to charge
  * @nr_pages: number of pages to charge
- * @oom: trigger OOM if reclaim fails
  *
  * Returns 0 if @memcg was charged successfully, -EINTR if the charge
  * was bypassed to root_mem_cgroup, and -ENOMEM if the charge failed.
  */
 static int mem_cgroup_try_charge(struct mem_cgroup *memcg,
 				 gfp_t gfp_mask,
-				 unsigned int nr_pages,
-				 bool oom)
+				 unsigned int nr_pages)
 {
 	unsigned int batch = max(CHARGE_BATCH, nr_pages);
 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
@@ -2647,9 +2645,6 @@ retry:
 	if (fatal_signal_pending(current))
 		goto bypass;
 
-	if (!oom)
-		goto nomem;
-
 	mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(batch));
 nomem:
 	if (!(gfp_mask & __GFP_NOFAIL))
@@ -2675,15 +2670,14 @@ done:
  */
 static struct mem_cgroup *mem_cgroup_try_charge_mm(struct mm_struct *mm,
 						   gfp_t gfp_mask,
-						   unsigned int nr_pages,
-						   bool oom)
+						   unsigned int nr_pages)
 
 {
 	struct mem_cgroup *memcg;
 	int ret;
 
 	memcg = get_mem_cgroup_from_mm(mm);
-	ret = mem_cgroup_try_charge(memcg, gfp_mask, nr_pages, oom);
+	ret = mem_cgroup_try_charge(memcg, gfp_mask, nr_pages);
 	css_put(&memcg->css);
 	if (ret == -EINTR)
 		memcg = root_mem_cgroup;
@@ -2900,8 +2894,7 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 	if (ret)
 		return ret;
 
-	ret = mem_cgroup_try_charge(memcg, gfp, size >> PAGE_SHIFT,
-				    oom_gfp_allowed(gfp));
+	ret = mem_cgroup_try_charge(memcg, gfp, size >> PAGE_SHIFT);
 	if (ret == -EINTR) {
 		/*
 		 * mem_cgroup_try_charge() chosed to bypass to root due to
@@ -3650,7 +3643,6 @@ int mem_cgroup_charge_anon(struct page *page,
 {
 	unsigned int nr_pages = 1;
 	struct mem_cgroup *memcg;
-	bool oom = true;
 
 	if (mem_cgroup_disabled())
 		return 0;
@@ -3662,14 +3654,9 @@ int mem_cgroup_charge_anon(struct page *page,
 	if (PageTransHuge(page)) {
 		nr_pages <<= compound_order(page);
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
-		/*
-		 * Never OOM-kill a process for a huge page.  The
-		 * fault handler will fall back to regular pages.
-		 */
-		oom = false;
 	}
 
-	memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, nr_pages, oom);
+	memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, nr_pages);
 	if (!memcg)
 		return -ENOMEM;
 	__mem_cgroup_commit_charge(memcg, page, nr_pages,
@@ -3706,7 +3693,7 @@ static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
 	memcg = try_get_mem_cgroup_from_page(page);
 	if (!memcg)
 		memcg = get_mem_cgroup_from_mm(mm);
-	ret = mem_cgroup_try_charge(memcg, mask, 1, true);
+	ret = mem_cgroup_try_charge(memcg, mask, 1);
 	css_put(&memcg->css);
 	if (ret == -EINTR)
 		memcg = root_mem_cgroup;
@@ -3733,7 +3720,7 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
 	if (!PageSwapCache(page)) {
 		struct mem_cgroup *memcg;
 
-		memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
+		memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1);
 		if (!memcg)
 			return -ENOMEM;
 		*memcgp = memcg;
@@ -3802,7 +3789,7 @@ int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
 		return 0;
 	}
 
-	memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
+	memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1);
 	if (!memcg)
 		return -ENOMEM;
 	__mem_cgroup_commit_charge(memcg, page, 1, type, false);
@@ -6440,7 +6427,8 @@ one_by_one:
 			batch_count = PRECHARGE_COUNT_AT_ONCE;
 			cond_resched();
 		}
-		ret = mem_cgroup_try_charge(memcg, GFP_KERNEL, 1, false);
+		ret = mem_cgroup_try_charge(memcg,
+					    GFP_KERNEL & ~__GFP_NORETRY, 1);
 		if (ret)
 			/* mem_cgroup_clear_mc() will do uncharge later */
 			return ret;
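One detail of the final hunk deserves a note: GFP_KERNEL is
__GFP_WAIT | __GFP_IO | __GFP_FS and never contains __GFP_NORETRY, so
GFP_KERNEL & ~__GFP_NORETRY evaluates to plain GFP_KERNEL, whereas the
"switch it over" described in the commit message corresponds to setting
the flag, GFP_KERNEL | __GFP_NORETRY. A standalone check using the
v3.16-era flag values (an illustration, not kernel code):

#include <assert.h>
#include <stdio.h>

/* Flag values as defined in include/linux/gfp.h around v3.16. */
#define __GFP_WAIT	0x10u
#define __GFP_IO	0x40u
#define __GFP_FS	0x80u
#define __GFP_NORETRY	0x1000u
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)

int main(void)
{
	/* Clearing a bit GFP_KERNEL never sets leaves the mask unchanged. */
	assert((GFP_KERNEL & ~__GFP_NORETRY) == GFP_KERNEL);
	/* Setting the bit produces the one-reclaim-cycle, no-OOM mask. */
	assert((GFP_KERNEL | __GFP_NORETRY) != GFP_KERNEL);
	printf("GFP_KERNEL & ~__GFP_NORETRY == GFP_KERNEL\n");
	return 0;
}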