path: root/mm/memory.c
author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2016-01-15 19:52:20 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-15 20:56:32 -0500
commit	f627c2f53786b0445abca47f6aa84c96a1fffec2 (patch)
tree	a2c0a52a4448ad779d9027c943eb8e1217ae2504 /mm/memory.c
parent	d281ee6145183594788ab6d5b55f8d144e69eace (diff)
memcg: adjust to support new THP refcounting
As with rmap, with the new refcounting we cannot rely on PageTransHuge() to check whether we need to charge the size of a huge page to the cgroup. We need information from the caller to know whether the page was mapped with a PMD or a PTE.

We uncharge when the last reference to the page is gone. At that point, if we see PageTransHuge(), it means we need to uncharge the whole huge page.

The tricky part is partial unmap -- when we try to unmap only part of a huge page. We do no special handling of this situation, meaning we don't uncharge that part of the huge page unless the last user is gone or split_huge_page() is triggered. If cgroup memory pressure occurs, the partially unmapped page will be split through the shrinker. This should be good enough.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
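For orientation only (not part of the patch), here is a minimal sketch of the three-step memcg charge pattern that the hunks below are converted to. The example_charge_new_anon_page() helper and its simplified fault-handler context are hypothetical; the mem_cgroup_try_charge(), mem_cgroup_commit_charge(), mem_cgroup_cancel_charge() and page_add_new_anon_rmap() calls, including the new trailing "compound" argument, follow the signatures used in the diff. All mm/memory.c fault paths map with a PTE, so the flag is false here; PMD-mapped THP paths (mm/huge_memory.c) would pass true.

/*
 * Hypothetical sketch: the charge pattern used by the PTE fault paths
 * after this patch.  "false" in the last argument means "not a compound
 * (PMD-mapped) page".
 */
#include <linux/memcontrol.h>
#include <linux/rmap.h>
#include <linux/swap.h>

static int example_charge_new_anon_page(struct page *page,
					struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address)
{
	struct mem_cgroup *memcg;

	/* Step 1: reserve the charge; last arg false == PTE-mapped page. */
	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
		return -ENOMEM;

	/*
	 * If anything fails between try_charge and commit_charge, the
	 * reservation must be undone with:
	 *	mem_cgroup_cancel_charge(page, memcg, false);
	 */

	/* Step 2: map the page, then commit the reserved charge. */
	page_add_new_anon_rmap(page, vma, address, false);
	mem_cgroup_commit_charge(page, memcg, false, false);
	lru_cache_add_active_or_unevictable(page, vma);
	return 0;
}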
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index f964d190ce83..a021c295e88d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2087,7 +2087,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
 		cow_user_page(new_page, old_page, address, vma);
 	}
 
-	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg))
+	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))
 		goto oom_free_new;
 
 	__SetPageUptodate(new_page);
@@ -2119,7 +2119,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 	ptep_clear_flush_notify(vma, address, page_table);
 	page_add_new_anon_rmap(new_page, vma, address, false);
-	mem_cgroup_commit_charge(new_page, memcg, false);
+	mem_cgroup_commit_charge(new_page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(new_page, vma);
 	/*
 	 * We call the notify macro here because, when using secondary
@@ -2158,7 +2158,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
 		new_page = old_page;
 		page_copied = 1;
 	} else {
-		mem_cgroup_cancel_charge(new_page, memcg);
+		mem_cgroup_cancel_charge(new_page, memcg, false);
 	}
 
 	if (new_page)
@@ -2533,7 +2533,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_page;
 	}
 
-	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg)) {
+	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false)) {
 		ret = VM_FAULT_OOM;
 		goto out_page;
 	}
@@ -2575,10 +2575,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	set_pte_at(mm, address, page_table, pte);
 	if (page == swapcache) {
 		do_page_add_anon_rmap(page, vma, address, exclusive);
-		mem_cgroup_commit_charge(page, memcg, true);
+		mem_cgroup_commit_charge(page, memcg, true, false);
 	} else { /* ksm created a completely new copy */
 		page_add_new_anon_rmap(page, vma, address, false);
-		mem_cgroup_commit_charge(page, memcg, false);
+		mem_cgroup_commit_charge(page, memcg, false, false);
 		lru_cache_add_active_or_unevictable(page, vma);
 	}
 
@@ -2613,7 +2613,7 @@ unlock:
 out:
 	return ret;
 out_nomap:
-	mem_cgroup_cancel_charge(page, memcg);
+	mem_cgroup_cancel_charge(page, memcg, false);
 	pte_unmap_unlock(page_table, ptl);
 out_page:
 	unlock_page(page);
@@ -2707,7 +2707,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!page)
 		goto oom;
 
-	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
+	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
 		goto oom_free_page;
 
 	/*
@@ -2728,7 +2728,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	/* Deliver the page fault to userland, check inside PT lock */
 	if (userfaultfd_missing(vma)) {
 		pte_unmap_unlock(page_table, ptl);
-		mem_cgroup_cancel_charge(page, memcg);
+		mem_cgroup_cancel_charge(page, memcg, false);
 		page_cache_release(page);
 		return handle_userfault(vma, address, flags,
 					VM_UFFD_MISSING);
@@ -2736,7 +2736,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	inc_mm_counter_fast(mm, MM_ANONPAGES);
 	page_add_new_anon_rmap(page, vma, address, false);
-	mem_cgroup_commit_charge(page, memcg, false);
+	mem_cgroup_commit_charge(page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(page, vma);
 setpte:
 	set_pte_at(mm, address, page_table, entry);
@@ -2747,7 +2747,7 @@ unlock:
 	pte_unmap_unlock(page_table, ptl);
 	return 0;
 release:
-	mem_cgroup_cancel_charge(page, memcg);
+	mem_cgroup_cancel_charge(page, memcg, false);
 	page_cache_release(page);
 	goto unlock;
 oom_free_page:
@@ -3000,7 +3000,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!new_page)
 		return VM_FAULT_OOM;
 
-	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg)) {
+	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) {
 		page_cache_release(new_page);
 		return VM_FAULT_OOM;
 	}
@@ -3029,7 +3029,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto uncharge_out;
 	}
 	do_set_pte(vma, address, new_page, pte, true, true);
-	mem_cgroup_commit_charge(new_page, memcg, false);
+	mem_cgroup_commit_charge(new_page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(new_page, vma);
 	pte_unmap_unlock(pte, ptl);
 	if (fault_page) {
@@ -3044,7 +3044,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 	return ret;
 uncharge_out:
-	mem_cgroup_cancel_charge(new_page, memcg);
+	mem_cgroup_cancel_charge(new_page, memcg, false);
 	page_cache_release(new_page);
 	return ret;
 }