author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2016-01-15 19:52:20 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-15 20:56:32 -0500
commit	f627c2f53786b0445abca47f6aa84c96a1fffec2 (patch)
tree	a2c0a52a4448ad779d9027c943eb8e1217ae2504
parent	d281ee6145183594788ab6d5b55f8d144e69eace (diff)
memcg: adjust to support new THP refcounting
As with rmap, with the new refcounting we cannot rely on PageTransHuge() to
check whether we need to charge the size of a huge page to the cgroup.  We
need the caller to tell us whether the page was mapped with a PMD or a PTE.

We uncharge when the last reference to the page is gone.  At that point, if
we see PageTransHuge(), it means we need to uncharge the whole huge page.

The tricky part is partial unmap -- when we try to unmap only part of a huge
page.  We don't handle this case specially: the partially unmapped part of
the huge page is not uncharged until the last user is gone or
split_huge_page() is triggered.  If the cgroup comes under memory pressure,
the partially unmapped page will be split through the shrinker.  This should
be good enough.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
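For orientation before reading the hunks: the three charge entry points gain a
trailing "compound" argument, and memcg derives the charge size from it rather
than probing PageTransHuge() on its own.  The lines below are a condensed
excerpt of how the API reads after this patch (quoted from the
include/linux/memcontrol.h hunk that follows; not a standalone compilation
unit):

/*
 * Charge API after this patch.  "compound" is false for PTE-mapped
 * pages and true for PMD-mapped THP; internally memcg now computes
 * nr_pages = compound ? hpage_nr_pages(page) : 1 at charge, commit
 * and cancel time instead of testing PageTransHuge() itself.
 */
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
			      bool compound);

PTE-mapped callers (do_anonymous_page(), __replace_page(), the page cache,
shmem and swap paths) pass false; the PMD paths in mm/huge_memory.c
(__do_huge_pmd_anonymous_page(), the THP COW path, collapse_huge_page())
pass true.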
-rw-r--r--	include/linux/memcontrol.h	16
-rw-r--r--	kernel/events/uprobes.c	7
-rw-r--r--	mm/filemap.c	8
-rw-r--r--	mm/huge_memory.c	32
-rw-r--r--	mm/memcontrol.c	62
-rw-r--r--	mm/memory.c	28
-rw-r--r--	mm/shmem.c	21
-rw-r--r--	mm/swapfile.c	9
-rw-r--r--	mm/userfaultfd.c	6
9 files changed, 92 insertions, 97 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 2292468f2a30..189f04d4d2ec 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -280,10 +280,12 @@ static inline void mem_cgroup_events(struct mem_cgroup *memcg,
 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
 
 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
-			  gfp_t gfp_mask, struct mem_cgroup **memcgp);
+			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
+			  bool compound);
 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
-			      bool lrucare);
-void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
+			      bool lrucare, bool compound);
+void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
+			      bool compound);
 void mem_cgroup_uncharge(struct page *page);
 void mem_cgroup_uncharge_list(struct list_head *page_list);
 
@@ -515,7 +517,8 @@ static inline bool mem_cgroup_low(struct mem_cgroup *root,
 
 static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
-					struct mem_cgroup **memcgp)
+					struct mem_cgroup **memcgp,
+					bool compound)
 {
	*memcgp = NULL;
	return 0;
@@ -523,12 +526,13 @@ static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 
 static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
-					    bool lrucare)
+					    bool lrucare, bool compound)
 {
 }
 
 static inline void mem_cgroup_cancel_charge(struct page *page,
-					    struct mem_cgroup *memcg)
+					    struct mem_cgroup *memcg,
+					    bool compound)
 {
 }
 
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 060c7a0edfdf..0167679182c0 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -161,7 +161,8 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
	const unsigned long mmun_end = addr + PAGE_SIZE;
	struct mem_cgroup *memcg;
 
-	err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg);
+	err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg,
+			false);
	if (err)
		return err;
 
@@ -176,7 +177,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 
	get_page(kpage);
	page_add_new_anon_rmap(kpage, vma, addr, false);
-	mem_cgroup_commit_charge(kpage, memcg, false);
+	mem_cgroup_commit_charge(kpage, memcg, false, false);
	lru_cache_add_active_or_unevictable(kpage, vma);
 
	if (!PageAnon(page)) {
@@ -199,7 +200,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 
	err = 0;
 unlock:
-	mem_cgroup_cancel_charge(kpage, memcg);
+	mem_cgroup_cancel_charge(kpage, memcg, false);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	unlock_page(page);
	return err;
diff --git a/mm/filemap.c b/mm/filemap.c
index ae652ded700c..a729345ed6ec 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -618,7 +618,7 @@ static int __add_to_page_cache_locked(struct page *page,
 
	if (!huge) {
		error = mem_cgroup_try_charge(page, current->mm,
-					      gfp_mask, &memcg);
+					      gfp_mask, &memcg, false);
		if (error)
			return error;
	}
@@ -626,7 +626,7 @@ static int __add_to_page_cache_locked(struct page *page,
	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error) {
		if (!huge)
-			mem_cgroup_cancel_charge(page, memcg);
+			mem_cgroup_cancel_charge(page, memcg, false);
		return error;
	}
 
@@ -645,7 +645,7 @@ static int __add_to_page_cache_locked(struct page *page,
	__inc_zone_page_state(page, NR_FILE_PAGES);
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
-		mem_cgroup_commit_charge(page, memcg, false);
+		mem_cgroup_commit_charge(page, memcg, false, false);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
 err_insert:
@@ -653,7 +653,7 @@ err_insert:
	/* Leave page->index set: truncation relies upon it */
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
-		mem_cgroup_cancel_charge(page, memcg);
+		mem_cgroup_cancel_charge(page, memcg, false);
	page_cache_release(page);
	return error;
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b7669cfe9dc9..4211682f223b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -751,7 +751,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 
	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-	if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) {
+	if (mem_cgroup_try_charge(page, mm, gfp, &memcg, true)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
@@ -759,7 +759,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 
	pgtable = pte_alloc_one(mm, haddr);
	if (unlikely(!pgtable)) {
-		mem_cgroup_cancel_charge(page, memcg);
+		mem_cgroup_cancel_charge(page, memcg, true);
		put_page(page);
		return VM_FAULT_OOM;
	}
@@ -775,7 +775,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_none(*pmd))) {
		spin_unlock(ptl);
-		mem_cgroup_cancel_charge(page, memcg);
+		mem_cgroup_cancel_charge(page, memcg, true);
		put_page(page);
		pte_free(mm, pgtable);
	} else {
@@ -786,7 +786,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
			int ret;
 
			spin_unlock(ptl);
-			mem_cgroup_cancel_charge(page, memcg);
+			mem_cgroup_cancel_charge(page, memcg, true);
			put_page(page);
			pte_free(mm, pgtable);
			ret = handle_userfault(vma, address, flags,
@@ -798,7 +798,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr, true);
-		mem_cgroup_commit_charge(page, memcg, false);
+		mem_cgroup_commit_charge(page, memcg, false, true);
		lru_cache_add_active_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		set_pmd_at(mm, haddr, pmd, entry);
@@ -1095,13 +1095,14 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
					       vma, address, page_to_nid(page));
		if (unlikely(!pages[i] ||
			     mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL,
-						   &memcg))) {
+						   &memcg, false))) {
			if (pages[i])
				put_page(pages[i]);
			while (--i >= 0) {
				memcg = (void *)page_private(pages[i]);
				set_page_private(pages[i], 0);
-				mem_cgroup_cancel_charge(pages[i], memcg);
+				mem_cgroup_cancel_charge(pages[i], memcg,
+						false);
				put_page(pages[i]);
			}
			kfree(pages);
@@ -1140,7 +1141,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
		page_add_new_anon_rmap(pages[i], vma, haddr, false);
-		mem_cgroup_commit_charge(pages[i], memcg, false);
+		mem_cgroup_commit_charge(pages[i], memcg, false, false);
		lru_cache_add_active_or_unevictable(pages[i], vma);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
@@ -1168,7 +1169,7 @@ out_free_pages:
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
-		mem_cgroup_cancel_charge(pages[i], memcg);
+		mem_cgroup_cancel_charge(pages[i], memcg, false);
		put_page(pages[i]);
	}
	kfree(pages);
@@ -1234,7 +1235,8 @@ alloc:
		goto out;
	}
 
-	if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg))) {
+	if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg,
+					   true))) {
		put_page(new_page);
		if (page) {
			split_huge_page(page);
@@ -1263,7 +1265,7 @@ alloc:
	put_user_huge_page(page);
	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
		spin_unlock(ptl);
-		mem_cgroup_cancel_charge(new_page, memcg);
+		mem_cgroup_cancel_charge(new_page, memcg, true);
		put_page(new_page);
		goto out_mn;
	} else {
@@ -1272,7 +1274,7 @@ alloc:
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		pmdp_huge_clear_flush_notify(vma, haddr, pmd);
		page_add_new_anon_rmap(new_page, vma, haddr, true);
-		mem_cgroup_commit_charge(new_page, memcg, false);
+		mem_cgroup_commit_charge(new_page, memcg, false, true);
		lru_cache_add_active_or_unevictable(new_page, vma);
		set_pmd_at(mm, haddr, pmd, entry);
		update_mmu_cache_pmd(vma, address, pmd);
@@ -2583,7 +2585,7 @@ static void collapse_huge_page(struct mm_struct *mm,
		goto out_nolock;
	}
 
-	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg))) {
+	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}
@@ -2683,7 +2685,7 @@ static void collapse_huge_page(struct mm_struct *mm,
	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
-	mem_cgroup_commit_charge(new_page, memcg, false);
+	mem_cgroup_commit_charge(new_page, memcg, false, true);
	lru_cache_add_active_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
@@ -2703,7 +2705,7 @@ out_nolock:
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
 out:
-	mem_cgroup_cancel_charge(new_page, memcg);
+	mem_cgroup_cancel_charge(new_page, memcg, true);
	goto out_up_write;
 }
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 54eae4f19d80..311fd2b71bae 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -647,7 +647,7 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
-					 int nr_pages)
+					 bool compound, int nr_pages)
 {
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
@@ -660,9 +660,11 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);
 
-	if (PageTransHuge(page))
+	if (compound) {
+		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);
+	}
 
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
@@ -4513,30 +4515,24 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
  * from old cgroup.
  */
 static int mem_cgroup_move_account(struct page *page,
-				   unsigned int nr_pages,
+				   bool compound,
				   struct mem_cgroup *from,
				   struct mem_cgroup *to)
 {
	unsigned long flags;
+	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
	int ret;
	bool anon;
 
	VM_BUG_ON(from == to);
	VM_BUG_ON_PAGE(PageLRU(page), page);
-	/*
-	 * The page is isolated from LRU. So, collapse function
-	 * will not handle this page. But page splitting can happen.
-	 * Do this check under compound_page_lock(). The caller should
-	 * hold it.
-	 */
-	ret = -EBUSY;
-	if (nr_pages > 1 && !PageTransHuge(page))
-		goto out;
+	VM_BUG_ON(compound && !PageTransHuge(page));
 
	/*
	 * Prevent mem_cgroup_replace_page() from looking at
	 * page->mem_cgroup of its source page while we change it.
	 */
+	ret = -EBUSY;
	if (!trylock_page(page))
		goto out;
 
@@ -4591,9 +4587,9 @@ static int mem_cgroup_move_account(struct page *page,
	ret = 0;
 
	local_irq_disable();
-	mem_cgroup_charge_statistics(to, page, nr_pages);
+	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
	memcg_check_events(to, page);
-	mem_cgroup_charge_statistics(from, page, -nr_pages);
+	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
	memcg_check_events(from, page);
	local_irq_enable();
 out_unlock:
@@ -4890,7 +4886,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
		if (target_type == MC_TARGET_PAGE) {
			page = target.page;
			if (!isolate_lru_page(page)) {
-				if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
+				if (!mem_cgroup_move_account(page, true,
							     mc.from, mc.to)) {
					mc.precharge -= HPAGE_PMD_NR;
					mc.moved_charge += HPAGE_PMD_NR;
@@ -4919,7 +4915,8 @@ retry:
			page = target.page;
			if (isolate_lru_page(page))
				goto put;
-			if (!mem_cgroup_move_account(page, 1, mc.from, mc.to)) {
+			if (!mem_cgroup_move_account(page, false,
+						     mc.from, mc.to)) {
				mc.precharge--;
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
@@ -5258,10 +5255,11 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
  * with mem_cgroup_cancel_charge() in case page instantiation fails.
  */
 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
-			  gfp_t gfp_mask, struct mem_cgroup **memcgp)
+			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
+			  bool compound)
 {
	struct mem_cgroup *memcg = NULL;
-	unsigned int nr_pages = 1;
+	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
	int ret = 0;
 
	if (mem_cgroup_disabled())
@@ -5291,11 +5289,6 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
		}
	}
 
-	if (PageTransHuge(page)) {
-		nr_pages <<= compound_order(page);
-		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
-	}
-
	if (!memcg)
		memcg = get_mem_cgroup_from_mm(mm);
 
@@ -5324,9 +5317,9 @@ out:
  * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
  */
 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
-			      bool lrucare)
+			      bool lrucare, bool compound)
 {
-	unsigned int nr_pages = 1;
+	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
 
	VM_BUG_ON_PAGE(!page->mapping, page);
	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
@@ -5343,13 +5336,8 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
 
	commit_charge(page, memcg, lrucare);
 
-	if (PageTransHuge(page)) {
-		nr_pages <<= compound_order(page);
-		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
-	}
-
	local_irq_disable();
-	mem_cgroup_charge_statistics(memcg, page, nr_pages);
+	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
	memcg_check_events(memcg, page);
	local_irq_enable();
 
@@ -5371,9 +5359,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
  *
  * Cancel a charge transaction started by mem_cgroup_try_charge().
  */
-void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
+void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
+		bool compound)
 {
-	unsigned int nr_pages = 1;
+	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
 
	if (mem_cgroup_disabled())
		return;
@@ -5385,11 +5374,6 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
	if (!memcg)
		return;
 
-	if (PageTransHuge(page)) {
-		nr_pages <<= compound_order(page);
-		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
-	}
-
	cancel_charge(memcg, nr_pages);
 }
 
@@ -5750,7 +5734,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
	 * only synchronisation we have for udpating the per-CPU variables.
	 */
	VM_BUG_ON(!irqs_disabled());
-	mem_cgroup_charge_statistics(memcg, page, -1);
+	mem_cgroup_charge_statistics(memcg, page, false, -1);
	memcg_check_events(memcg, page);
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index f964d190ce83..a021c295e88d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2087,7 +2087,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
		cow_user_page(new_page, old_page, address, vma);
	}
 
-	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg))
+	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))
		goto oom_free_new;
 
	__SetPageUptodate(new_page);
@@ -2119,7 +2119,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
		 */
		ptep_clear_flush_notify(vma, address, page_table);
		page_add_new_anon_rmap(new_page, vma, address, false);
-		mem_cgroup_commit_charge(new_page, memcg, false);
+		mem_cgroup_commit_charge(new_page, memcg, false, false);
		lru_cache_add_active_or_unevictable(new_page, vma);
		/*
		 * We call the notify macro here because, when using secondary
@@ -2158,7 +2158,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
		new_page = old_page;
		page_copied = 1;
	} else {
-		mem_cgroup_cancel_charge(new_page, memcg);
+		mem_cgroup_cancel_charge(new_page, memcg, false);
	}
 
	if (new_page)
@@ -2533,7 +2533,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
		goto out_page;
	}
 
-	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg)) {
+	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false)) {
		ret = VM_FAULT_OOM;
		goto out_page;
	}
@@ -2575,10 +2575,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
	set_pte_at(mm, address, page_table, pte);
	if (page == swapcache) {
		do_page_add_anon_rmap(page, vma, address, exclusive);
-		mem_cgroup_commit_charge(page, memcg, true);
+		mem_cgroup_commit_charge(page, memcg, true, false);
	} else { /* ksm created a completely new copy */
		page_add_new_anon_rmap(page, vma, address, false);
-		mem_cgroup_commit_charge(page, memcg, false);
+		mem_cgroup_commit_charge(page, memcg, false, false);
		lru_cache_add_active_or_unevictable(page, vma);
	}
 
@@ -2613,7 +2613,7 @@ unlock:
 out:
	return ret;
 out_nomap:
-	mem_cgroup_cancel_charge(page, memcg);
+	mem_cgroup_cancel_charge(page, memcg, false);
	pte_unmap_unlock(page_table, ptl);
 out_page:
	unlock_page(page);
@@ -2707,7 +2707,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
	if (!page)
		goto oom;
 
-	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
+	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
		goto oom_free_page;
 
	/*
@@ -2728,7 +2728,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
	/* Deliver the page fault to userland, check inside PT lock */
	if (userfaultfd_missing(vma)) {
		pte_unmap_unlock(page_table, ptl);
-		mem_cgroup_cancel_charge(page, memcg);
+		mem_cgroup_cancel_charge(page, memcg, false);
		page_cache_release(page);
		return handle_userfault(vma, address, flags,
					VM_UFFD_MISSING);
@@ -2736,7 +2736,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
	inc_mm_counter_fast(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, address, false);
-	mem_cgroup_commit_charge(page, memcg, false);
+	mem_cgroup_commit_charge(page, memcg, false, false);
	lru_cache_add_active_or_unevictable(page, vma);
 setpte:
	set_pte_at(mm, address, page_table, entry);
@@ -2747,7 +2747,7 @@ unlock:
	pte_unmap_unlock(page_table, ptl);
	return 0;
 release:
-	mem_cgroup_cancel_charge(page, memcg);
+	mem_cgroup_cancel_charge(page, memcg, false);
	page_cache_release(page);
	goto unlock;
 oom_free_page:
@@ -3000,7 +3000,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
	if (!new_page)
		return VM_FAULT_OOM;
 
-	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg)) {
+	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) {
		page_cache_release(new_page);
		return VM_FAULT_OOM;
	}
@@ -3029,7 +3029,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		goto uncharge_out;
	}
	do_set_pte(vma, address, new_page, pte, true, true);
-	mem_cgroup_commit_charge(new_page, memcg, false);
+	mem_cgroup_commit_charge(new_page, memcg, false, false);
	lru_cache_add_active_or_unevictable(new_page, vma);
	pte_unmap_unlock(pte, ptl);
	if (fault_page) {
@@ -3044,7 +3044,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
	}
	return ret;
 uncharge_out:
-	mem_cgroup_cancel_charge(new_page, memcg);
+	mem_cgroup_cancel_charge(new_page, memcg, false);
	page_cache_release(new_page);
	return ret;
 }
diff --git a/mm/shmem.c b/mm/shmem.c
index d271932f9ef9..b98e1011858c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -810,7 +810,8 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
-	error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg);
+	error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg,
+			false);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */
@@ -833,9 +834,9 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
	if (error) {
		if (error != -ENOMEM)
			error = 0;
-		mem_cgroup_cancel_charge(page, memcg);
+		mem_cgroup_cancel_charge(page, memcg, false);
	} else
-		mem_cgroup_commit_charge(page, memcg, true);
+		mem_cgroup_commit_charge(page, memcg, true, false);
 out:
	unlock_page(page);
	page_cache_release(page);
@@ -1218,7 +1219,8 @@ repeat:
			goto failed;
		}
 
-		error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);
+		error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg,
+						false);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
						swp_to_radix_entry(swap));
@@ -1235,14 +1237,14 @@ repeat:
			 * "repeat": reading a hole and writing should succeed.
			 */
			if (error) {
-				mem_cgroup_cancel_charge(page, memcg);
+				mem_cgroup_cancel_charge(page, memcg, false);
				delete_from_swap_cache(page);
			}
		}
		if (error)
			goto failed;
 
-		mem_cgroup_commit_charge(page, memcg, true);
+		mem_cgroup_commit_charge(page, memcg, true, false);
 
		spin_lock(&info->lock);
		info->swapped--;
@@ -1281,7 +1283,8 @@ repeat:
		if (sgp == SGP_WRITE)
			__SetPageReferenced(page);
 
-		error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);
+		error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg,
+						false);
		if (error)
			goto decused;
		error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
@@ -1291,10 +1294,10 @@ repeat:
			radix_tree_preload_end();
		}
		if (error) {
-			mem_cgroup_cancel_charge(page, memcg);
+			mem_cgroup_cancel_charge(page, memcg, false);
			goto decused;
		}
-		mem_cgroup_commit_charge(page, memcg, false);
+		mem_cgroup_commit_charge(page, memcg, false, false);
		lru_cache_add_anon(page);
 
		spin_lock(&info->lock);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 058e6f0162eb..efe26bb10adb 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1142,14 +1142,15 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
	if (unlikely(!page))
		return -ENOMEM;
 
-	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg)) {
+	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
+				  &memcg, false)) {
		ret = -ENOMEM;
		goto out_nolock;
	}
 
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
-		mem_cgroup_cancel_charge(page, memcg);
+		mem_cgroup_cancel_charge(page, memcg, false);
		ret = 0;
		goto out;
	}
@@ -1161,10 +1162,10 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
	if (page == swapcache) {
		page_add_anon_rmap(page, vma, addr, false);
-		mem_cgroup_commit_charge(page, memcg, true);
+		mem_cgroup_commit_charge(page, memcg, true, false);
	} else { /* ksm created a completely new copy */
		page_add_new_anon_rmap(page, vma, addr, false);
-		mem_cgroup_commit_charge(page, memcg, false);
+		mem_cgroup_commit_charge(page, memcg, false, false);
		lru_cache_add_active_or_unevictable(page, vma);
	}
	swap_free(entry);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index ae21a1f309c2..806b0c758c5b 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -63,7 +63,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
	__SetPageUptodate(page);
 
	ret = -ENOMEM;
-	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg))
+	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
		goto out_release;
 
	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
@@ -77,7 +77,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 
	inc_mm_counter(dst_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
-	mem_cgroup_commit_charge(page, memcg, false);
+	mem_cgroup_commit_charge(page, memcg, false, false);
	lru_cache_add_active_or_unevictable(page, dst_vma);
 
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
@@ -91,7 +91,7 @@ out:
	return ret;
 out_release_uncharge_unlock:
	pte_unmap_unlock(dst_pte, ptl);
-	mem_cgroup_cancel_charge(page, memcg);
+	mem_cgroup_cancel_charge(page, memcg, false);
 out_release:
	page_cache_release(page);
	goto out;