path: root/mm/memory.c
author      Kirill A. Shutemov <kirill.shutemov@linux.intel.com>   2016-01-15 19:52:16 -0500
committer   Linus Torvalds <torvalds@linux-foundation.org>         2016-01-15 20:56:32 -0500
commit      d281ee6145183594788ab6d5b55f8d144e69eace (patch)
tree        08b535efb69098d6cb9eef938ad63316be2d70b4 /mm/memory.c
parent      afd9883f93b6d030682d7072852b50c5a1b17b63 (diff)
rmap: add argument to charge compound page
We're going to allow mapping of individual 4k pages of a THP compound page, which means we can no longer rely on the PageTransHuge() check to decide whether to map or unmap a small page or the whole THP. This patch adds a new argument to the rmap functions that indicates whether we want to operate on the whole compound page or only the small page.

[n-horiguchi@ah.jp.nec.com: fix mapcount mismatch in hugepage migration]

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
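For orientation, a minimal sketch of the rmap prototypes with the new argument, reconstructed from the call sites in the diff below. The real declarations live in include/linux/rmap.h (not part of this diff), so treat the parameter names here as illustrative:

    /* last argument: operate on the whole compound page (true) or on a
     * single small page (false) -- reconstructed from the call sites below */
    void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
                                unsigned long address, bool compound);
    void page_remove_rmap(struct page *page, bool compound);

    /* do_page_add_anon_rmap() takes an int flags word instead; the
     * RMAP_EXCLUSIVE value assigned in do_swap_page() below is one such flag */
    void do_page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
                               unsigned long address, int flags);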
Diffstat (limited to 'mm/memory.c')
-rw-r--r--   mm/memory.c   14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index f9360dde6967..f964d190ce83 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1118,7 +1118,7 @@ again:
                 mark_page_accessed(page);
         }
         rss[mm_counter(page)]--;
-        page_remove_rmap(page);
+        page_remove_rmap(page, false);
         if (unlikely(page_mapcount(page) < 0))
             print_bad_pte(vma, addr, ptent, page);
         if (unlikely(!__tlb_remove_page(tlb, page))) {
@@ -2118,7 +2118,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
          * thread doing COW.
          */
         ptep_clear_flush_notify(vma, address, page_table);
-        page_add_new_anon_rmap(new_page, vma, address);
+        page_add_new_anon_rmap(new_page, vma, address, false);
         mem_cgroup_commit_charge(new_page, memcg, false);
         lru_cache_add_active_or_unevictable(new_page, vma);
         /*
@@ -2151,7 +2151,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
              * mapcount is visible. So transitively, TLBs to
              * old page will be flushed before it can be reused.
              */
-            page_remove_rmap(old_page);
+            page_remove_rmap(old_page, false);
         }

         /* Free the old page.. */
@@ -2567,7 +2567,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
         pte = maybe_mkwrite(pte_mkdirty(pte), vma);
         flags &= ~FAULT_FLAG_WRITE;
         ret |= VM_FAULT_WRITE;
-        exclusive = 1;
+        exclusive = RMAP_EXCLUSIVE;
     }
     flush_icache_page(vma, page);
     if (pte_swp_soft_dirty(orig_pte))
@@ -2577,7 +2577,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
         do_page_add_anon_rmap(page, vma, address, exclusive);
         mem_cgroup_commit_charge(page, memcg, true);
     } else { /* ksm created a completely new copy */
-        page_add_new_anon_rmap(page, vma, address);
+        page_add_new_anon_rmap(page, vma, address, false);
         mem_cgroup_commit_charge(page, memcg, false);
         lru_cache_add_active_or_unevictable(page, vma);
     }
@@ -2735,7 +2735,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
     }

     inc_mm_counter_fast(mm, MM_ANONPAGES);
-    page_add_new_anon_rmap(page, vma, address);
+    page_add_new_anon_rmap(page, vma, address, false);
     mem_cgroup_commit_charge(page, memcg, false);
     lru_cache_add_active_or_unevictable(page, vma);
 setpte:
@@ -2824,7 +2824,7 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
     if (anon) {
         inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
-        page_add_new_anon_rmap(page, vma, address);
+        page_add_new_anon_rmap(page, vma, address, false);
     } else {
         inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
         page_add_file_rmap(page);
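Every call site touched in mm/memory.c maps or unmaps a single small page, so each one passes false for the new argument. A compound mapping site (for example a huge-page fault path in mm/huge_memory.c, which is not part of this diff) would pass true for the THP head page. An illustrative contrast, with hpage and haddr as hypothetical placeholder names:

    /* single 4k page, as at every call site in this file */
    page_add_new_anon_rmap(page, vma, address, false);

    /* whole THP compound page (illustrative; hpage/haddr are placeholders) */
    page_add_new_anon_rmap(hpage, vma, haddr, true);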