author    Johannes Weiner <hannes@cmpxchg.org>  2013-02-22 19:32:20 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-02-23 20:50:09 -0500
commit    af34770e55fd899c96d8d73bdc04dbc956096650 (patch)
tree      91c9bfc79d765a7919412a2ab73a164c96d05706 /mm
parent    9b4f98cdac9683ee9cdb28c582a81223f0c10a3f (diff)
mm: reduce rmap overhead for ex-KSM page copies created on swap faults
When ex-KSM pages are faulted from swap cache, the fault handler is not
capable of re-establishing anon_vma-spanning KSM pages.  In this case, a
copy of the page is created instead, just like during a COW break.

These freshly made copies are known to be exclusive to the faulting VMA
and there is no reason to go look for this page in parent and sibling
processes during rmap operations.

Use page_add_new_anon_rmap() for these copies.  This also puts them on
the proper LRU lists and marks them SwapBacked, so we can get rid of
doing this ad-hoc in the KSM copy code.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Simon Jeons <simon.jeons@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Satoru Moriya <satoru.moriya@hds.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
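For context, page_add_new_anon_rmap() of that era did roughly the following (abbreviated sketch of mm/rmap.c, with statistics updates and debug checks omitted; not the verbatim source): it establishes an exclusive anon mapping, marks the page SwapBacked, and puts it on the appropriate LRU list, which is exactly the work the KSM copy code had been doing by hand.

void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	/* fresh page: no other process can hold a mapping to it yet */
	SetPageSwapBacked(page);
	atomic_set(&page->_mapcount, 0);	/* mapcount starts at -1 */
	__page_set_anon_rmap(page, vma, address, 1);	/* 1 == exclusive */
	if (!mlocked_vma_newpage(vma, page))
		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(page);
}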
Diffstat (limited to 'mm')
 mm/ksm.c    | 6 ------
 mm/memory.c | 5 ++++-
 2 files changed, 4 insertions(+), 7 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 51573858938d..e1f1f278075f 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1590,13 +1590,7 @@ struct page *ksm_does_need_to_copy(struct page *page,
 
 		SetPageDirty(new_page);
 		__SetPageUptodate(new_page);
-		SetPageSwapBacked(new_page);
 		__set_page_locked(new_page);
-
-		if (!mlocked_vma_newpage(vma, new_page))
-			lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
-		else
-			add_page_to_unevictable_list(new_page);
 	}
 
 	return new_page;
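With the hunk above applied, the helper reduces to allocating and filling the copy. Reconstructed for illustration from the hunk's context lines (not a verbatim quote of the post-patch tree), the resulting function looks like this:

struct page *ksm_does_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	struct page *new_page;

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	if (new_page) {
		copy_user_highpage(new_page, page, address, vma);

		SetPageDirty(new_page);
		__SetPageUptodate(new_page);
		__set_page_locked(new_page);
	}

	/* SwapBacked + LRU placement now happen in the fault handler's rmap call */
	return new_page;
}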
diff --git a/mm/memory.c b/mm/memory.c
index bb1369f7b9b4..0abd07097ec6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3044,7 +3044,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 	flush_icache_page(vma, page);
 	set_pte_at(mm, address, page_table, pte);
-	do_page_add_anon_rmap(page, vma, address, exclusive);
+	if (swapcache) /* ksm created a completely new copy */
+		page_add_new_anon_rmap(page, vma, address);
+	else
+		do_page_add_anon_rmap(page, vma, address, exclusive);
 	/* It's better to call commit-charge after rmap is established */
 	mem_cgroup_commit_charge_swapin(page, ptr);
 
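The swapcache test above is sound because of how do_swap_page() assigns the variable earlier in the function; roughly (abbreviated sketch of the era's code, not the verbatim source):

	struct page *page, *swapcache = NULL;

	page = lookup_swap_cache(entry);
	/* ... swap the page in and lock it if it was not cached ... */
	if (ksm_might_need_to_copy(page, vma, address)) {
		swapcache = page;	/* remember the original swap cache page */
		page = ksm_does_need_to_copy(page, vma, address);
		/* on success, page is a fresh copy exclusive to this VMA */
	}

So swapcache is non-NULL only when KSM substituted a private copy, and only then is page_add_new_anon_rmap() the right call.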