author		Hugh Dickins <hughd@google.com>	2013-02-22 19:36:09 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-23 20:50:23 -0500
commit		9e16b7fb1d066d38d01fd57c449f2640c5d208cb (patch)
tree		9c9cc35eea61bc16a563cfcdd5468b021daef0fa /mm/swapfile.c
parent		5117b3b835f288314a2d4e5512bc1747e3a7c8ed (diff)
mm,ksm: swapoff might need to copy
Before establishing that KSM page migration was the cause of my
WARN_ON_ONCE(page_mapped(page))s, I suspected that they came from the
lack of a ksm_might_need_to_copy() in swapoff's unuse_pte() - which in
many respects is equivalent to faulting in a page.

In fact I've never caught that as the cause: but in theory it does at
least need the KSM_RUN_UNMERGE check in ksm_might_need_to_copy(), to
avoid bringing a KSM page back in when it's not supposed to be.

I intended to copy how it's done in do_swap_page(), but have a strong
aversion to how "swapcache" ends up being used there: rework it with
"page != swapcache".

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Petr Holasek <pholasek@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
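For context, the KSM_RUN_UNMERGE check referred to above lives in ksm_might_need_to_copy(). A simplified sketch of that helper follows (condensed from the mm/ksm.c of this era, not a verbatim copy; PageKsm(), page_stable_node(), ksm_run, page_anon_vma(), alloc_page_vma() and copy_user_highpage() are the helpers it is assumed to use):

struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *new_page;

	if (PageKsm(page)) {
		/*
		 * A stable-tree KSM page can normally be mapped straight
		 * back in - but not while KSM_RUN_UNMERGE is in progress:
		 * this is the check unuse_pte() now depends on.
		 */
		if (page_stable_node(page) && !(ksm_run & KSM_RUN_UNMERGE))
			return page;
	} else if (!page_anon_vma(page)) {
		return page;	/* no need to copy it */
	}
	/*
	 * (The real helper also reuses the page when its anon_vma and
	 * index still match this vma and address.)
	 */

	/* Otherwise hand back a private, locked copy for this mapping. */
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
	if (new_page) {
		copy_user_highpage(new_page, page, addr, vma);
		SetPageDirty(new_page);
		__SetPageUptodate(new_page);
		__set_page_locked(new_page);
	}
	return new_page;
}

Because such a copy comes back locked with its own reference, the final hunk below unlocks and puts "page" on the way out whenever page != swapcache, while the caller's own unlock/put continues to apply to the original swapcache page.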
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--	mm/swapfile.c	15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 9b51266413cd..c72c648f750c 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -874,11 +874,17 @@ unsigned int count_swap_pages(int type, int free)
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, swp_entry_t entry, struct page *page)
 {
+	struct page *swapcache;
 	struct mem_cgroup *memcg;
 	spinlock_t *ptl;
 	pte_t *pte;
 	int ret = 1;
 
+	swapcache = page;
+	page = ksm_might_need_to_copy(page, vma, addr);
+	if (unlikely(!page))
+		return -ENOMEM;
+
 	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
 					GFP_KERNEL, &memcg)) {
 		ret = -ENOMEM;
@@ -897,7 +903,10 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	get_page(page);
 	set_pte_at(vma->vm_mm, addr, pte,
 		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
-	page_add_anon_rmap(page, vma, addr);
+	if (page == swapcache)
+		page_add_anon_rmap(page, vma, addr);
+	else /* ksm created a completely new copy */
+		page_add_new_anon_rmap(page, vma, addr);
 	mem_cgroup_commit_charge_swapin(page, memcg);
 	swap_free(entry);
 	/*
@@ -908,6 +917,10 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 out:
 	pte_unmap_unlock(pte, ptl);
 out_nolock:
+	if (page != swapcache) {
+		unlock_page(page);
+		put_page(page);
+	}
 	return ret;
 }
 