about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorHugh Dickins <hughd@google.com>2013-02-22 19:36:10 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-23 20:50:24 -0500
commit56f31801ccdecb420d0d1fd2bf9f337c355214a9 (patch)
tree868442b02e6c83bdd52e4e129b17b90341190b6d /mm
parent9e16b7fb1d066d38d01fd57c449f2640c5d208cb (diff)
mm: cleanup "swapcache" in do_swap_page
I dislike the way in which "swapcache" gets used in do_swap_page(): there is always a page from swapcache there (even if maybe uncached by the time we lock it), but tests are made according to "swapcache". Rework that with "page != swapcache", as has been done in unuse_pte(). Signed-off-by: Hugh Dickins <hughd@google.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Petr Holasek <pholasek@redhat.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Izik Eidus <izik.eidus@ravellosystems.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/memory.c18
1 file changed, 8 insertions, 10 deletions
diff --git a/mm/memory.c b/mm/memory.c
index ec8ba011fa7d..705473afc1f4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2954,7 +2954,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 				unsigned int flags, pte_t orig_pte)
 {
 	spinlock_t *ptl;
-	struct page *page, *swapcache = NULL;
+	struct page *page, *swapcache;
 	swp_entry_t entry;
 	pte_t pte;
 	int locked;
@@ -3005,9 +3005,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		 */
 		ret = VM_FAULT_HWPOISON;
 		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+		swapcache = page;
 		goto out_release;
 	}

+	swapcache = page;
 	locked = lock_page_or_retry(page, mm, flags);

 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
@@ -3025,16 +3027,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
 		goto out_page;

-	swapcache = page;
 	page = ksm_might_need_to_copy(page, vma, address);
 	if (unlikely(!page)) {
 		ret = VM_FAULT_OOM;
 		page = swapcache;
-		swapcache = NULL;
 		goto out_page;
 	}
-	if (page == swapcache)
-		swapcache = NULL;

 	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
 		ret = VM_FAULT_OOM;
@@ -3078,10 +3076,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 	flush_icache_page(vma, page);
 	set_pte_at(mm, address, page_table, pte);
-	if (swapcache) /* ksm created a completely new copy */
-		page_add_new_anon_rmap(page, vma, address);
-	else
+	if (page == swapcache)
 		do_page_add_anon_rmap(page, vma, address, exclusive);
+	else /* ksm created a completely new copy */
+		page_add_new_anon_rmap(page, vma, address);
 	/* It's better to call commit-charge after rmap is established */
 	mem_cgroup_commit_charge_swapin(page, ptr);

@@ -3089,7 +3087,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
 		try_to_free_swap(page);
 	unlock_page(page);
-	if (swapcache) {
+	if (page != swapcache) {
 		/*
 		 * Hold the lock to avoid the swap entry to be reused
 		 * until we take the PT lock for the pte_same() check
@@ -3122,7 +3120,7 @@ out_page:
 	unlock_page(page);
 out_release:
 	page_cache_release(page);
-	if (swapcache) {
+	if (page != swapcache) {
 		unlock_page(swapcache);
 		page_cache_release(swapcache);
 	}