Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--  mm/swapfile.c | 23 ++++++++++-------------
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 14bc4f28a8cc..eade24da9310 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -814,7 +814,7 @@ static int try_to_unuse(unsigned int type)
 			atomic_inc(&new_start_mm->mm_users);
 			atomic_inc(&prev_mm->mm_users);
 			spin_lock(&mmlist_lock);
-			while (*swap_map > 1 && !retval &&
+			while (*swap_map > 1 && !retval && !shmem &&
 					(p = p->next) != &start_mm->mmlist) {
 				mm = list_entry(p, struct mm_struct, mmlist);
 				if (!atomic_inc_not_zero(&mm->mm_users))
@@ -846,6 +846,13 @@ static int try_to_unuse(unsigned int type)
 			mmput(start_mm);
 			start_mm = new_start_mm;
 		}
+		if (shmem) {
+			/* page has already been unlocked and released */
+			if (shmem > 0)
+				continue;
+			retval = shmem;
+			break;
+		}
 		if (retval) {
 			unlock_page(page);
 			page_cache_release(page);
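The block added above gives try_to_unuse() a three-way branch on the local
variable shmem, which (judging from the other hunks) holds the most recent
return value of shmem_unuse(): positive means the tmpfs code found the page
and has already unlocked and released it, negative is an error code to
propagate, and zero means the page was not a shmem page, so the normal unuse
path continues with the page lock still held. Below is a minimal standalone
sketch of that convention; fake_shmem_unuse() and the scenario values are
illustrative stand-ins, not kernel API, and the reading of shmem's meaning is
an assumption drawn from this diff alone.

	/*
	 * Standalone model of the tristate handled by the new "if (shmem)"
	 * block.  Assumption: in the kernel, shmem carries shmem_unuse()'s
	 * return value; fake_shmem_unuse() is an illustrative stand-in.
	 */
	#include <stdio.h>

	#define MY_ENOMEM 12

	static int fake_shmem_unuse(int scenario)
	{
		if (scenario == 0)
			return 0;	/* not a shmem page */
		if (scenario == 1)
			return 1;	/* consumed: already unlocked and released */
		return -MY_ENOMEM;	/* failure: error code to propagate */
	}

	int main(void)
	{
		int retval = 0;

		for (int scenario = 0; scenario <= 2; scenario++) {
			int shmem = fake_shmem_unuse(scenario);

			if (shmem) {
				/* page has already been unlocked and released */
				if (shmem > 0) {
					printf("shmem > 0: continue the scan\n");
					continue;
				}
				retval = shmem;
				printf("shmem < 0: break with retval %d\n", retval);
				break;
			}
			printf("shmem == 0: normal unuse path, lock held\n");
		}
		return 0;
	}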
@@ -884,12 +891,6 @@ static int try_to_unuse(unsigned int type)
 		 * read from disk into another page. Splitting into two
 		 * pages would be incorrect if swap supported "shared
 		 * private" pages, but they are handled by tmpfs files.
-		 *
-		 * Note shmem_unuse already deleted a swappage from
-		 * the swap cache, unless the move to filepage failed:
-		 * in which case it left swappage in cache, lowered its
-		 * swap count to pass quickly through the loops above,
-		 * and now we must reincrement count to try again later.
 		 */
 		if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
 			struct writeback_control wbc = {
@@ -900,12 +901,8 @@ static int try_to_unuse(unsigned int type)
 			lock_page(page);
 			wait_on_page_writeback(page);
 		}
-		if (PageSwapCache(page)) {
-			if (shmem)
-				swap_duplicate(entry);
-			else
-				delete_from_swap_cache(page);
-		}
+		if (PageSwapCache(page))
+			delete_from_swap_cache(page);
 
 		/*
 		 * So we could skip searching mms once swap count went
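With the early "if (shmem)" exit in place, control can only reach this
swap-cache cleanup with shmem == 0, which is why the old
"if (shmem) swap_duplicate(entry); else delete_from_swap_cache(page);"
branch collapses to an unconditional delete_from_swap_cache(), and why the
comment about re-incrementing the swap count (removed in the previous hunk)
no longer applies. The short sketch below demonstrates that invariant; it
models only the loop shape under the assumptions stated above, not the real
kernel code.

	/*
	 * Why the swap_duplicate() branch became dead code: once the early
	 * exit runs, the later swap-cache path can never see shmem != 0.
	 * Pure control-flow model; names and values are illustrative.
	 */
	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		/* -1: shmem_unuse() error, 0: not shmem, +1: page consumed */
		int outcomes[] = { -1, 0, 1 };

		for (int i = 0; i < 3; i++) {
			int shmem = outcomes[i];

			if (shmem)		/* early exit added by this patch */
				continue;	/* (the error case breaks instead) */

			assert(shmem == 0);	/* always holds here after the patch */
			printf("swap-cache path reached with shmem == %d\n", shmem);
		}
		return 0;
	}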