aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
author    Hugh Dickins <hugh@veritas.com>  2008-02-05 01:28:53 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-05 12:44:15 -0500
commit    2e0e26c76a35de8f8bec6b2b917518cfeb52888a (patch)
tree      1357781bfe035c2401c65b2d0203385a47516bf5
parent    cb5f7b9a47963d9238398cd0c2676473e3c6896d (diff)
tmpfs: open a window in shmem_unuse_inode
There are a couple of reasons (patches follow) why it would be good to open a window for sleep in shmem_unuse_inode, between its search for a matching swap entry, and its handling of the entry found. shmem_unuse_inode must then use igrab to hold the inode against deletion in that window, and its corresponding iput might result in deletion: so it had better unlock_page before the iput, and might as well release the page too. Nor is there any need to hold on to shmem_swaplist_mutex once we know we'll leave the loop. So this unwinding moves from try_to_unuse and shmem_unuse into shmem_unuse_inode, in the case when it finds a match. Let try_to_unuse break on error in the shmem_unuse case, as it does in the unuse_mm case: though at this point in the series, no error to break on. Signed-off-by: Hugh Dickins <hugh@veritas.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  mm/shmem.c     57
-rw-r--r--  mm/swapfile.c  23
2 files changed, 45 insertions(+), 35 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 2e03d6031c24..a0126c437105 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -838,10 +838,8 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
838 if (size > SHMEM_NR_DIRECT) 838 if (size > SHMEM_NR_DIRECT)
839 size = SHMEM_NR_DIRECT; 839 size = SHMEM_NR_DIRECT;
840 offset = shmem_find_swp(entry, ptr, ptr+size); 840 offset = shmem_find_swp(entry, ptr, ptr+size);
841 if (offset >= 0) { 841 if (offset >= 0)
842 shmem_swp_balance_unmap();
843 goto found; 842 goto found;
844 }
845 if (!info->i_indirect) 843 if (!info->i_indirect)
846 goto lost2; 844 goto lost2;
847 845
@@ -879,11 +877,11 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
879 if (size > ENTRIES_PER_PAGE) 877 if (size > ENTRIES_PER_PAGE)
880 size = ENTRIES_PER_PAGE; 878 size = ENTRIES_PER_PAGE;
881 offset = shmem_find_swp(entry, ptr, ptr+size); 879 offset = shmem_find_swp(entry, ptr, ptr+size);
880 shmem_swp_unmap(ptr);
882 if (offset >= 0) { 881 if (offset >= 0) {
883 shmem_dir_unmap(dir); 882 shmem_dir_unmap(dir);
884 goto found; 883 goto found;
885 } 884 }
886 shmem_swp_unmap(ptr);
887 } 885 }
888 } 886 }
889lost1: 887lost1:
@@ -893,10 +891,25 @@ lost2:
893 return 0; 891 return 0;
894found: 892found:
895 idx += offset; 893 idx += offset;
896 inode = &info->vfs_inode; 894 inode = igrab(&info->vfs_inode);
897 error = add_to_page_cache(page, inode->i_mapping, idx, GFP_ATOMIC); 895 spin_unlock(&info->lock);
896
897 /* move head to start search for next from here */
898 list_move_tail(&shmem_swaplist, &info->swaplist);
899 mutex_unlock(&shmem_swaplist_mutex);
900
901 error = 1;
902 if (!inode)
903 goto out;
904
905 spin_lock(&info->lock);
906 ptr = shmem_swp_entry(info, idx, NULL);
907 if (ptr && ptr->val == entry.val)
908 error = add_to_page_cache(page, inode->i_mapping,
909 idx, GFP_ATOMIC);
898 if (error == -EEXIST) { 910 if (error == -EEXIST) {
899 struct page *filepage = find_get_page(inode->i_mapping, idx); 911 struct page *filepage = find_get_page(inode->i_mapping, idx);
912 error = 1;
900 if (filepage) { 913 if (filepage) {
901 /* 914 /*
902 * There might be a more uptodate page coming down 915 * There might be a more uptodate page coming down
@@ -911,16 +924,18 @@ found:
911 delete_from_swap_cache(page); 924 delete_from_swap_cache(page);
912 set_page_dirty(page); 925 set_page_dirty(page);
913 info->flags |= SHMEM_PAGEIN; 926 info->flags |= SHMEM_PAGEIN;
914 shmem_swp_set(info, ptr + offset, 0); 927 shmem_swp_set(info, ptr, 0);
928 swap_free(entry);
929 error = 1; /* not an error, but entry was found */
915 } 930 }
916 shmem_swp_unmap(ptr); 931 if (ptr)
932 shmem_swp_unmap(ptr);
917 spin_unlock(&info->lock); 933 spin_unlock(&info->lock);
918 /* 934out:
919 * Decrement swap count even when the entry is left behind: 935 unlock_page(page);
920 * try_to_unuse will skip over mms, then reincrement count. 936 page_cache_release(page);
921 */ 937 iput(inode); /* allows for NULL */
922 swap_free(entry); 938 return error;
923 return 1;
924} 939}
925 940
926/* 941/*
@@ -935,18 +950,16 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
935 mutex_lock(&shmem_swaplist_mutex); 950 mutex_lock(&shmem_swaplist_mutex);
936 list_for_each_safe(p, next, &shmem_swaplist) { 951 list_for_each_safe(p, next, &shmem_swaplist) {
937 info = list_entry(p, struct shmem_inode_info, swaplist); 952 info = list_entry(p, struct shmem_inode_info, swaplist);
938 if (!info->swapped) 953 if (info->swapped)
954 found = shmem_unuse_inode(info, entry, page);
955 else
939 list_del_init(&info->swaplist); 956 list_del_init(&info->swaplist);
940 else if (shmem_unuse_inode(info, entry, page)) {
941 /* move head to start search for next from here */
942 list_move_tail(&shmem_swaplist, &info->swaplist);
943 found = 1;
944 break;
945 }
946 cond_resched(); 957 cond_resched();
958 if (found)
959 goto out;
947 } 960 }
948 mutex_unlock(&shmem_swaplist_mutex); 961 mutex_unlock(&shmem_swaplist_mutex);
949 return found; 962out: return found; /* 0 or 1 or -ENOMEM */
950} 963}
951 964
952/* 965/*
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 14bc4f28a8cc..eade24da9310 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -814,7 +814,7 @@ static int try_to_unuse(unsigned int type)
814 atomic_inc(&new_start_mm->mm_users); 814 atomic_inc(&new_start_mm->mm_users);
815 atomic_inc(&prev_mm->mm_users); 815 atomic_inc(&prev_mm->mm_users);
816 spin_lock(&mmlist_lock); 816 spin_lock(&mmlist_lock);
817 while (*swap_map > 1 && !retval && 817 while (*swap_map > 1 && !retval && !shmem &&
818 (p = p->next) != &start_mm->mmlist) { 818 (p = p->next) != &start_mm->mmlist) {
819 mm = list_entry(p, struct mm_struct, mmlist); 819 mm = list_entry(p, struct mm_struct, mmlist);
820 if (!atomic_inc_not_zero(&mm->mm_users)) 820 if (!atomic_inc_not_zero(&mm->mm_users))
@@ -846,6 +846,13 @@ static int try_to_unuse(unsigned int type)
846 mmput(start_mm); 846 mmput(start_mm);
847 start_mm = new_start_mm; 847 start_mm = new_start_mm;
848 } 848 }
849 if (shmem) {
850 /* page has already been unlocked and released */
851 if (shmem > 0)
852 continue;
853 retval = shmem;
854 break;
855 }
849 if (retval) { 856 if (retval) {
850 unlock_page(page); 857 unlock_page(page);
851 page_cache_release(page); 858 page_cache_release(page);
@@ -884,12 +891,6 @@ static int try_to_unuse(unsigned int type)
884 * read from disk into another page. Splitting into two 891 * read from disk into another page. Splitting into two
885 * pages would be incorrect if swap supported "shared 892 * pages would be incorrect if swap supported "shared
886 * private" pages, but they are handled by tmpfs files. 893 * private" pages, but they are handled by tmpfs files.
887 *
888 * Note shmem_unuse already deleted a swappage from
889 * the swap cache, unless the move to filepage failed:
890 * in which case it left swappage in cache, lowered its
891 * swap count to pass quickly through the loops above,
892 * and now we must reincrement count to try again later.
893 */ 894 */
894 if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) { 895 if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
895 struct writeback_control wbc = { 896 struct writeback_control wbc = {
@@ -900,12 +901,8 @@ static int try_to_unuse(unsigned int type)
900 lock_page(page); 901 lock_page(page);
901 wait_on_page_writeback(page); 902 wait_on_page_writeback(page);
902 } 903 }
903 if (PageSwapCache(page)) { 904 if (PageSwapCache(page))
904 if (shmem) 905 delete_from_swap_cache(page);
905 swap_duplicate(entry);
906 else
907 delete_from_swap_cache(page);
908 }
909 906
910 /* 907 /*
911 * So we could skip searching mms once swap count went 908 * So we could skip searching mms once swap count went