about summary refs log tree commit diff stats
path: root/mm/shmem.c
diff options
context:
space:
mode:
author: Hugh Dickins <hughd@google.com> 2011-07-25 20:12:37 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org> 2011-07-25 23:57:11 -0400
commit: 48f170fb7d7db8789ccc23e051af61f62af5f685 (patch)
tree: d4dcb91583b7b831a20deed6979be713ad2892b7 /mm/shmem.c
parent: 27ab700626f048407e9466d389a43c7d3aa45967 (diff)
tmpfs: simplify unuse and writepage
shmem_unuse_inode() and shmem_writepage() contain a little code to cope with pages inserted independently into the filecache, probably by a filesystem stacked on top of tmpfs, then fed to its ->readpage() or ->writepage(). Unionfs was indeed experimenting with working in that way three years ago, but I find no current examples: nowadays the stacking filesystems use vfs interfaces to the lower filesystem. It's now illegal: remove most of that code, adding some WARN_ON_ONCEs. Signed-off-by: Hugh Dickins <hughd@google.com> Cc: Erez Zadok <ezk@fsl.cs.sunysb.edu> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--mm/shmem.c50
1 file changed, 16 insertions, 34 deletions
diff --git a/mm/shmem.c b/mm/shmem.c
index bf6e9c11d859..7533574109da 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -972,20 +972,7 @@ found:
972 error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT); 972 error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT);
973 /* which does mem_cgroup_uncharge_cache_page on error */ 973 /* which does mem_cgroup_uncharge_cache_page on error */
974 974
975 if (error == -EEXIST) { 975 if (error != -ENOMEM) {
976 struct page *filepage = find_get_page(mapping, idx);
977 error = 1;
978 if (filepage) {
979 /*
980 * There might be a more uptodate page coming down
981 * from a stacked writepage: forget our swappage if so.
982 */
983 if (PageUptodate(filepage))
984 error = 0;
985 page_cache_release(filepage);
986 }
987 }
988 if (!error) {
989 delete_from_swap_cache(page); 976 delete_from_swap_cache(page);
990 set_page_dirty(page); 977 set_page_dirty(page);
991 info->flags |= SHMEM_PAGEIN; 978 info->flags |= SHMEM_PAGEIN;
@@ -1072,16 +1059,17 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1072 /* 1059 /*
1073 * shmem_backing_dev_info's capabilities prevent regular writeback or 1060 * shmem_backing_dev_info's capabilities prevent regular writeback or
1074 * sync from ever calling shmem_writepage; but a stacking filesystem 1061 * sync from ever calling shmem_writepage; but a stacking filesystem
1075 * may use the ->writepage of its underlying filesystem, in which case 1062 * might use ->writepage of its underlying filesystem, in which case
1076 * tmpfs should write out to swap only in response to memory pressure, 1063 * tmpfs should write out to swap only in response to memory pressure,
1077 * and not for the writeback threads or sync. However, in those cases, 1064 * and not for the writeback threads or sync.
1078 * we do still want to check if there's a redundant swappage to be
1079 * discarded.
1080 */ 1065 */
1081 if (wbc->for_reclaim) 1066 if (!wbc->for_reclaim) {
1082 swap = get_swap_page(); 1067 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
1083 else 1068 goto redirty;
1084 swap.val = 0; 1069 }
1070 swap = get_swap_page();
1071 if (!swap.val)
1072 goto redirty;
1085 1073
1086 /* 1074 /*
1087 * Add inode to shmem_unuse()'s list of swapped-out inodes, 1075 * Add inode to shmem_unuse()'s list of swapped-out inodes,
@@ -1092,15 +1080,12 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1092 * we've taken the spinlock, because shmem_unuse_inode() will 1080 * we've taken the spinlock, because shmem_unuse_inode() will
1093 * prune a !swapped inode from the swaplist under both locks. 1081 * prune a !swapped inode from the swaplist under both locks.
1094 */ 1082 */
1095 if (swap.val) { 1083 mutex_lock(&shmem_swaplist_mutex);
1096 mutex_lock(&shmem_swaplist_mutex); 1084 if (list_empty(&info->swaplist))
1097 if (list_empty(&info->swaplist)) 1085 list_add_tail(&info->swaplist, &shmem_swaplist);
1098 list_add_tail(&info->swaplist, &shmem_swaplist);
1099 }
1100 1086
1101 spin_lock(&info->lock); 1087 spin_lock(&info->lock);
1102 if (swap.val) 1088 mutex_unlock(&shmem_swaplist_mutex);
1103 mutex_unlock(&shmem_swaplist_mutex);
1104 1089
1105 if (index >= info->next_index) { 1090 if (index >= info->next_index) {
1106 BUG_ON(!(info->flags & SHMEM_TRUNCATE)); 1091 BUG_ON(!(info->flags & SHMEM_TRUNCATE));
@@ -1108,16 +1093,13 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1108 } 1093 }
1109 entry = shmem_swp_entry(info, index, NULL); 1094 entry = shmem_swp_entry(info, index, NULL);
1110 if (entry->val) { 1095 if (entry->val) {
1111 /* 1096 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
1112 * The more uptodate page coming down from a stacked
1113 * writepage should replace our old swappage.
1114 */
1115 free_swap_and_cache(*entry); 1097 free_swap_and_cache(*entry);
1116 shmem_swp_set(info, entry, 0); 1098 shmem_swp_set(info, entry, 0);
1117 } 1099 }
1118 shmem_recalc_inode(inode); 1100 shmem_recalc_inode(inode);
1119 1101
1120 if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { 1102 if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
1121 delete_from_page_cache(page); 1103 delete_from_page_cache(page);
1122 shmem_swp_set(info, entry, swap.val); 1104 shmem_swp_set(info, entry, swap.val);
1123 shmem_swp_unmap(entry); 1105 shmem_swp_unmap(entry);