author     Hugh Dickins <hugh@veritas.com>                        2008-02-05 01:28:50 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2008-02-05 12:44:15 -0500
commit     73b1262fa43a778b1e154deea632cdef5009d6a1 (patch)
tree       409f14e78e23cd999e8c3d1e0945f0fa9b67048f
parent     f000944d03a5b74ab3c92b2fcdf0e944cc898065 (diff)
tmpfs: move swap swizzling into shmem
move_to_swap_cache and move_from_swap_cache functions (which swizzle a page
between tmpfs page cache and swap cache, to avoid page copying) are only used
by shmem.c; and our subsequent fix for unionfs needs different treatments in
the two instances of move_from_swap_cache. Move them from swap_state.c into
their callsites shmem_writepage, shmem_unuse_inode and shmem_getpage, making
add_to_swap_cache externally visible.
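
For orientation, the page-cache-to-swap-cache direction of the swizzle, as now open-coded in shmem_writepage, amounts to roughly the following (a condensed sketch drawn from the hunks below, with the shmem_swp bookkeeping, locking and failure paths omitted; not verbatim kernel code):

	/* page is locked and sits in the tmpfs page cache; swap slot already allocated */
	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		remove_from_page_cache(page);	/* leave the tmpfs page cache */
		swap_duplicate(swap);		/* take a reference on the swap entry */
		page_cache_release(page);	/* drop the pagecache ref */
		set_page_dirty(page);		/* data still has to reach swap */
		unlock_page(page);
		return 0;
	}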
shmem.c likes to say set_page_dirty where swap_state.c liked to say
SetPageDirty: respect that diversity, which __set_page_dirty_no_writeback
makes moot (and implies we should lose that "shift page from clean_pages to
dirty_pages list" comment: it's on neither).
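
For context on that last point: SetPageDirty merely sets the PG_dirty flag, whereas set_page_dirty dispatches through the mapping's a_ops->set_page_dirty. With tmpfs using __set_page_dirty_no_writeback, that handler is presumably no more than a flag-setting stub along the lines below (an illustrative sketch under a hypothetical name, not a verbatim copy of the kernel function):

	/* sketch of a set_page_dirty backend that does no dirty accounting or writeback */
	static int set_page_dirty_no_writeback_sketch(struct page *page)
	{
		if (!PageDirty(page))
			SetPageDirty(page);
		return 0;
	}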
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 include/linux/swap.h |   15 ++++-----------
 mm/shmem.c           |   16 ++++++++++++----
 mm/swap_state.c      |   35 +----------------------------------
 3 files changed, 17 insertions(+), 49 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 16fd1209e9fa..353153ea0bd5 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -220,11 +220,9 @@ extern struct address_space swapper_space;
 #define total_swapcache_pages swapper_space.nrpages
 extern void show_swap_cache_info(void);
 extern int add_to_swap(struct page *, gfp_t);
+extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
 extern void __delete_from_swap_cache(struct page *);
 extern void delete_from_swap_cache(struct page *);
-extern int move_to_swap_cache(struct page *, swp_entry_t);
-extern int move_from_swap_cache(struct page *, unsigned long,
-					struct address_space *);
 extern void free_page_and_swap_cache(struct page *);
 extern void free_pages_and_swap_cache(struct page **, int);
 extern struct page *lookup_swap_cache(swp_entry_t);
@@ -319,15 +317,10 @@ static inline struct page *lookup_swap_cache(swp_entry_t swp)
 
 #define can_share_swap_page(p) (page_mapcount(p) == 1)
 
-static inline int move_to_swap_cache(struct page *page, swp_entry_t entry)
+static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
+							gfp_t gfp_mask)
 {
-	return 1;
-}
-
-static inline int move_from_swap_cache(struct page *page, unsigned long index,
-					struct address_space *mapping)
-{
-	return 1;
+	return -1;
 }
 
 static inline void __delete_from_swap_cache(struct page *page)
diff --git a/mm/shmem.c b/mm/shmem.c
index 7be94342bf06..e577adf4ae85 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -884,7 +884,9 @@ lost2:
 found:
 	idx += offset;
 	inode = &info->vfs_inode;
-	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
+	if (add_to_page_cache(page, inode->i_mapping, idx, GFP_ATOMIC) == 0) {
+		delete_from_swap_cache(page);
+		set_page_dirty(page);
 		info->flags |= SHMEM_PAGEIN;
 		shmem_swp_set(info, ptr + offset, 0);
 	}
@@ -972,7 +974,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	BUG_ON(!entry);
 	BUG_ON(entry->val);
 
-	if (move_to_swap_cache(page, swap) == 0) {
+	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
+		remove_from_page_cache(page);
 		shmem_swp_set(info, entry, swap.val);
 		shmem_swp_unmap(entry);
 		spin_unlock(&info->lock);
@@ -982,6 +985,9 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 			list_move_tail(&info->swaplist, &shmem_swaplist);
 			spin_unlock(&shmem_swaplist_lock);
 		}
+		swap_duplicate(swap);
+		page_cache_release(page);	/* pagecache ref */
+		set_page_dirty(page);
 		unlock_page(page);
 		return 0;
 	}
@@ -1217,13 +1223,15 @@ repeat:
 			SetPageUptodate(filepage);
 			set_page_dirty(filepage);
 			swap_free(swap);
-		} else if (!(error = move_from_swap_cache(
-					swappage, idx, mapping))) {
+		} else if (!(error = add_to_page_cache(
+				swappage, mapping, idx, GFP_ATOMIC))) {
 			info->flags |= SHMEM_PAGEIN;
 			shmem_swp_set(info, entry, 0);
 			shmem_swp_unmap(entry);
+			delete_from_swap_cache(swappage);
 			spin_unlock(&info->lock);
 			filepage = swappage;
+			set_page_dirty(filepage);
 			swap_free(swap);
 		} else {
 			shmem_swp_unmap(entry);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index c75eda2c9cc5..65b81c92738f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -67,8 +67,7 @@ void show_swap_cache_info(void)
  * add_to_swap_cache resembles add_to_page_cache on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-static int add_to_swap_cache(struct page *page, swp_entry_t entry,
-			gfp_t gfp_mask)
+int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 {
 	int error;
 
@@ -183,38 +182,6 @@ void delete_from_swap_cache(struct page *page)
 	page_cache_release(page);
 }
 
-/*
- * Strange swizzling function only for use by shmem_writepage
- */
-int move_to_swap_cache(struct page *page, swp_entry_t entry)
-{
-	int err = add_to_swap_cache(page, entry, GFP_ATOMIC);
-	if (!err) {
-		remove_from_page_cache(page);
-		page_cache_release(page);	/* pagecache ref */
-		if (!swap_duplicate(entry))
-			BUG();
-		SetPageDirty(page);
-	}
-	return err;
-}
-
-/*
- * Strange swizzling function for shmem_getpage (and shmem_unuse)
- */
-int move_from_swap_cache(struct page *page, unsigned long index,
-		struct address_space *mapping)
-{
-	int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
-	if (!err) {
-		delete_from_swap_cache(page);
-		/* shift page from clean_pages to dirty_pages list */
-		ClearPageDirty(page);
-		set_page_dirty(page);
-	}
-	return err;
-}
-
 /*
  * If we are the only user, then try to free up the swap cache.
  *