author		Christoph Lameter <clameter@sgi.com>	2006-06-23 05:03:36 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-23 10:42:50 -0400
commit		d75a0fcda2cfc71b50e16dc89e0c32c57d427e85
tree		cc9dda0a0e53e62c859bf7fcafe7b9c9f6de2352
parent		0697212a411c1dae03c27845f2de2f3adb32c331
[PATCH] Swapless page migration: rip out swap based logic
Rip the page migration logic out.
Remove all code that has to do with swapping during page migration.
This also guts the ability to migrate pages to swap. No one used that, so
let's let it go for good.
Page migration should be a bit broken after this patch.
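
For orientation, the swap-based path being ripped out worked roughly as
below: an anonymous page was first added to the swap cache so its ptes could
become swap ptes, and passing a NULL destination list meant the pages were
simply swapped out. A simplified sketch of that removed flow (condensed from
the mm/migrate.c hunks below, not literal kernel code):

	/* Condensed sketch of the removed swap-based flow */
	if (PageAnon(page) && !PageSwapCache(page))
		if (!add_to_swap(page, GFP_KERNEL))	/* needs a swap device */
			return -ENOMEM;
	if (!to)				/* no destination list: */
		return swap_page(page);		/* push the page out to swap */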
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	include/linux/rmap.h	 1
-rw-r--r--	mm/migrate.c		75
-rw-r--r--	mm/rmap.c		38
-rw-r--r--	mm/swapfile.c		 9
4 files changed, 3 insertions, 120 deletions
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 2d4c81a220db..bf97b0900014 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -91,7 +91,6 @@ static inline void page_dup_rmap(struct page *page)
  */
 int page_referenced(struct page *, int is_locked);
 int try_to_unmap(struct page *, int ignore_refs);
-void remove_from_swap(struct page *page);
 
 /*
  * Called from mm/filemap_xip.c to unmap empty zero page
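
The try_to_unmap() declaration kept above returns the SWAP_* status codes
that the removed swap_page() below switches on; for reference, this header of
the era defined them roughly as follows (a sketch, not part of this patch):

	#define SWAP_SUCCESS	0	/* all ptes to the page were removed */
	#define SWAP_AGAIN	1	/* could not unmap yet; try again */
	#define SWAP_FAIL	2	/* unmapping failed */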
diff --git a/mm/migrate.c b/mm/migrate.c
index 0a011e421bb4..81721a061d50 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -70,10 +70,6 @@ int isolate_lru_page(struct page *page, struct list_head *pagelist)
  */
 int migrate_prep(void)
 {
-	/* Must have swap device for migration */
-	if (nr_swap_pages <= 0)
-		return -ENODEV;
-
 	/*
 	 * Clear the LRU lists so pages can be isolated.
 	 * Note that pages may be moved off the LRU after we have
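
The removed nr_swap_pages check is what made page migration refuse to run on
swapless systems. A hypothetical caller (for illustration only, not actual
kernel code) saw the change like this:

	int err = migrate_prep();
	if (err)		/* before: -ENODEV without a swap device */
		return err;	/* after: migrate_prep() only drains the
				 * LRU pagevecs and cannot fail this way */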
@@ -246,52 +242,6 @@ out:
 }
 
 /*
- * swapout a single page
- * page is locked upon entry, unlocked on exit
- */
-static int swap_page(struct page *page)
-{
-	struct address_space *mapping = page_mapping(page);
-
-	if (page_mapped(page) && mapping)
-		if (try_to_unmap(page, 1) != SWAP_SUCCESS)
-			goto unlock_retry;
-
-	if (PageDirty(page)) {
-		/* Page is dirty, try to write it out here */
-		switch(pageout(page, mapping)) {
-		case PAGE_KEEP:
-		case PAGE_ACTIVATE:
-			goto unlock_retry;
-
-		case PAGE_SUCCESS:
-			goto retry;
-
-		case PAGE_CLEAN:
-			; /* try to free the page below */
-		}
-	}
-
-	if (PagePrivate(page)) {
-		if (!try_to_release_page(page, GFP_KERNEL) ||
-		    (!mapping && page_count(page) == 1))
-			goto unlock_retry;
-	}
-
-	if (remove_mapping(mapping, page)) {
-		/* Success */
-		unlock_page(page);
-		return 0;
-	}
-
-unlock_retry:
-	unlock_page(page);
-
-retry:
-	return -EAGAIN;
-}
-
-/*
  * Replace the page in the mapping.
  *
  * The number of remaining references must be:
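
The switch in the removed swap_page() is driven by pageout()'s status codes;
mm/vmscan.c of this era defined them roughly as below (reproduced for
reference, not part of this patch):

	typedef enum {
		PAGE_KEEP,	/* failed to write out; keep the page */
		PAGE_ACTIVATE,	/* move page back to the active list */
		PAGE_SUCCESS,	/* write started; page is under writeback */
		PAGE_CLEAN,	/* page is clean; try to free it directly */
	} pageout_t;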
@@ -517,8 +467,7 @@ static int fallback_migrate_page(struct address_space *mapping,
  * Two lists are passed to this function. The first list
  * contains the pages isolated from the LRU to be migrated.
  * The second list contains new pages that the pages isolated
- * can be moved to. If the second list is NULL then all
- * pages are swapped out.
+ * can be moved to.
  *
  * The function returns after 10 attempts or if no pages
  * are movable anymore because to has become empty
@@ -574,29 +523,11 @@ redo:
 		 * Only wait on writeback if we have already done a pass where
 		 * we we may have triggered writeouts for lots of pages.
 		 */
-		if (pass > 0) {
+		if (pass > 0)
 			wait_on_page_writeback(page);
-		} else {
+		else
 			if (PageWriteback(page))
 				goto unlock_page;
-		}
-
-		/*
-		 * Anonymous pages must have swap cache references otherwise
-		 * the information contained in the page maps cannot be
-		 * preserved.
-		 */
-		if (PageAnon(page) && !PageSwapCache(page)) {
-			if (!add_to_swap(page, GFP_KERNEL)) {
-				rc = -ENOMEM;
-				goto unlock_page;
-			}
-		}
-
-		if (!to) {
-			rc = swap_page(page);
-			goto next;
-		}
 
 		/*
 		 * Establish swap ptes for anonymous pages or destroy pte
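
The surviving context above ("Establish swap ptes for anonymous pages...")
relies on the mechanism from the parent commit (0697212): anonymous ptes are
preserved as migration entries that encode the page itself rather than a swap
slot. In rough outline (a simplified sketch of that mechanism, not code from
this patch):

	pte_t pte = ptep_clear_flush(vma, addr, ptep);
	swp_entry_t entry = make_migration_entry(page, pte_write(pte));

	/* pte now names the migrating page directly; no swap slot needed */
	set_pte_at(vma->vm_mm, addr, ptep, swp_entry_to_pte(entry));
	/* remove_migration_ptes() later rewrites it to the new page */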
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -205,44 +205,6 @@ out:
 	return anon_vma;
 }
 
-#ifdef CONFIG_MIGRATION
-/*
- * Remove an anonymous page from swap replacing the swap pte's
- * through real pte's pointing to valid pages and then releasing
- * the page from the swap cache.
- *
- * Must hold page lock on page and mmap_sem of one vma that contains
- * the page.
- */
-void remove_from_swap(struct page *page)
-{
-	struct anon_vma *anon_vma;
-	struct vm_area_struct *vma;
-	unsigned long mapping;
-
-	if (!PageSwapCache(page))
-		return;
-
-	mapping = (unsigned long)page->mapping;
-
-	if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
-		return;
-
-	/*
-	 * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
-	 */
-	anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
-	spin_lock(&anon_vma->lock);
-
-	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
-		remove_vma_swap(vma, page);
-
-	spin_unlock(&anon_vma->lock);
-	delete_from_swap_cache(page);
-}
-EXPORT_SYMBOL(remove_from_swap);
-#endif
-
 /*
  * At what user virtual address is page expected in vma?
  */
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e3b1362372c2..fbceed67a075 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -618,15 +618,6 @@ static int unuse_mm(struct mm_struct *mm,
 	return 0;
 }
 
-#ifdef CONFIG_MIGRATION
-int remove_vma_swap(struct vm_area_struct *vma, struct page *page)
-{
-	swp_entry_t entry = { .val = page_private(page) };
-
-	return unuse_vma(vma, entry, page);
-}
-#endif
-
 /*
  * Scan swap_map from current position to next entry still in use.
  * Recycle to start on reaching the end, returning 0 when empty.