aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@engr.sgi.com>2006-01-08 04:00:55 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-01-08 23:12:42 -0500
commitd0d963281ccb22e6f339bfdd75c6b2e31351929f (patch)
treeddeb3d1cf73675d71b21be8893b16cf5014c1bc5
parentd498471133ff1f9586a06820beaeebc575fe2814 (diff)
[PATCH] SwapMig: Switch error handling in migrate_pages to use -Exx
Use -Exxx instead of numeric return codes and clean up the code in migrate_pages() using -Exx error codes. Consolidate successful migration handling. Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--mm/vmscan.c56
1 file changed, 34 insertions, 22 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5eecb514ccea..bf903b2d198f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -606,10 +606,6 @@ int putback_lru_pages(struct list_head *l)
606/* 606/*
607 * swapout a single page 607 * swapout a single page
608 * page is locked upon entry, unlocked on exit 608 * page is locked upon entry, unlocked on exit
609 *
610 * return codes:
611 * 0 = complete
612 * 1 = retry
613 */ 609 */
614static int swap_page(struct page *page) 610static int swap_page(struct page *page)
615{ 611{
@@ -650,7 +646,7 @@ unlock_retry:
650 unlock_page(page); 646 unlock_page(page);
651 647
652retry: 648retry:
653 return 1; 649 return -EAGAIN;
654} 650}
655/* 651/*
656 * migrate_pages 652 * migrate_pages
@@ -669,6 +665,8 @@ retry:
669 * is only swapping out pages and never touches the second 665 * is only swapping out pages and never touches the second
670 * list. The direct migration patchset 666 * list. The direct migration patchset
671 * extends this function to avoid the use of swap. 667 * extends this function to avoid the use of swap.
668 *
669 * Return: Number of pages not migrated when "to" ran empty.
672 */ 670 */
673int migrate_pages(struct list_head *from, struct list_head *to, 671int migrate_pages(struct list_head *from, struct list_head *to,
674 struct list_head *moved, struct list_head *failed) 672 struct list_head *moved, struct list_head *failed)
@@ -679,6 +677,7 @@ int migrate_pages(struct list_head *from, struct list_head *to,
679 struct page *page; 677 struct page *page;
680 struct page *page2; 678 struct page *page2;
681 int swapwrite = current->flags & PF_SWAPWRITE; 679 int swapwrite = current->flags & PF_SWAPWRITE;
680 int rc;
682 681
683 if (!swapwrite) 682 if (!swapwrite)
684 current->flags |= PF_SWAPWRITE; 683 current->flags |= PF_SWAPWRITE;
@@ -689,22 +688,23 @@ redo:
689 list_for_each_entry_safe(page, page2, from, lru) { 688 list_for_each_entry_safe(page, page2, from, lru) {
690 cond_resched(); 689 cond_resched();
691 690
692 if (page_count(page) == 1) { 691 rc = 0;
692 if (page_count(page) == 1)
693 /* page was freed from under us. So we are done. */ 693 /* page was freed from under us. So we are done. */
694 list_move(&page->lru, moved); 694 goto next;
695 continue; 695
696 }
697 /* 696 /*
698 * Skip locked pages during the first two passes to give the 697 * Skip locked pages during the first two passes to give the
699 * functions holding the lock time to release the page. Later we 698 * functions holding the lock time to release the page. Later we
700 * use lock_page() to have a higher chance of acquiring the 699 * use lock_page() to have a higher chance of acquiring the
701 * lock. 700 * lock.
702 */ 701 */
702 rc = -EAGAIN;
703 if (pass > 2) 703 if (pass > 2)
704 lock_page(page); 704 lock_page(page);
705 else 705 else
706 if (TestSetPageLocked(page)) 706 if (TestSetPageLocked(page))
707 goto retry_later; 707 goto next;
708 708
709 /* 709 /*
710 * Only wait on writeback if we have already done a pass where 710 * Only wait on writeback if we have already done a pass where
@@ -713,18 +713,19 @@ redo:
713 if (pass > 0) { 713 if (pass > 0) {
714 wait_on_page_writeback(page); 714 wait_on_page_writeback(page);
715 } else { 715 } else {
716 if (PageWriteback(page)) { 716 if (PageWriteback(page))
717 unlock_page(page); 717 goto unlock_page;
718 goto retry_later;
719 }
720 } 718 }
721 719
720 /*
721 * Anonymous pages must have swap cache references otherwise
722 * the information contained in the page maps cannot be
723 * preserved.
724 */
722 if (PageAnon(page) && !PageSwapCache(page)) { 725 if (PageAnon(page) && !PageSwapCache(page)) {
723 if (!add_to_swap(page, GFP_KERNEL)) { 726 if (!add_to_swap(page, GFP_KERNEL)) {
724 unlock_page(page); 727 rc = -ENOMEM;
725 list_move(&page->lru, failed); 728 goto unlock_page;
726 nr_failed++;
727 continue;
728 } 729 }
729 } 730 }
730 731
@@ -732,12 +733,23 @@ redo:
732 * Page is properly locked and writeback is complete. 733 * Page is properly locked and writeback is complete.
733 * Try to migrate the page. 734 * Try to migrate the page.
734 */ 735 */
735 if (!swap_page(page)) { 736 rc = swap_page(page);
737 goto next;
738
739unlock_page:
740 unlock_page(page);
741
742next:
743 if (rc == -EAGAIN) {
744 retry++;
745 } else if (rc) {
746 /* Permanent failure */
747 list_move(&page->lru, failed);
748 nr_failed++;
749 } else {
750 /* Success */
736 list_move(&page->lru, moved); 751 list_move(&page->lru, moved);
737 continue;
738 } 752 }
739retry_later:
740 retry++;
741 } 753 }
742 if (retry && pass++ < 10) 754 if (retry && pass++ < 10)
743 goto redo; 755 goto redo;