author	Christoph Lameter <clameter@engr.sgi.com>	2006-03-14 22:50:19 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-15 00:43:02 -0500
commit	4983da07f1e2e8dc81cb9d640fbf35b899cdbdf2 (patch)
tree	9c238114f029d1d22bc55f47e8d95ef7335c540d /mm/vmscan.c
parent	e843e280cbe218fc8387339806d344708dee348a (diff)
[PATCH] page migration: fail if page is in a vma flagged VM_LOCKED
Page migration currently just retries a couple of times if try_to_unmap() fails, without inspecting the return code. However, SWAP_FAIL indicates that the page is in a vma that has the VM_LOCKED flag set (if ignore_refs == 1). We can check for that return code and avoid retrying the migration.

migrate_page_remove_references() now needs to return the reason why the failure occurred, so switch migrate_page_remove_references() to -Exx style error codes.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
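For context, here is a minimal caller-side sketch (not part of this patch; the helper name and retry count are made up for illustration) of how the new return codes let a migration loop retry only transient failures:

/*
 * Illustrative sketch only: distinguish transient from permanent
 * failures when calling migrate_page() after this patch.
 */
static int attempt_migration(struct page *newpage, struct page *page)
{
	int pass, rc = -EAGAIN;

	/* -EAGAIN is transient: references may drop, so retry a few times */
	for (pass = 0; pass < 10 && rc == -EAGAIN; pass++)
		rc = migrate_page(newpage, page);

	/*
	 * -EPERM means the page is mapped by a VM_LOCKED vma, so further
	 * retries cannot succeed; the caller just gives up on this page.
	 */
	return rc;
}

The point of the -Exx codes is exactly this distinction: -EAGAIN marks a condition worth retrying, while -EPERM from a VM_LOCKED vma is a permanent failure.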
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7ccf763bb30b..4fe7e3aa02e2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -700,7 +700,7 @@ int migrate_page_remove_references(struct page *newpage,
 	 * the page.
 	 */
 	if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
-		return 1;
+		return -EAGAIN;
 
 	/*
 	 * Establish swap ptes for anonymous pages or destroy pte
@@ -721,13 +721,15 @@ int migrate_page_remove_references(struct page *newpage,
 	 * If the page was not migrated then the PageSwapCache bit
 	 * is still set and the operation may continue.
 	 */
-	try_to_unmap(page, 1);
+	if (try_to_unmap(page, 1) == SWAP_FAIL)
+		/* A vma has VM_LOCKED set -> Permanent failure */
+		return -EPERM;
 
 	/*
 	 * Give up if we were unable to remove all mappings.
 	 */
 	if (page_mapcount(page))
-		return 1;
+		return -EAGAIN;
 
 	write_lock_irq(&mapping->tree_lock);
 
@@ -738,7 +740,7 @@ int migrate_page_remove_references(struct page *newpage,
 	if (!page_mapping(page) || page_count(page) != nr_refs ||
 			*radix_pointer != page) {
 		write_unlock_irq(&mapping->tree_lock);
-		return 1;
+		return -EAGAIN;
 	}
 
 	/*
@@ -813,10 +815,14 @@ EXPORT_SYMBOL(migrate_page_copy);
  */
 int migrate_page(struct page *newpage, struct page *page)
 {
+	int rc;
+
 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
 
-	if (migrate_page_remove_references(newpage, page, 2))
-		return -EAGAIN;
+	rc = migrate_page_remove_references(newpage, page, 2);
+
+	if (rc)
+		return rc;
 
 	migrate_page_copy(newpage, page);
 