author     Hugh Dickins <hughd@google.com>  2015-11-05 21:49:49 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-11-05 22:34:48 -0500
commit     7db7671f835ccad66db20154ac1274140937d9b7 (patch)
tree       fce13f03cda6adf11e7be3cdab26ac7eb91a1145 /mm/migrate.c
parent     2def7424c9be0069831380823fdb5cf72103b919 (diff)
mm: page migration trylock newpage at same level as oldpage
Clean up page migration a little by moving the trylock of newpage from move_to_new_page() into __unmap_and_move(), where the old page has been locked. Adjust unmap_and_move_huge_page() and balloon_page_migrate() accordingly.

But make one kind-of-functional change on the way: whereas trylock of newpage used to BUG() if it failed, now simply return -EAGAIN if so. Cutting out BUG()s is good, right? But, to be honest, this is really to extend the usefulness of the custom put_new_page feature, allowing a pool of new pages to be shared perhaps with racing uses.

Use an "else" instead of that "skip_unmap" label.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rafael Aquini <aquini@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
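For orientation before the diff: after this patch the locking in __unmap_and_move() ends up shaped roughly like the condensed sketch below. This is only a sketch of the resulting control flow, not the kernel function itself; __unmap_and_move_sketch is a made-up name and everything between the two trylocks is elided.

/*
 * Condensed sketch of __unmap_and_move() after this patch (illustration only).
 * The old page is locked first, as before; newpage is now trylocked at the
 * same level, and a failed trylock means -EAGAIN instead of BUG().
 */
int __unmap_and_move_sketch(struct page *page, struct page *newpage,
                            int force, enum migrate_mode mode)
{
        int rc = -EAGAIN;

        if (!trylock_page(page))
                goto out;

        if (unlikely(!trylock_page(newpage)))
                goto out_unlock;

        /* ... unmap the old page and call move_to_new_page() here ... */

        unlock_page(newpage);   /* the real code reaches this via out_unlock_both: */
out_unlock:
        unlock_page(page);
out:
        return rc;
}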
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  46
1 file changed, 26 insertions(+), 20 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 2f2e2236daf7..6d7774ef0e6c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -727,13 +727,8 @@ static int move_to_new_page(struct page *newpage, struct page *page,
         struct address_space *mapping;
         int rc;

-        /*
-         * Block others from accessing the page when we get around to
-         * establishing additional references. We are the only one
-         * holding a reference to the new page at this point.
-         */
-        if (!trylock_page(newpage))
-                BUG();
+        VM_BUG_ON_PAGE(!PageLocked(page), page);
+        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

         /* Prepare mapping for the new page.*/
         newpage->index = page->index;
@@ -774,9 +769,6 @@ static int move_to_new_page(struct page *newpage, struct page *page,
                 remove_migration_ptes(page, newpage);
                 page->mapping = NULL;
         }
-
-        unlock_page(newpage);
-
         return rc;
 }

@@ -861,6 +853,17 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                 }
         }

+        /*
+         * Block others from accessing the new page when we get around to
+         * establishing additional references. We are usually the only one
+         * holding a reference to newpage at this point. We used to have a BUG
+         * here if trylock_page(newpage) fails, but would like to allow for
+         * cases where there might be a race with the previous use of newpage.
+         * This is much like races on refcount of oldpage: just don't BUG().
+         */
+        if (unlikely(!trylock_page(newpage)))
+                goto out_unlock;
+
         if (unlikely(isolated_balloon_page(page))) {
                 /*
                  * A ballooned page does not need any special attention from
@@ -870,7 +873,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                  * the page migration right away (proteced by page lock).
                  */
                 rc = balloon_page_migrate(newpage, page, mode);
-                goto out_unlock;
+                goto out_unlock_both;
         }

         /*
@@ -889,30 +892,27 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                 VM_BUG_ON_PAGE(PageAnon(page), page);
                 if (page_has_private(page)) {
                         try_to_free_buffers(page);
-                        goto out_unlock;
+                        goto out_unlock_both;
                 }
-                goto skip_unmap;
-        }
-
-        /* Establish migration ptes or remove ptes */
-        if (page_mapped(page)) {
+        } else if (page_mapped(page)) {
+                /* Establish migration ptes */
                 try_to_unmap(page,
                         TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
                 page_was_mapped = 1;
         }

-skip_unmap:
         if (!page_mapped(page))
                 rc = move_to_new_page(newpage, page, page_was_mapped, mode);

         if (rc && page_was_mapped)
                 remove_migration_ptes(page, page);

+out_unlock_both:
+        unlock_page(newpage);
+out_unlock:
         /* Drop an anon_vma reference if we took one */
         if (anon_vma)
                 put_anon_vma(anon_vma);
-
-out_unlock:
         unlock_page(page);
 out:
         return rc;
@@ -1056,6 +1056,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
         if (PageAnon(hpage))
                 anon_vma = page_get_anon_vma(hpage);

+        if (unlikely(!trylock_page(new_hpage)))
+                goto put_anon;
+
         if (page_mapped(hpage)) {
                 try_to_unmap(hpage,
                         TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
@@ -1068,6 +1071,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
         if (rc != MIGRATEPAGE_SUCCESS && page_was_mapped)
                 remove_migration_ptes(hpage, hpage);

+        unlock_page(new_hpage);
+
+put_anon:
         if (anon_vma)
                 put_anon_vma(anon_vma);

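The put_new_page remark in the commit message refers to callers of migrate_pages() that supply their own allocation and release callbacks. Below is a hypothetical sketch of a pool-backed pair, purely for illustration: demo_page_pool, demo_pool_get_page() and demo_pool_put_page() are invented names, and the parameter lists are assumed to match the new_page_t/free_page_t callback typedefs of this kernel generation. The point of tolerating a failed trylock on newpage is that a page handed out from such a shared pool, still transiently locked by a racing user, now costs one -EAGAIN retry instead of a BUG().

/* Hypothetical pool-backed callbacks for migrate_pages(); illustration only. */
struct demo_page_pool {
        spinlock_t lock;
        struct list_head pages;         /* pre-allocated target pages */
};

static struct page *demo_pool_get_page(struct page *old, unsigned long private,
                                        int **result)
{
        struct demo_page_pool *pool = (struct demo_page_pool *)private;
        struct page *newpage = NULL;

        spin_lock(&pool->lock);
        if (!list_empty(&pool->pages)) {
                newpage = list_first_entry(&pool->pages, struct page, lru);
                list_del(&newpage->lru);
        }
        spin_unlock(&pool->lock);
        return newpage;         /* NULL is treated as allocation failure */
}

static void demo_pool_put_page(struct page *newpage, unsigned long private)
{
        struct demo_page_pool *pool = (struct demo_page_pool *)private;

        /* An unused target page goes back to the pool for the next attempt. */
        spin_lock(&pool->lock);
        list_add(&newpage->lru, &pool->pages);
        spin_unlock(&pool->lock);
}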