about summary refs log tree commit diff stats
path: root/mm/migrate.c
diff options
context:
space:
mode:
author: Hugh Dickins <hughd@google.com> 2014-12-12 19:56:19 -0500
committer: Linus Torvalds <torvalds@linux-foundation.org> 2014-12-13 15:42:49 -0500
commit: 2ebba6b7e1d98724d266ae048d8af4f7ca95cafd (patch)
tree: e01da37ea8493be9407cdec40e6b3f2b97db05ab /mm/migrate.c
parent: 5cec38ac866bfb8775638e71a86e4d8cac30caae (diff)
mm: unmapped page migration avoid unmap+remap overhead
Page migration's __unmap_and_move(), and rmap's try_to_unmap(), were created for use on pages almost certainly mapped into userspace. But nowadays compaction often applies them to unmapped page cache pages: which may exacerbate contention on i_mmap_rwsem quite unnecessarily, since try_to_unmap_file() makes no preliminary page_mapped() check. Now check page_mapped() in __unmap_and_move(); and avoid repeating the same overhead in rmap_walk_file() - don't remove_migration_ptes() when we never inserted any. (The PageAnon(page) comment blocks now look even sillier than before, but clean that up on some other occasion. And note in passing that try_to_unmap_one() does not use a migration entry when PageSwapCache, so remove_migration_ptes() will then not update that swap entry to newpage pte: not a big deal, but something else to clean up later.) Davidlohr remarked in "mm,fs: introduce helpers around the i_mmap_mutex" conversion to i_mmap_rwsem, that "The biggest winner of these changes is migration": a part of the reason might be all of that unnecessary taking of i_mmap_mutex in page migration; and it's rather a shame that I didn't get around to sending this patch in before his - this one is much less useful after Davidlohr's conversion to rwsem, but still good. Signed-off-by: Hugh Dickins <hughd@google.com> Cc: Davidlohr Bueso <dave@stgolabs.net> Cc: Rik van Riel <riel@redhat.com> Cc: Mel Gorman <mel@csn.ul.ie> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  28
1 files changed, 18 insertions, 10 deletions
diff --git a/mm/migrate.c b/mm/migrate.c
index 01439953abf5..253474c22239 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -746,7 +746,7 @@ static int fallback_migrate_page(struct address_space *mapping,
746 * MIGRATEPAGE_SUCCESS - success 746 * MIGRATEPAGE_SUCCESS - success
747 */ 747 */
748static int move_to_new_page(struct page *newpage, struct page *page, 748static int move_to_new_page(struct page *newpage, struct page *page,
749 int remap_swapcache, enum migrate_mode mode) 749 int page_was_mapped, enum migrate_mode mode)
750{ 750{
751 struct address_space *mapping; 751 struct address_space *mapping;
752 int rc; 752 int rc;
@@ -784,7 +784,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
784 newpage->mapping = NULL; 784 newpage->mapping = NULL;
785 } else { 785 } else {
786 mem_cgroup_migrate(page, newpage, false); 786 mem_cgroup_migrate(page, newpage, false);
787 if (remap_swapcache) 787 if (page_was_mapped)
788 remove_migration_ptes(page, newpage); 788 remove_migration_ptes(page, newpage);
789 page->mapping = NULL; 789 page->mapping = NULL;
790 } 790 }
@@ -798,7 +798,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
798 int force, enum migrate_mode mode) 798 int force, enum migrate_mode mode)
799{ 799{
800 int rc = -EAGAIN; 800 int rc = -EAGAIN;
801 int remap_swapcache = 1; 801 int page_was_mapped = 0;
802 struct anon_vma *anon_vma = NULL; 802 struct anon_vma *anon_vma = NULL;
803 803
804 if (!trylock_page(page)) { 804 if (!trylock_page(page)) {
@@ -870,7 +870,6 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
870 * migrated but are not remapped when migration 870 * migrated but are not remapped when migration
871 * completes 871 * completes
872 */ 872 */
873 remap_swapcache = 0;
874 } else { 873 } else {
875 goto out_unlock; 874 goto out_unlock;
876 } 875 }
@@ -910,13 +909,17 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
910 } 909 }
911 910
912 /* Establish migration ptes or remove ptes */ 911 /* Establish migration ptes or remove ptes */
913 try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); 912 if (page_mapped(page)) {
913 try_to_unmap(page,
914 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
915 page_was_mapped = 1;
916 }
914 917
915skip_unmap: 918skip_unmap:
916 if (!page_mapped(page)) 919 if (!page_mapped(page))
917 rc = move_to_new_page(newpage, page, remap_swapcache, mode); 920 rc = move_to_new_page(newpage, page, page_was_mapped, mode);
918 921
919 if (rc && remap_swapcache) 922 if (rc && page_was_mapped)
920 remove_migration_ptes(page, page); 923 remove_migration_ptes(page, page);
921 924
922 /* Drop an anon_vma reference if we took one */ 925 /* Drop an anon_vma reference if we took one */
@@ -1017,6 +1020,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
1017{ 1020{
1018 int rc = 0; 1021 int rc = 0;
1019 int *result = NULL; 1022 int *result = NULL;
1023 int page_was_mapped = 0;
1020 struct page *new_hpage; 1024 struct page *new_hpage;
1021 struct anon_vma *anon_vma = NULL; 1025 struct anon_vma *anon_vma = NULL;
1022 1026
@@ -1047,12 +1051,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
1047 if (PageAnon(hpage)) 1051 if (PageAnon(hpage))
1048 anon_vma = page_get_anon_vma(hpage); 1052 anon_vma = page_get_anon_vma(hpage);
1049 1053
1050 try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); 1054 if (page_mapped(hpage)) {
1055 try_to_unmap(hpage,
1056 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1057 page_was_mapped = 1;
1058 }
1051 1059
1052 if (!page_mapped(hpage)) 1060 if (!page_mapped(hpage))
1053 rc = move_to_new_page(new_hpage, hpage, 1, mode); 1061 rc = move_to_new_page(new_hpage, hpage, page_was_mapped, mode);
1054 1062
1055 if (rc != MIGRATEPAGE_SUCCESS) 1063 if (rc != MIGRATEPAGE_SUCCESS && page_was_mapped)
1056 remove_migration_ptes(hpage, hpage); 1064 remove_migration_ptes(hpage, hpage);
1057 1065
1058 if (anon_vma) 1066 if (anon_vma)