author    Jérôme Glisse <jglisse@redhat.com>    2017-09-08 19:12:06 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2017-09-08 21:26:46 -0400
commit    2916ecc0f9d435d849c98f4da50e453124c87531
tree      e9da172a4df499b85e62a57bce6e6591d26b32cf
parent    858b54dabf4363daa3a97b9a722130a8e7cea8c9
mm/migrate: new migrate mode MIGRATE_SYNC_NO_COPY
Introduce a new migration mode that allows offloading the copy to a
device DMA engine. This changes the workflow of migration, and not all
address_space migratepage callbacks can support it. The mode is intended
to be used by migrate_vma(), which is itself used for things like HMM
(see include/linux/hmm.h). No additional per-filesystem migratepage
testing is needed.

MIGRATE_SYNC_NO_COPY is disabled in all the problematic migratepage()
callbacks, with a comment in each explaining why (part of this patch).
Any callback that wishes to support the new mode needs to be aware of
how its migration flow differs from the other modes. Some of these
callbacks take extra locks while copying (aio, zsmalloc, balloon, ...),
and for DMA to be effective you want to copy multiple pages in one DMA
operation. In the problematic cases, however, the extra lock cannot
easily be held across multiple calls to the callback.

The usual flow is:

For each page {
	1 - lock page
	2 - call migratepage() callback
	3 - (extra locking in some migratepage() callbacks)
	4 - migrate page state (freeze refcount, update page cache,
	    buffer heads, ...)
	5 - copy page
	6 - (unlock any extra lock taken by the migratepage() callback)
	7 - return from migratepage() callback
	8 - unlock page
}

The new mode MIGRATE_SYNC_NO_COPY:

1 - lock multiple pages
For each page {
	2 - call migratepage() callback
	3 - abort in all problematic migratepage() callbacks
	4 - migrate page state (freeze refcount, update page cache,
	    buffer heads, ...)
}
// finished all calls to migratepage() callback
5 - DMA copy multiple pages
6 - unlock all the pages

Supporting MIGRATE_SYNC_NO_COPY in the problematic cases would require a
new callback, migratepages() for instance, that deals with multiple
pages in one transaction. Because the problematic cases do not matter
for current users, I did not want to complicate this patchset further
for no good reason.

Link: http://lkml.kernel.org/r/20170817000548.32038-14-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Nellans <dnellans@nvidia.com>
Cc: Evgeny Baskakov <ebaskakov@nvidia.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mark Hairgrove <mhairgrove@nvidia.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Sherry Cheung <SCheung@nvidia.com>
Cc: Subhash Gutti <sgutti@nvidia.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Bob Liu <liubo95@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
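As a rough illustration of the two flows above, here is a minimal,
self-contained C sketch. Everything in it (struct page, lock_page(),
migratepage_cb(), dma_copy_pages()) is a hypothetical printf stub, not
the kernel API touched by this patch:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for the kernel objects, reduced to stubs. */
struct page { int id; };

static void lock_page(struct page *p)   { printf("lock   page %d\n", p->id); }
static void unlock_page(struct page *p) { printf("unlock page %d\n", p->id); }
static void migratepage_cb(struct page *dst, struct page *src, int copy)
{
	printf("migrate state %d -> %d%s\n", src->id, dst->id,
	       copy ? " (+ CPU copy)" : "");
}
static void dma_copy_pages(struct page *dst, struct page *src, size_t n)
{
	printf("one DMA copy of %zu pages\n", n);
	(void)dst; (void)src;
}

/* Usual flow: lock, migrate state, copy, unlock -- one page at a time. */
static void migrate_usual(struct page *dst, struct page *src, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		lock_page(&src[i]);
		migratepage_cb(&dst[i], &src[i], 1); /* copy inside callback */
		unlock_page(&src[i]);
	}
}

/* MIGRATE_SYNC_NO_COPY flow: migrate the state of all pages first,
 * then issue one batched DMA copy, then unlock everything. */
static void migrate_no_copy(struct page *dst, struct page *src, size_t n)
{
	for (size_t i = 0; i < n; i++)
		lock_page(&src[i]);
	for (size_t i = 0; i < n; i++)
		migratepage_cb(&dst[i], &src[i], 0); /* state only */
	dma_copy_pages(dst, src, n);
	for (size_t i = 0; i < n; i++)
		unlock_page(&src[i]);
}

int main(void)
{
	struct page src[2] = { {0}, {1} }, dst[2] = { {10}, {11} };

	migrate_usual(dst, src, 2);
	migrate_no_copy(dst, src, 2);
	return 0;
}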
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--	mm/migrate.c	52
1 file changed, 40 insertions(+), 12 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 1088cef6ef8b..71de36cfb673 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -634,15 +634,10 @@ static void copy_huge_page(struct page *dst, struct page *src)
 /*
  * Copy the page to its new location
  */
-void migrate_page_copy(struct page *newpage, struct page *page)
+void migrate_page_states(struct page *newpage, struct page *page)
 {
 	int cpupid;
 
-	if (PageHuge(page) || PageTransHuge(page))
-		copy_huge_page(newpage, page);
-	else
-		copy_highpage(newpage, page);
-
 	if (PageError(page))
 		SetPageError(newpage);
 	if (PageReferenced(page))
@@ -696,6 +691,17 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 
 	mem_cgroup_migrate(page, newpage);
 }
+EXPORT_SYMBOL(migrate_page_states);
+
+void migrate_page_copy(struct page *newpage, struct page *page)
+{
+	if (PageHuge(page) || PageTransHuge(page))
+		copy_huge_page(newpage, page);
+	else
+		copy_highpage(newpage, page);
+
+	migrate_page_states(newpage, page);
+}
 EXPORT_SYMBOL(migrate_page_copy);
 
 /************************************************************
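The split above lets a DMA-capable caller migrate the page metadata with
migrate_page_states() and perform the data copy itself. A rough sketch
under that assumption; device_dma_copy() is a hypothetical driver helper,
not part of this patch:

/* Sketch only: migrate the state now, offload the data copy to a
 * device DMA engine instead of calling copy_highpage(). */
static int my_migratepage_no_copy(struct page *newpage, struct page *page)
{
	migrate_page_states(newpage, page);     /* flags, cpupid, cgroup, ... */
	return device_dma_copy(newpage, page);  /* hypothetical DMA helper */
}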
@@ -721,7 +727,10 @@ int migrate_page(struct address_space *mapping,
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
 
-	migrate_page_copy(newpage, page);
+	if (mode != MIGRATE_SYNC_NO_COPY)
+		migrate_page_copy(newpage, page);
+	else
+		migrate_page_states(newpage, page);
 	return MIGRATEPAGE_SUCCESS;
 }
 EXPORT_SYMBOL(migrate_page);
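A condensed sketch of how a migrate_vma()-style caller could request the
new mode; the wrapper and its error handling are hypothetical, only the
migrate_page() call and MIGRATE_SYNC_NO_COPY come from this patch:

/* Only the page state is migrated here; the caller is expected to
 * batch the actual data copies into one DMA operation afterwards. */
static int try_migrate_one(struct address_space *mapping,
			   struct page *newpage, struct page *page)
{
	int rc = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);

	return rc == MIGRATEPAGE_SUCCESS ? 0 : rc;
}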
@@ -771,12 +780,15 @@ int buffer_migrate_page(struct address_space *mapping,
 
 	SetPagePrivate(newpage);
 
-	migrate_page_copy(newpage, page);
+	if (mode != MIGRATE_SYNC_NO_COPY)
+		migrate_page_copy(newpage, page);
+	else
+		migrate_page_states(newpage, page);
 
 	bh = head;
 	do {
 		unlock_buffer(bh);
 		put_bh(bh);
 		bh = bh->b_this_page;
 
 	} while (bh != head);
@@ -835,8 +847,13 @@ static int fallback_migrate_page(struct address_space *mapping,
 {
 	if (PageDirty(page)) {
 		/* Only writeback pages in full synchronous migration */
-		if (mode != MIGRATE_SYNC)
+		switch (mode) {
+		case MIGRATE_SYNC:
+		case MIGRATE_SYNC_NO_COPY:
+			break;
+		default:
 			return -EBUSY;
+		}
 		return writeout(mapping, page);
 	}
 
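The switch replaces the old mode != MIGRATE_SYNC test because there are
now two fully synchronous modes that may block on writeback. The same
check, factored into a helper purely for illustration (a sketch, not
part of the patch; MIGRATE_ASYNC and MIGRATE_SYNC_LIGHT are the
remaining modes, which must not block):

static bool migrate_mode_may_block(enum migrate_mode mode)
{
	switch (mode) {
	case MIGRATE_SYNC:
	case MIGRATE_SYNC_NO_COPY:
		return true;    /* allowed to wait on writeback/locks */
	default:
		return false;   /* MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT */
	}
}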
@@ -973,7 +990,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		 * the retry loop is too short and in the sync-light case,
 		 * the overhead of stalling is too much
 		 */
-		if (mode != MIGRATE_SYNC) {
+		switch (mode) {
+		case MIGRATE_SYNC:
+		case MIGRATE_SYNC_NO_COPY:
+			break;
+		default:
 			rc = -EBUSY;
 			goto out_unlock;
 		}
@@ -1243,8 +1264,15 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		return -ENOMEM;
 
 	if (!trylock_page(hpage)) {
-		if (!force || mode != MIGRATE_SYNC)
+		if (!force)
 			goto out;
+		switch (mode) {
+		case MIGRATE_SYNC:
+		case MIGRATE_SYNC_NO_COPY:
+			break;
+		default:
+			goto out;
+		}
 		lock_page(hpage);
 	}
 