| author | Jérôme Glisse <jglisse@redhat.com> | 2017-09-08 19:12:17 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-09-08 21:26:46 -0400 |
| commit | a5430dda8a3a1cdd532e37270e6f36436241b6e7 (patch) | |
| tree | 86a2edaf2d6aa11f4e76bcc3b0103cfb0bb2de09 /mm/rmap.c | |
| parent | 8c3328f1f36a5efe817ad4e06497af601936a460 (diff) | |
mm/migrate: support un-addressable ZONE_DEVICE page in migration
Allow unmapping and restoring the special swap entry of un-addressable
ZONE_DEVICE memory.
Link: http://lkml.kernel.org/r/20170817000548.32038-17-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Nellans <dnellans@nvidia.com>
Cc: Evgeny Baskakov <ebaskakov@nvidia.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Mark Hairgrove <mhairgrove@nvidia.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Sherry Cheung <SCheung@nvidia.com>
Cc: Subhash Gutti <sgutti@nvidia.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Bob Liu <liubo95@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r-- | mm/rmap.c | 26 |
1 file changed, 26 insertions, 0 deletions
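The mm/rmap.c hunks below cover only the unmap side: try_to_unmap_one() replaces the device-private PTE with a migration entry. For context, the restore side lives in remove_migration_pte() in mm/migrate.c, which is outside this diffstat. The following is a rough sketch of that branch, assuming the device-private helpers introduced earlier in this series (make_device_private_entry() and friends); it is a paraphrase, not a verbatim quote of the commit:

```c
/*
 * Sketch of the restore path in mm/migrate.c:remove_migration_pte()
 * (not part of the mm/rmap.c diff shown below).  When the migration
 * target is an un-addressable device-private page, the migration entry
 * is replaced by a device-private swap entry rather than a normal PTE,
 * because the page has no CPU-addressable mapping.
 */
if (unlikely(is_zone_device_page(new))) {
	if (is_device_private_page(new)) {
		/* Rebuild the special swap entry for the device page. */
		entry = make_device_private_entry(new, pte_write(pte));
		pte = swp_entry_to_pte(entry);
	}
} else {
	flush_dcache_page(new);
}
```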
@@ -63,6 +63,7 @@
 #include <linux/hugetlb.h>
 #include <linux/backing-dev.h>
 #include <linux/page_idle.h>
+#include <linux/memremap.h>
 
 #include <asm/tlbflush.h>
 
@@ -1346,6 +1347,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
 		return true;
 
+	if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
+	    is_zone_device_page(page) && !is_device_private_page(page))
+		return true;
+
 	if (flags & TTU_SPLIT_HUGE_PMD) {
 		split_huge_pmd_address(vma, address,
 				flags & TTU_SPLIT_FREEZE, page);
@@ -1403,6 +1408,27 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		address = pvmw.address;
 
 
+		if (IS_ENABLED(CONFIG_MIGRATION) &&
+		    (flags & TTU_MIGRATION) &&
+		    is_zone_device_page(page)) {
+			swp_entry_t entry;
+			pte_t swp_pte;
+
+			pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);
+
+			/*
+			 * Store the pfn of the page in a special migration
+			 * pte. do_swap_page() will wait until the migration
+			 * pte is removed and then restart fault handling.
+			 */
+			entry = make_migration_entry(page, 0);
+			swp_pte = swp_entry_to_pte(entry);
+			if (pte_soft_dirty(pteval))
+				swp_pte = pte_swp_mksoft_dirty(swp_pte);
+			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
+			goto discard;
+		}
+
 		if (!(flags & TTU_IGNORE_ACCESS)) {
 			if (ptep_clear_flush_young_notify(vma, address,
 						pvmw.pte)) {
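The comment in the last hunk refers to the fault side: if a thread touches the unmapped address while migration is in flight, do_swap_page() recognizes the migration entry and blocks until it is removed, then retries the fault. A minimal sketch of that check, assuming the mm/memory.c code of this era (paraphrased, not quoted from the commit):

```c
/*
 * Sketch of the fault-side handling in mm/memory.c:do_swap_page().
 * A migration entry is a non-swap swap entry; the faulting task waits
 * for migration to finish and then retries the fault, at which point
 * the restored (possibly device-private) entry is in place.
 */
entry = pte_to_swp_entry(vmf->orig_pte);
if (unlikely(non_swap_entry(entry))) {
	if (is_migration_entry(entry)) {
		/* Block until remove_migration_ptes() restores the PTE. */
		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
		return 0;	/* fault is retried by the caller */
	}
}
```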