author		Zi Yan <zi.yan@cs.rutgers.edu>	2017-09-08 19:10:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-08 21:26:45 -0400
commit		616b8371539a6c487404c3b8fb04078016dab4ba
tree		73e4967eee56e07ba8bb7f22e4eac37811a09343 /mm/migrate.c
parent		9c670ea37947a82cb6d4df69139f7e46ed71a0ac
mm: thp: enable thp migration in generic path
Add thp migration's core code, including conversions between a PMD entry and a swap entry, setting PMD migration entry, removing PMD migration entry, and waiting on PMD migration entries.

This patch makes it possible to support thp migration. If you fail to allocate a destination page as a thp, you just split the source thp as we do now, and then enter the normal page migration. If you succeed in allocating a destination thp, you enter thp migration. Subsequent patches actually enable thp migration for each caller of page migration by allowing its get_new_page() callback to allocate thps.

[zi.yan@cs.rutgers.edu: fix gcc-4.9.0 -Wmissing-braces warning]
  Link: http://lkml.kernel.org/r/A0ABA698-7486-46C3-B209-E95A9048B22C@cs.rutgers.edu
[akpm@linux-foundation.org: fix x86_64 allnoconfig warning]
Signed-off-by: Zi Yan <zi.yan@cs.rutgers.edu>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Nellans <dnellans@nvidia.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
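The message above mentions conversions between a PMD entry and a swap entry. For orientation, here is a minimal sketch of what such conversion helpers look like, modelled on the existing PTE-level pte_to_swp_entry()/swp_entry_to_pte() pattern; the arch-specific __pmd_to_swp_entry()/__swp_entry_to_pmd() macros are assumed to be supplied by architectures that select CONFIG_ARCH_ENABLE_THP_MIGRATION, and the exact form in include/linux/swapops.h may differ.

/*
 * Sketch of PMD <-> swap entry conversion used by THP migration.
 * The __pmd_to_swp_entry()/__swp_entry_to_pmd() macros are arch-provided
 * (on x86-64 they are essentially casts of the raw pmd value).
 */
static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}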
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--	mm/migrate.c	32
1 file changed, 31 insertions(+), 1 deletion(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index e84eeb4e4356..bf5366a2176b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -216,6 +216,15 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 		new = page - pvmw.page->index +
 			linear_page_index(vma, pvmw.address);
 
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+		/* PMD-mapped THP migration entry */
+		if (!pvmw.pte) {
+			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
+			remove_migration_pmd(&pvmw, new);
+			continue;
+		}
+#endif
+
 		get_page(new);
 		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
 		if (pte_swp_soft_dirty(*pvmw.pte))
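The new branch hands PMD-mapped THPs to remove_migration_pmd(), the PMD-level counterpart of the PTE restore code in this function; that helper lives in mm/huge_memory.c. A rough sketch of the logic it needs, mirroring the PTE path shown above (soft-dirty and mlock handling omitted; exact details are assumed here, not quoted from the patch):

/* Sketch: restore a huge PMD in place of a PMD migration entry. */
void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long haddr = pvmw->address & HPAGE_PMD_MASK;
	swp_entry_t entry;
	pmd_t pmde;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	entry = pmd_to_swp_entry(*pvmw->pmd);
	get_page(new);
	pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pmde = maybe_pmd_mkwrite(pmde, vma);

	flush_cache_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
	page_add_anon_rmap(new, vma, haddr, true);
	set_pmd_at(mm, haddr, pvmw->pmd, pmde);
	update_mmu_cache_pmd(vma, pvmw->address, pvmw->pmd);
}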
@@ -330,6 +339,27 @@ void migration_entry_wait_huge(struct vm_area_struct *vma,
 	__migration_entry_wait(mm, pte, ptl);
 }
 
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
+{
+	spinlock_t *ptl;
+	struct page *page;
+
+	ptl = pmd_lock(mm, pmd);
+	if (!is_pmd_migration_entry(*pmd))
+		goto unlock;
+	page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
+	if (!get_page_unless_zero(page))
+		goto unlock;
+	spin_unlock(ptl);
+	wait_on_page_locked(page);
+	put_page(page);
+	return;
+unlock:
+	spin_unlock(ptl);
+}
+#endif
+
 #ifdef CONFIG_BLOCK
 /* Returns true if all buffers are successfully locked */
 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
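pmd_migration_entry_wait() follows the same pattern as the PTE-level __migration_entry_wait(): take the PMD lock, re-check that the entry is still a migration entry, pin the page, then sleep until the migrating side unlocks it. A caller in the page-fault path would look roughly like the fragment below; this is a simplified sketch of the check a fault handler performs before retrying, with vmf and mm assumed from the surrounding handler.

/* Sketch: fault-path check for a PMD under migration (simplified). */
pmd_t orig_pmd = *vmf.pmd;

barrier();
if (unlikely(is_swap_pmd(orig_pmd))) {
	VM_BUG_ON(thp_migration_supported() &&
		  !is_pmd_migration_entry(orig_pmd));
	if (is_pmd_migration_entry(orig_pmd))
		pmd_migration_entry_wait(mm, vmf.pmd);
	return 0;	/* the fault is retried once migration finishes */
}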
@@ -1088,7 +1118,7 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
 		goto out;
 	}
 
-	if (unlikely(PageTransHuge(page))) {
+	if (unlikely(PageTransHuge(page) && !PageTransHuge(newpage))) {
 		lock_page(page);
 		rc = split_huge_page(page);
 		unlock_page(page);
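The changed condition means a THP source is split only when get_new_page() handed back a base page; when the callback manages to allocate a huge destination, the page migrates as a whole via the PMD migration entries above. A hedged sketch of what such a THP-aware callback could look like; the function name, node argument, and GFP choices here are illustrative and not the actual callbacks the later patches modify.

/*
 * Sketch of a get_new_page() callback that tries to allocate a THP
 * destination when the source is a THP. On failure it returns a base
 * page, so unmap_and_move() splits the source and migrates it the
 * old way, as described in the commit message.
 */
static struct page *new_thp_aware_page(struct page *page, unsigned long private,
				       int **result)
{
	int nid = (int)private;

	if (PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_pages_node(nid, GFP_TRANSHUGE | __GFP_THISNODE,
				       HPAGE_PMD_ORDER);
		if (thp) {
			prep_transhuge_page(thp);
			return thp;
		}
		/* No huge destination: fall back to a base page below. */
	}
	return alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
}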