summaryrefslogtreecommitdiffstats
path: root/mm/migrate.c
diff options
context:
space:
mode:
authorWill Deacon <will.deacon@arm.com>2017-07-10 18:48:31 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-07-10 19:32:31 -0400
commitf4e177d12686bf98b5a047b5187121a71ee0dd8c (patch)
treed278c09e2bc610288c37b79747d727e489752597 /mm/migrate.c
parent108a7ac448caff8e35e8c3f92f65faad893e5aca (diff)
mm/migrate.c: stabilise page count when migrating transparent hugepages
When migrating a transparent hugepage, migrate_misplaced_transhuge_page guards itself against a concurrent fast GUP of the page by checking that the page count is equal to 2 before and after installing the new pmd. If the page count changes, then the pmd is reverted back to the original entry, however there is a small window where the new (possibly writable) pmd is installed and the underlying page could be written by userspace. Restoring the old pmd could therefore result in loss of data. This patch fixes the problem by freezing the page count whilst updating the page tables, which protects against a concurrent fast GUP without the need to restore the old pmd in the failure case (since the page count can no longer change under our feet). Link: http://lkml.kernel.org/r/1497349722-6731-4-git-send-email-will.deacon@arm.com Signed-off-by: Will Deacon <will.deacon@arm.com> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Steve Capper <steve.capper@arm.com> Cc: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--mm/migrate.c15
1 file changed, 2 insertions(+), 13 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 8935cbe362ce..627671551873 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1916,7 +1916,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1916 int page_lru = page_is_file_cache(page); 1916 int page_lru = page_is_file_cache(page);
1917 unsigned long mmun_start = address & HPAGE_PMD_MASK; 1917 unsigned long mmun_start = address & HPAGE_PMD_MASK;
1918 unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE; 1918 unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
1919 pmd_t orig_entry;
1920 1919
1921 /* 1920 /*
1922 * Rate-limit the amount of data that is being migrated to a node. 1921 * Rate-limit the amount of data that is being migrated to a node.
@@ -1959,8 +1958,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1959 /* Recheck the target PMD */ 1958 /* Recheck the target PMD */
1960 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 1959 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1961 ptl = pmd_lock(mm, pmd); 1960 ptl = pmd_lock(mm, pmd);
1962 if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) { 1961 if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
1963fail_putback:
1964 spin_unlock(ptl); 1962 spin_unlock(ptl);
1965 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1963 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1966 1964
@@ -1982,7 +1980,6 @@ fail_putback:
1982 goto out_unlock; 1980 goto out_unlock;
1983 } 1981 }
1984 1982
1985 orig_entry = *pmd;
1986 entry = mk_huge_pmd(new_page, vma->vm_page_prot); 1983 entry = mk_huge_pmd(new_page, vma->vm_page_prot);
1987 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 1984 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1988 1985
@@ -1999,15 +1996,7 @@ fail_putback:
1999 set_pmd_at(mm, mmun_start, pmd, entry); 1996 set_pmd_at(mm, mmun_start, pmd, entry);
2000 update_mmu_cache_pmd(vma, address, &entry); 1997 update_mmu_cache_pmd(vma, address, &entry);
2001 1998
2002 if (page_count(page) != 2) { 1999 page_ref_unfreeze(page, 2);
2003 set_pmd_at(mm, mmun_start, pmd, orig_entry);
2004 flush_pmd_tlb_range(vma, mmun_start, mmun_end);
2005 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
2006 update_mmu_cache_pmd(vma, address, &entry);
2007 page_remove_rmap(new_page, true);
2008 goto fail_putback;
2009 }
2010
2011 mlock_migrate_page(new_page, page); 2000 mlock_migrate_page(new_page, page);
2012 page_remove_rmap(page, true); 2001 page_remove_rmap(page, true);
2013 set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED); 2002 set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);