about summary refs log tree commit diff stats
path: root/mm/migrate.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  38
1 files changed, 31 insertions, 7 deletions
diff --git a/mm/migrate.c b/mm/migrate.c
index bb940045fe85..2cabbd5fa5bf 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1722,6 +1722,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1722 struct page *new_page = NULL; 1722 struct page *new_page = NULL;
1723 struct mem_cgroup *memcg = NULL; 1723 struct mem_cgroup *memcg = NULL;
1724 int page_lru = page_is_file_cache(page); 1724 int page_lru = page_is_file_cache(page);
1725 pmd_t orig_entry;
1725 1726
1726 /* 1727 /*
1727 * Rate-limit the amount of data that is being migrated to a node. 1728 * Rate-limit the amount of data that is being migrated to a node.
@@ -1756,7 +1757,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1756 1757
1757 /* Recheck the target PMD */ 1758 /* Recheck the target PMD */
1758 ptl = pmd_lock(mm, pmd); 1759 ptl = pmd_lock(mm, pmd);
1759 if (unlikely(!pmd_same(*pmd, entry))) { 1760 if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
1761fail_putback:
1760 spin_unlock(ptl); 1762 spin_unlock(ptl);
1761 1763
1762 /* Reverse changes made by migrate_page_copy() */ 1764 /* Reverse changes made by migrate_page_copy() */
@@ -1786,16 +1788,34 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1786 */ 1788 */
1787 mem_cgroup_prepare_migration(page, new_page, &memcg); 1789 mem_cgroup_prepare_migration(page, new_page, &memcg);
1788 1790
1791 orig_entry = *pmd;
1789 entry = mk_pmd(new_page, vma->vm_page_prot); 1792 entry = mk_pmd(new_page, vma->vm_page_prot);
1790 entry = pmd_mknonnuma(entry);
1791 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1792 entry = pmd_mkhuge(entry); 1793 entry = pmd_mkhuge(entry);
1794 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1793 1795
1796 /*
1797 * Clear the old entry under pagetable lock and establish the new PTE.
1798 * Any parallel GUP will either observe the old page blocking on the
1799 * page lock, block on the page table lock or observe the new page.
1800 * The SetPageUptodate on the new page and page_add_new_anon_rmap
1801 * guarantee the copy is visible before the pagetable update.
1802 */
1803 flush_cache_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
1804 page_add_new_anon_rmap(new_page, vma, haddr);
1794 pmdp_clear_flush(vma, haddr, pmd); 1805 pmdp_clear_flush(vma, haddr, pmd);
1795 set_pmd_at(mm, haddr, pmd, entry); 1806 set_pmd_at(mm, haddr, pmd, entry);
1796 page_add_new_anon_rmap(new_page, vma, haddr);
1797 update_mmu_cache_pmd(vma, address, &entry); 1807 update_mmu_cache_pmd(vma, address, &entry);
1808
1809 if (page_count(page) != 2) {
1810 set_pmd_at(mm, haddr, pmd, orig_entry);
1811 flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
1812 update_mmu_cache_pmd(vma, address, &entry);
1813 page_remove_rmap(new_page);
1814 goto fail_putback;
1815 }
1816
1798 page_remove_rmap(page); 1817 page_remove_rmap(page);
1818
1799 /* 1819 /*
1800 * Finish the charge transaction under the page table lock to 1820 * Finish the charge transaction under the page table lock to
1801 * prevent split_huge_page() from dividing up the charge 1821 * prevent split_huge_page() from dividing up the charge
@@ -1820,9 +1840,13 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1820out_fail: 1840out_fail:
1821 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); 1841 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
1822out_dropref: 1842out_dropref:
1823 entry = pmd_mknonnuma(entry); 1843 ptl = pmd_lock(mm, pmd);
1824 set_pmd_at(mm, haddr, pmd, entry); 1844 if (pmd_same(*pmd, entry)) {
1825 update_mmu_cache_pmd(vma, address, &entry); 1845 entry = pmd_mknonnuma(entry);
1846 set_pmd_at(mm, haddr, pmd, entry);
1847 update_mmu_cache_pmd(vma, address, &entry);
1848 }
1849 spin_unlock(ptl);
1826 1850
1827 unlock_page(page); 1851 unlock_page(page);
1828 put_page(page); 1852 put_page(page);