author	Mel Gorman <mgorman@suse.de>	2013-12-18 20:08:33 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-12-18 22:04:51 -0500
commit	f714f4f20e59ea6eea264a86b9a51fd51b88fc54 (patch)
tree	62838c16d35b2be000234942322f42bca868ee46 /mm
parent	2b4847e73004c10ae6666c2e27b5c5430aed8698 (diff)
mm: numa: call MMU notifiers on THP migration
MMU notifiers must be called on THP page migration or secondary MMUs
will get very confused.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Alex Thorlton <athorlton@sgi.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
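For context, the hunks below bracket the huge-PMD replacement with mmu_notifier_invalidate_range_start()/mmu_notifier_invalidate_range_end() over the PMD-aligned range [mmun_start, mmun_end). The following is a minimal sketch of that bracketing pattern only; the helper name sketch_replace_huge_pmd and its exact call ordering are illustrative and are not part of this patch.

/*
 * Illustrative sketch only: bracket a huge-PMD update with MMU notifier
 * calls so secondary MMUs (IOMMUs, KVM, etc.) can drop their stale
 * mappings. Identifiers mirror the hunks below; the helper itself is
 * hypothetical.
 */
#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/mmu_notifier.h>
#include <asm/tlbflush.h>

static void sketch_replace_huge_pmd(struct mm_struct *mm,
				    struct vm_area_struct *vma,
				    pmd_t *pmd, pmd_t newent,
				    unsigned long address)
{
	/* Cover the whole PMD-aligned range backed by the THP. */
	unsigned long mmun_start = address & HPAGE_PMD_MASK;
	unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
	spinlock_t *ptl;

	/* Secondary MMUs must be told before the page table changes... */
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	ptl = pmd_lock(mm, pmd);
	set_pmd_at(mm, mmun_start, pmd, newent);
	flush_tlb_range(vma, mmun_start, mmun_end);
	spin_unlock(ptl);

	/* ...and again once the update is complete. */
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
}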
Diffstat (limited to 'mm')
-rw-r--r--	mm/migrate.c	22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 2cabbd5fa5bf..be787d506fbb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -36,6 +36,7 @@
 #include <linux/hugetlb_cgroup.h>
 #include <linux/gfp.h>
 #include <linux/balloon_compaction.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/tlbflush.h>
 
@@ -1716,12 +1717,13 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 					struct page *page, int node)
 {
 	spinlock_t *ptl;
-	unsigned long haddr = address & HPAGE_PMD_MASK;
 	pg_data_t *pgdat = NODE_DATA(node);
 	int isolated = 0;
 	struct page *new_page = NULL;
 	struct mem_cgroup *memcg = NULL;
 	int page_lru = page_is_file_cache(page);
+	unsigned long mmun_start = address & HPAGE_PMD_MASK;
+	unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
 	pmd_t orig_entry;
 
 	/*
@@ -1756,10 +1758,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	WARN_ON(PageLRU(new_page));
 
 	/* Recheck the target PMD */
+	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	ptl = pmd_lock(mm, pmd);
 	if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
 fail_putback:
 		spin_unlock(ptl);
+		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
 		/* Reverse changes made by migrate_page_copy() */
 		if (TestClearPageActive(new_page))
@@ -1800,15 +1804,16 @@ fail_putback:
 	 * The SetPageUptodate on the new page and page_add_new_anon_rmap
 	 * guarantee the copy is visible before the pagetable update.
 	 */
-	flush_cache_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
-	page_add_new_anon_rmap(new_page, vma, haddr);
-	pmdp_clear_flush(vma, haddr, pmd);
-	set_pmd_at(mm, haddr, pmd, entry);
+	flush_cache_range(vma, mmun_start, mmun_end);
+	page_add_new_anon_rmap(new_page, vma, mmun_start);
+	pmdp_clear_flush(vma, mmun_start, pmd);
+	set_pmd_at(mm, mmun_start, pmd, entry);
+	flush_tlb_range(vma, mmun_start, mmun_end);
 	update_mmu_cache_pmd(vma, address, &entry);
 
 	if (page_count(page) != 2) {
-		set_pmd_at(mm, haddr, pmd, orig_entry);
-		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
+		set_pmd_at(mm, mmun_start, pmd, orig_entry);
+		flush_tlb_range(vma, mmun_start, mmun_end);
 		update_mmu_cache_pmd(vma, address, &entry);
 		page_remove_rmap(new_page);
 		goto fail_putback;
@@ -1823,6 +1828,7 @@ fail_putback:
 	 */
 	mem_cgroup_end_migration(memcg, page, new_page, true);
 	spin_unlock(ptl);
+	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
 	unlock_page(new_page);
 	unlock_page(page);
@@ -1843,7 +1849,7 @@ out_dropref:
 	ptl = pmd_lock(mm, pmd);
 	if (pmd_same(*pmd, entry)) {
 		entry = pmd_mknonnuma(entry);
-		set_pmd_at(mm, haddr, pmd, entry);
+		set_pmd_at(mm, mmun_start, pmd, entry);
 		update_mmu_cache_pmd(vma, address, &entry);
 	}
 	spin_unlock(ptl);