author	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2016-03-17 17:18:56 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-17 18:09:34 -0400
commit	458aa76d132dc1c3c60be0f0db99bcc0ce1767fc (patch)
tree	58fcb5267a2f29c553479af3ab43968522e2cf7a
parent	bcf6691797f425b301f629bb783b7ff2d0bcfa5a (diff)
mm/thp/migration: switch from flush_tlb_range to flush_pmd_tlb_range
We remove one instance of flush_tlb_range here.  That was added by commit
f714f4f20e59 ("mm: numa: call MMU notifiers on THP migration").  But
pmdp_huge_clear_flush_notify should have done the required flush for us.
Hence remove the extra flush.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Vineet Gupta <Vineet.Gupta1@synopsys.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
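For context, the redundancy described above follows from the generic helpers:
pmdp_huge_clear_flush_notify() clears the huge PMD, flushes the PMD-sized TLB
range, and then invalidates the MMU notifier range, so an additional explicit
flush_tlb_range() afterwards repeats work already done.  A rough sketch of the
relevant call chain (paraphrased for illustration, not copied verbatim from
this tree):

/* Sketch only: paraphrased generic helper, not the exact code in this tree. */
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;

	/* Clear the huge PMD, then flush the TLB for the whole huge-page range. */
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

/*
 * The _notify variant used in migrate_misplaced_transhuge_page() wraps the
 * helper above and additionally calls mmu_notifier_invalidate_range(), which
 * is why the separate flush_tlb_range() removed below was unnecessary.
 */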
-rw-r--r--	include/asm-generic/pgtable.h	17
-rw-r--r--	mm/migrate.c	8
-rw-r--r--	mm/pgtable-generic.c	14
3 files changed, 22 insertions, 17 deletions
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index c370b261c720..9401f4819891 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -783,6 +783,23 @@ static inline int pmd_clear_huge(pmd_t *pmd)
 }
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
 
+#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * ARCHes with special requirements for evicting THP backing TLB entries can
+ * implement this. Otherwise also, it can help optimize normal TLB flush in
+ * THP regime. stock flush_tlb_range() typically has optimization to nuke the
+ * entire TLB if flush span is greater than a threshold, which will
+ * likely be true for a single huge page. Thus a single thp flush will
+ * invalidate the entire TLB which is not desirable.
+ * e.g. see arch/arc: flush_pmd_tlb_range
+ */
+#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
+#else
+#define flush_pmd_tlb_range(vma, addr, end)	BUILD_BUG()
+#endif
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #ifndef io_remap_pfn_range
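The comment added above points at arch/arc as an example of an architecture
that overrides this hook.  Purely as an illustration of the override pattern
(hypothetical code, not arch/arc's actual implementation): the architecture
defines __HAVE_ARCH_FLUSH_PMD_TLB_RANGE in its page-table headers and supplies
a flush_pmd_tlb_range() that evicts only the huge-page entries rather than
letting a large-span flush_tlb_range() escalate to a full-TLB invalidate.

/* Hypothetical arch override, illustrative only.  In an arch header: */
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

void flush_pmd_tlb_range(struct vm_area_struct *vma,
			 unsigned long start, unsigned long end);

/* And in the arch TLB code, something along these lines: */
void flush_pmd_tlb_range(struct vm_area_struct *vma,
			 unsigned long start, unsigned long end)
{
	unsigned long addr;

	/*
	 * Walk the range in HPAGE_PMD_SIZE steps and evict each huge-page
	 * TLB entry individually instead of flushing the entire TLB.
	 */
	for (addr = start; addr < end; addr += HPAGE_PMD_SIZE)
		arch_evict_huge_tlb_entry(vma->vm_mm, addr); /* hypothetical helper */
}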
diff --git a/mm/migrate.c b/mm/migrate.c
index 568284ec75d4..fdaf0818fb30 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1773,7 +1773,10 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 		put_page(new_page);
 		goto out_fail;
 	}
-
+	/*
+	 * We are not sure a pending tlb flush here is for a huge page
+	 * mapping or not. Hence use the tlb range variant
+	 */
 	if (mm_tlb_flush_pending(mm))
 		flush_tlb_range(vma, mmun_start, mmun_end);
 
@@ -1829,12 +1832,11 @@ fail_putback:
 	page_add_anon_rmap(new_page, vma, mmun_start, true);
 	pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
 	set_pmd_at(mm, mmun_start, pmd, entry);
-	flush_tlb_range(vma, mmun_start, mmun_end);
 	update_mmu_cache_pmd(vma, address, &entry);
 
 	if (page_count(page) != 2) {
 		set_pmd_at(mm, mmun_start, pmd, orig_entry);
-		flush_tlb_range(vma, mmun_start, mmun_end);
+		flush_pmd_tlb_range(vma, mmun_start, mmun_end);
 		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
 		update_mmu_cache_pmd(vma, address, &entry);
 		page_remove_rmap(new_page, true);
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 06a005b979a7..71c5f9109f2a 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -84,20 +84,6 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
-#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
-
-/*
- * ARCHes with special requirements for evicting THP backing TLB entries can
- * implement this. Otherwise also, it can help optimize normal TLB flush in
- * THP regime. stock flush_tlb_range() typically has optimization to nuke the
- * entire TLB if flush span is greater than a threshold, which will
- * likely be true for a single huge page. Thus a single thp flush will
- * invalidate the entire TLB which is not desirable.
- * e.g. see arch/arc: flush_pmd_tlb_range
- */
-#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
-#endif
-
 #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
 int pmdp_set_access_flags(struct vm_area_struct *vma,
 			unsigned long address, pmd_t *pmdp,