author    Peter Zijlstra <peterz@infradead.org>  2018-08-31 08:46:08 -0400
committer Ingo Molnar <mingo@kernel.org>         2019-04-03 04:32:40 -0400
commit    ed6a79352cad00e9a49d6e438be40e45107207bf (patch)
tree      a1ed733ba7eacb57d93e6bb825a24b63769a11c9
parent    dea2434c23c102b3e7d320849ec1cfeb432edb60 (diff)
asm-generic/tlb, arch: Provide CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
Move the mmu_gather::page_size things into the generic code instead of
PowerPC specific bits.

No change in behavior intended.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nick Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
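For context, below is a minimal userspace model of the semantics this patch
gives tlb_change_page_size() when CONFIG_HAVE_MMU_GATHER_PAGE_SIZE is
selected: an actual page-size change on a partially-filled mmu_gather forces
a flush (unless the whole mm is being torn down), and the new size is then
recorded for the next batch. The struct and the flush stub are simplified
stand-ins for illustration only, not the kernel's real types:

#include <stdio.h>

struct mmu_gather {
	unsigned int fullmm;    /* non-zero when tearing down the whole mm */
	unsigned int page_size; /* 0 until the first batched page sets it */
};

/* Stand-in for the kernel's tlb_flush_mmu(): just report the drain. */
static void tlb_flush_mmu(struct mmu_gather *tlb)
{
	printf("flush: draining batch gathered at page_size=%u\n",
	       tlb->page_size);
}

/*
 * Mirrors the CONFIG_HAVE_MMU_GATHER_PAGE_SIZE branch this patch adds to
 * include/asm-generic/tlb.h: a real size change flushes the pending batch
 * (unless tlb->fullmm), then the new size is recorded.
 */
static void tlb_change_page_size(struct mmu_gather *tlb,
				 unsigned int page_size)
{
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm)
			tlb_flush_mmu(tlb);
	}
	tlb->page_size = page_size;
}

int main(void)
{
	struct mmu_gather tlb = { 0, 0 };

	tlb_change_page_size(&tlb, 4096);    /* first call: records 4K  */
	tlb_change_page_size(&tlb, 4096);    /* same size: no flush     */
	tlb_change_page_size(&tlb, 1 << 21); /* 4K -> 2M: forces flush  */
	return 0;
}

The flush-on-change rule exists because a single gather must never mix page
sizes: as the documentation added by this patch notes, the option ensures
tlb_flush() is called every time the size actually changes, and makes
mmu_gather::page_size available to tlb_flush().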
-rw-r--r--  arch/Kconfig                    |  3
-rw-r--r--  arch/arm/include/asm/tlb.h      |  3
-rw-r--r--  arch/ia64/include/asm/tlb.h     |  3
-rw-r--r--  arch/powerpc/Kconfig            |  1
-rw-r--r--  arch/powerpc/include/asm/tlb.h  | 17
-rw-r--r--  arch/s390/include/asm/tlb.h     |  4
-rw-r--r--  arch/sh/include/asm/tlb.h       |  4
-rw-r--r--  arch/um/include/asm/tlb.h       |  4
-rw-r--r--  include/asm-generic/tlb.h       | 32
-rw-r--r--  mm/huge_memory.c                |  4
-rw-r--r--  mm/hugetlb.c                    |  2
-rw-r--r--  mm/madvise.c                    |  2
-rw-r--r--  mm/memory.c                     |  4
-rw-r--r--  mm/mmu_gather.c                 |  5
14 files changed, 39 insertions(+), 49 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 33687dddd86a..cdc7f3d5d278 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -386,6 +386,9 @@ config HAVE_RCU_TABLE_FREE
 config HAVE_RCU_TABLE_INVALIDATE
 	bool
 
+config HAVE_MMU_GATHER_PAGE_SIZE
+	bool
+
 config ARCH_HAVE_NMI_SAFE_CMPXCHG
 	bool
 
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f854148c8d7c..d644c3c7c6f3 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -286,8 +286,7 @@ tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr
 
 #define tlb_migrate_finish(mm) do { } while (0)
 
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+static inline void tlb_change_page_size(struct mmu_gather *tlb,
 					unsigned int page_size)
 {
 }
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 516355a774bf..bf8985f5f876 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -282,8 +282,7 @@ do { \
 #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
 	tlb_remove_tlb_entry(tlb, ptep, address)
 
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+static inline void tlb_change_page_size(struct mmu_gather *tlb,
 						     unsigned int page_size)
 {
 }
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2d0be82c3061..a7aa4feabc09 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -218,6 +218,7 @@ config PPC
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_RCU_TABLE_FREE		if SMP
+	select HAVE_MMU_GATHER_PAGE_SIZE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE		if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
 	select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index e24c67d5ba75..b018e9f9b491 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -27,7 +27,6 @@
 #define tlb_start_vma(tlb, vma)	do { } while (0)
 #define tlb_end_vma(tlb, vma)	do { } while (0)
 #define __tlb_remove_tlb_entry	__tlb_remove_tlb_entry
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
 
 extern void tlb_flush(struct mmu_gather *tlb);
 
@@ -46,22 +45,6 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 #endif
 }
 
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-						     unsigned int page_size)
-{
-	if (!tlb->page_size)
-		tlb->page_size = page_size;
-	else if (tlb->page_size != page_size) {
-		if (!tlb->fullmm)
-			tlb_flush_mmu(tlb);
-		/*
-		 * update the page size after flush for the new
-		 * mmu_gather.
-		 */
-		tlb->page_size = page_size;
-	}
-}
-
 #ifdef CONFIG_SMP
 static inline int mm_is_core_local(struct mm_struct *mm)
 {
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index b31c779cf581..9941a1442a88 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -180,9 +180,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
 	tlb_remove_tlb_entry(tlb, ptep, address)
 
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-						     unsigned int page_size)
+static inline void tlb_change_page_size(struct mmu_gather *tlb, unsigned int page_size)
 {
 }
 
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 77abe192fb43..af7c9d891cf8 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -127,9 +127,7 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 	return tlb_remove_page(tlb, page);
 }
 
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-						     unsigned int page_size)
+static inline void tlb_change_page_size(struct mmu_gather *tlb, unsigned int page_size)
 {
 }
 
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index dce6db147f24..6463f3ab1767 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -146,9 +146,7 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
 	tlb_remove_tlb_entry(tlb, ptep, address)
 
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-						     unsigned int page_size)
+static inline void tlb_change_page_size(struct mmu_gather *tlb, unsigned int page_size)
 {
 }
 
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index f1594ba8b2de..e75620e41ba4 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -61,7 +61,7 @@
  * tlb_remove_page() and tlb_remove_page_size() imply the call to
  * tlb_flush_mmu() when required and has no return value.
  *
- * - tlb_remove_check_page_size_change()
+ * - tlb_change_page_size()
  *
  * call before __tlb_remove_page*() to set the current page-size; implies a
  * possible tlb_flush_mmu() call.
@@ -114,6 +114,11 @@
  *
  * Additionally there are a few opt-in features:
  *
+ *  HAVE_MMU_GATHER_PAGE_SIZE
+ *
+ *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
+ *  changes the size and provides mmu_gather::page_size to tlb_flush().
+ *
  *  HAVE_RCU_TABLE_FREE
  *
  *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
@@ -239,11 +244,15 @@ struct mmu_gather {
 	unsigned int		cleared_puds : 1;
 	unsigned int		cleared_p4ds : 1;
 
+	unsigned int		batch_count;
+
 	struct mmu_gather_batch *active;
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
-	unsigned int		batch_count;
-	int			page_size;
+
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+	unsigned int		page_size;
+#endif
 };
 
 void arch_tlb_gather_mmu(struct mmu_gather *tlb,
@@ -309,21 +318,18 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
 }
 
-#ifndef tlb_remove_check_page_size_change
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+static inline void tlb_change_page_size(struct mmu_gather *tlb,
 						     unsigned int page_size)
 {
-	/*
-	 * We don't care about page size change, just update
-	 * mmu_gather page size here so that debug checks
-	 * doesn't throw false warning.
-	 */
-#ifdef CONFIG_DEBUG_VM
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+	if (tlb->page_size && tlb->page_size != page_size) {
+		if (!tlb->fullmm)
+			tlb_flush_mmu(tlb);
+	}
+
 	tlb->page_size = page_size;
 #endif
 }
-#endif
 
 static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
 {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 404acdcd0455..76b75112a259 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1641,7 +1641,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	struct mm_struct *mm = tlb->mm;
 	bool ret = false;
 
-	tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
+	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (!ptl)
@@ -1717,7 +1717,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	pmd_t orig_pmd;
 	spinlock_t *ptl;
 
-	tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
+	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 
 	ptl = __pmd_trans_huge_lock(pmd, vma);
 	if (!ptl)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 97b1e0290c66..3fc37a626b52 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3353,7 +3353,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	 * This is a hugetlb vma, all the pte entries should point
 	 * to huge page.
 	 */
-	tlb_remove_check_page_size_change(tlb, sz);
+	tlb_change_page_size(tlb, sz);
 	tlb_start_vma(tlb, vma);
 
 	/*
diff --git a/mm/madvise.c b/mm/madvise.c
index 21a7881a2db4..bb3a4554d5d5 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -328,7 +328,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 	if (pmd_trans_unstable(pmd))
 		return 0;
 
-	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+	tlb_change_page_size(tlb, PAGE_SIZE);
 	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	flush_tlb_batched_pending(mm);
 	arch_enter_lazy_mmu_mode();
diff --git a/mm/memory.c b/mm/memory.c
index ab650c21bccd..1aa5c03566f1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -356,7 +356,7 @@ void free_pgd_range(struct mmu_gather *tlb,
 	 * We add page table cache pages with PAGE_SIZE,
 	 * (see pte_free_tlb()), flush the tlb if we need
 	 */
-	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+	tlb_change_page_size(tlb, PAGE_SIZE);
 	pgd = pgd_offset(tlb->mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
@@ -1046,7 +1046,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	pte_t *pte;
 	swp_entry_t entry;
 
-	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+	tlb_change_page_size(tlb, PAGE_SIZE);
 again:
 	init_rss_vec(rss);
 	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index f2f03c655807..14dfc97155e4 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -58,7 +58,9 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb->batch = NULL;
 #endif
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
 	tlb->page_size = 0;
+#endif
 
 	__tlb_reset_range(tlb);
 }
@@ -121,7 +123,10 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 	struct mmu_gather_batch *batch;
 
 	VM_BUG_ON(!tlb->end);
+
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
 	VM_WARN_ON(tlb->page_size != page_size);
+#endif
 
 	batch = tlb->active;
 	/*