author     Peter Zijlstra <peterz@infradead.org>    2018-08-31 08:46:08 -0400
committer  Ingo Molnar <mingo@kernel.org>           2019-04-03 04:32:40 -0400
commit     ed6a79352cad00e9a49d6e438be40e45107207bf (patch)
tree       a1ed733ba7eacb57d93e6bb825a24b63769a11c9 /include/asm-generic
parent     dea2434c23c102b3e7d320849ec1cfeb432edb60 (diff)
asm-generic/tlb, arch: Provide CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
Move the mmu_gather::page_size things into the generic code instead of the PowerPC-specific bits. No change in behavior intended.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nick Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
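For context: an architecture opts in to the new behavior by selecting HAVE_MMU_GATHER_PAGE_SIZE from its Kconfig, after which mmu_gather::page_size is available to its flush hook. The sketch below shows a hypothetical arch-side tlb_flush() consuming that field; it is not part of this patch, and flush_tlb_mm_range_sized() is a made-up helper used only for illustration.

/*
 * Illustrative arch-side consumer (not part of this patch): with
 * CONFIG_HAVE_MMU_GATHER_PAGE_SIZE selected, tlb_flush() can use the
 * page size recorded by tlb_change_page_size() to pick an invalidation
 * granularity.  flush_tlb_mm_range_sized() is a hypothetical helper.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		flush_tlb_mm(tlb->mm);	/* drop the whole address space */
		return;
	}

	/* tlb->page_size only exists when the option is selected */
	flush_tlb_mm_range_sized(tlb->mm, tlb->start, tlb->end,
				 tlb->page_size);
}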
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/tlb.h | 32
1 file changed, 19 insertions, 13 deletions
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index f1594ba8b2de..e75620e41ba4 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -61,7 +61,7 @@
  * tlb_remove_page() and tlb_remove_page_size() imply the call to
  * tlb_flush_mmu() when required and has no return value.
  *
- *  - tlb_remove_check_page_size_change()
+ *  - tlb_change_page_size()
  *
  *    call before __tlb_remove_page*() to set the current page-size; implies a
  *    possible tlb_flush_mmu() call.
@@ -114,6 +114,11 @@
  *
  * Additionally there are a few opt-in features:
  *
+ *  HAVE_MMU_GATHER_PAGE_SIZE
+ *
+ *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
+ *  changes the size and provides mmu_gather::page_size to tlb_flush().
+ *
  *  HAVE_RCU_TABLE_FREE
  *
  *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
@@ -239,11 +244,15 @@ struct mmu_gather {
 	unsigned int		cleared_puds : 1;
 	unsigned int		cleared_p4ds : 1;
 
+	unsigned int		batch_count;
+
 	struct mmu_gather_batch *active;
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
-	unsigned int		batch_count;
-	int page_size;
+
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+	unsigned int page_size;
+#endif
 };
 
 void arch_tlb_gather_mmu(struct mmu_gather *tlb,
@@ -309,21 +318,18 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
 }
 
-#ifndef tlb_remove_check_page_size_change
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+static inline void tlb_change_page_size(struct mmu_gather *tlb,
 						     unsigned int page_size)
 {
-	/*
-	 * We don't care about page size change, just update
-	 * mmu_gather page size here so that debug checks
-	 * doesn't throw false warning.
-	 */
-#ifdef CONFIG_DEBUG_VM
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+	if (tlb->page_size && tlb->page_size != page_size) {
+		if (!tlb->fullmm)
+			tlb_flush_mmu(tlb);
+	}
+
 	tlb->page_size = page_size;
 #endif
 }
-#endif
 
 static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
 {
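A caller-side usage sketch, following the documented contract that tlb_change_page_size() is called before __tlb_remove_page*()/tlb_remove_page_size(): when a teardown path switches page sizes, the call may trigger an intermediate tlb_flush_mmu() on configurations that select HAVE_MMU_GATHER_PAGE_SIZE. The function example_zap_huge_page() is hypothetical and only illustrates the calling convention.

/*
 * Hypothetical unmap path: declare the page size about to be gathered
 * before queueing the page.  If it differs from what the gather has
 * already accumulated, tlb_change_page_size() flushes first (unless
 * this is a full-mm teardown), then records the new size.
 */
static void example_zap_huge_page(struct mmu_gather *tlb, struct page *page)
{
	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);	/* may call tlb_flush_mmu() */

	/* ... the PMD entry is cleared and the mapping torn down here ... */

	tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
}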