author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2018-09-18 08:51:50 -0400
committer	Ingo Molnar <mingo@kernel.org>			2019-04-03 04:32:55 -0400
commit		952a31c9e6fa963eabf3692f31a769e59f4c8303 (patch)
tree		43bfc72419969559a86bcc4f3ab93ad0c3c08e70
parent		6137fed0823247e32306bde2b48cac627c24f894 (diff)
asm-generic/tlb: Introduce CONFIG_HAVE_MMU_GATHER_NO_GATHER=y
Add the Kconfig option HAVE_MMU_GATHER_NO_GATHER to the generic
mmu_gather code. If the option is set, the mmu_gather will no longer
track individual pages for delayed page free. A platform that enables
the option needs to provide its own implementation of the
__tlb_remove_page_size() function to free pages.

No change in behavior intended.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: aneesh.kumar@linux.vnet.ibm.com
Cc: heiko.carstens@de.ibm.com
Cc: linux@armlinux.org.uk
Cc: npiggin@gmail.com
Link: http://lkml.kernel.org/r/20180918125151.31744-2-schwidefsky@de.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/Kconfig			  3
-rw-r--r--	include/asm-generic/tlb.h	  9
-rw-r--r--	mm/mmu_gather.c			107
3 files changed, 70 insertions, 49 deletions
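
Illustration (not part of this patch): the commit message notes that an architecture which sets HAVE_MMU_GATHER_NO_GATHER has to supply its own __tlb_remove_page_size(). A minimal, hypothetical sketch of such an arch-provided implementation, assuming the platform simply frees each page immediately rather than batching it, could look like this:

/*
 * Hypothetical arch-side __tlb_remove_page_size() for a platform that
 * selects HAVE_MMU_GATHER_NO_GATHER. Illustration only; not taken from
 * this patch or from any in-tree architecture.
 */
#include <linux/swap.h>	/* free_page_and_swap_cache() */
#include <asm/tlb.h>	/* struct mmu_gather; the arch declares the prototype itself */

bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
			    int page_size)
{
	/* Release the page right away instead of queueing it in a batch. */
	free_page_and_swap_cache(page);

	/* There is no batch to fill up, so never ask the caller to flush. */
	return false;
}

The architecture would additionally select HAVE_MMU_GATHER_NO_GATHER from its Kconfig entry; with the option set, the batch bookkeeping guarded by #ifndef in mm/mmu_gather.c below is compiled out for that platform.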
diff --git a/arch/Kconfig b/arch/Kconfig
index 04b3e8b94cfe..a826843470ed 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -389,6 +389,9 @@ config HAVE_RCU_TABLE_NO_INVALIDATE
 config HAVE_MMU_GATHER_PAGE_SIZE
 	bool
 
+config HAVE_MMU_GATHER_NO_GATHER
+	bool
+
 config ARCH_HAVE_NMI_SAFE_CMPXCHG
 	bool
 
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 81799e6a4304..af20aa8255cd 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -191,6 +191,7 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
 #endif
 
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 /*
  * If we can't allocate a page to make a big batch of page pointers
  * to work on, then just handle a few from the on-stack structure.
@@ -215,6 +216,10 @@ struct mmu_gather_batch {
  */
 #define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
 
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+				   int page_size);
+#endif
+
 /*
  * struct mmu_gather is an opaque type used by the mm code for passing around
  * any data needed by arch specific code for tlb_remove_page.
@@ -261,6 +266,7 @@ struct mmu_gather {
 
 	unsigned int batch_count;
 
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 	struct mmu_gather_batch *active;
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
@@ -268,6 +274,7 @@ struct mmu_gather {
 #ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
 	unsigned int page_size;
 #endif
+#endif
 };
 
 void arch_tlb_gather_mmu(struct mmu_gather *tlb,
@@ -276,8 +283,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb);
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 			 unsigned long start, unsigned long end, bool force);
 void tlb_flush_mmu_free(struct mmu_gather *tlb);
-extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
-				   int page_size);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 				      unsigned long address,
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 2a5322d52b0a..ab220edcd7ef 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -13,6 +13,8 @@
 
 #ifdef HAVE_GENERIC_MMU_GATHER
 
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+
 static bool tlb_next_batch(struct mmu_gather *tlb)
 {
 	struct mmu_gather_batch *batch;
@@ -41,6 +43,56 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
 	return true;
 }
 
+static void tlb_batch_pages_flush(struct mmu_gather *tlb)
+{
+	struct mmu_gather_batch *batch;
+
+	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
+		free_pages_and_swap_cache(batch->pages, batch->nr);
+		batch->nr = 0;
+	}
+	tlb->active = &tlb->local;
+}
+
+static void tlb_batch_list_free(struct mmu_gather *tlb)
+{
+	struct mmu_gather_batch *batch, *next;
+
+	for (batch = tlb->local.next; batch; batch = next) {
+		next = batch->next;
+		free_pages((unsigned long)batch, 0);
+	}
+	tlb->local.next = NULL;
+}
+
+bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
+{
+	struct mmu_gather_batch *batch;
+
+	VM_BUG_ON(!tlb->end);
+
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+	VM_WARN_ON(tlb->page_size != page_size);
+#endif
+
+	batch = tlb->active;
+	/*
+	 * Add the page and check if we are full. If so
+	 * force a flush.
+	 */
+	batch->pages[batch->nr++] = page;
+	if (batch->nr == batch->max) {
+		if (!tlb_next_batch(tlb))
+			return true;
+		batch = tlb->active;
+	}
+	VM_BUG_ON_PAGE(batch->nr > batch->max, page);
+
+	return false;
+}
+
+#endif /* HAVE_MMU_GATHER_NO_GATHER */
+
 void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 			 unsigned long start, unsigned long end)
 {
@@ -48,12 +100,15 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 
 	/* Is it from 0 to ~0? */
 	tlb->fullmm = !(start | (end+1));
+
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 	tlb->need_flush_all = 0;
 	tlb->local.next = NULL;
 	tlb->local.nr = 0;
 	tlb->local.max = ARRAY_SIZE(tlb->__pages);
 	tlb->active = &tlb->local;
 	tlb->batch_count = 0;
+#endif
 
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb->batch = NULL;
@@ -67,16 +122,12 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 
 void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
-	struct mmu_gather_batch *batch;
-
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
 #endif
-	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
-		free_pages_and_swap_cache(batch->pages, batch->nr);
-		batch->nr = 0;
-	}
-	tlb->active = &tlb->local;
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+	tlb_batch_pages_flush(tlb);
+#endif
 }
 
 void tlb_flush_mmu(struct mmu_gather *tlb)
@@ -92,8 +143,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 		unsigned long start, unsigned long end, bool force)
 {
-	struct mmu_gather_batch *batch, *next;
-
 	if (force) {
 		__tlb_reset_range(tlb);
 		__tlb_adjust_range(tlb, start, end - start);
@@ -103,45 +152,9 @@ void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
-
-	for (batch = tlb->local.next; batch; batch = next) {
-		next = batch->next;
-		free_pages((unsigned long)batch, 0);
-	}
-	tlb->local.next = NULL;
-}
-
-/* __tlb_remove_page
- *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- *	handling the additional races in SMP caused by other CPUs caching valid
- *	mappings in their TLBs. Returns the number of free page slots left.
- *	When out of page slots we must call tlb_flush_mmu().
- *	Returns true if the caller should flush.
- */
-bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
-{
-	struct mmu_gather_batch *batch;
-
-	VM_BUG_ON(!tlb->end);
-
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
-	VM_WARN_ON(tlb->page_size != page_size);
-#endif
-
-	batch = tlb->active;
-	/*
-	 * Add the page and check if we are full. If so
-	 * force a flush.
-	 */
-	batch->pages[batch->nr++] = page;
-	if (batch->nr == batch->max) {
-		if (!tlb_next_batch(tlb))
-			return true;
-		batch = tlb->active;
-	}
-	VM_BUG_ON_PAGE(batch->nr > batch->max, page);
-
-	return false;
-}
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+	tlb_batch_list_free(tlb);
+#endif
+}
 
 #endif /* HAVE_GENERIC_MMU_GATHER */