author	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2016-12-12 19:42:43 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-12 21:55:07 -0500
commit	692a68c1544d6be4ba7c6e929e9c7b2ba0447b91 (patch)
tree	a8d1ee7cb797cb64a260c229b8ce67926657166a
parent	07e326610e5634e5038fce32fff370949eb42101 (diff)
mm: remove the page size change check in tlb_remove_page
Now that we check for page size change early in the loop, we can
partially revert e9d55e157034a ("mm: change the interface for
__tlb_remove_page").

This simplifies the code considerably by removing the need to track the
last address with which we adjusted the range.  We also go back to the
older way of filling the mmu_gather array, i.e., we add an entry and then
check whether the gather batch is full.

Link: http://lkml.kernel.org/r/20161026084839.27299-6-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
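To make the before/after batching behaviour concrete, here is a minimal
user-space C sketch of the two interfaces (the gather struct, BATCH_MAX and
the function names are illustrative stand-ins, not the kernel's mmu_gather
API): the old interface rejected the page when the batch was already full,
forcing the caller to flush and then retry the same page, while the new
interface always stores the page first and only reports that the batch has
just become full, so the caller merely flushes.

#include <stdbool.h>
#include <stdio.h>

#define BATCH_MAX 4			/* illustrative batch size */

struct gather {
	const void *pages[BATCH_MAX];
	unsigned int nr;		/* pages currently batched */
};

/* Old style: refuse the page if the batch is already full. */
static bool add_page_old(struct gather *g, const void *page)
{
	if (g->nr == BATCH_MAX)
		return true;		/* caller must flush, then retry the page */
	g->pages[g->nr++] = page;
	return false;
}

/* New style: always add, then report whether the batch just filled up. */
static bool add_page_new(struct gather *g, const void *page)
{
	g->pages[g->nr++] = page;
	return g->nr == BATCH_MAX;	/* caller flushes; no retry needed */
}

static void flush(struct gather *g)
{
	printf("flushing %u pages\n", g->nr);	/* stands in for tlb_flush_mmu() */
	g->nr = 0;
}

int main(void)
{
	struct gather g = { .nr = 0 };
	int dummy[6];

	/* old interface: on "full", flush and re-add the same page */
	for (int i = 0; i < 6; i++) {
		if (add_page_old(&g, &dummy[i])) {
			flush(&g);
			add_page_old(&g, &dummy[i]);
		}
	}
	flush(&g);

	/* new interface: the page is already stored, just flush */
	for (int i = 0; i < 6; i++)
		if (add_page_new(&g, &dummy[i]))
			flush(&g);
	if (g.nr)
		flush(&g);
	return 0;
}

Because the page is never rejected under the new scheme, the caller no
longer has to remember which page (and which address) still needs to be
re-added after the flush, which is what lets tlb->addr and
__tlb_remove_pte_page() be removed below.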
-rw-r--r--	arch/arm/include/asm/tlb.h	13
-rw-r--r--	arch/ia64/include/asm/tlb.h	16
-rw-r--r--	arch/s390/include/asm/tlb.h	6
-rw-r--r--	arch/sh/include/asm/tlb.h	6
-rw-r--r--	arch/um/include/asm/tlb.h	6
-rw-r--r--	include/asm-generic/tlb.h	28
-rw-r--r--	mm/memory.c	21
7 files changed, 15 insertions(+), 81 deletions(-)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index a9d6de4746ea..3f2eb76243e3 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -213,18 +213,17 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 
 static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
+	tlb->pages[tlb->nr++] = page;
+	VM_WARN_ON(tlb->nr > tlb->max);
 	if (tlb->nr == tlb->max)
 		return true;
-	tlb->pages[tlb->nr++] = page;
 	return false;
 }
 
 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (__tlb_remove_page(tlb, page)) {
+	if (__tlb_remove_page(tlb, page))
 		tlb_flush_mmu(tlb);
-		__tlb_remove_page(tlb, page);
-	}
 }
 
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
@@ -233,12 +232,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
 	return __tlb_remove_page(tlb, page);
 }
 
-static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
-					 struct page *page)
-{
-	return __tlb_remove_page(tlb, page);
-}
-
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 					struct page *page, int page_size)
 {
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index bfe6295aa746..fced197b9626 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -207,15 +207,15 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
  */
 static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (tlb->nr == tlb->max)
-		return true;
-
 	tlb->need_flush = 1;
 
 	if (!tlb->nr && tlb->pages == tlb->local)
 		__tlb_alloc_page(tlb);
 
 	tlb->pages[tlb->nr++] = page;
+	VM_WARN_ON(tlb->nr > tlb->max);
+	if (tlb->nr == tlb->max)
+		return true;
 	return false;
 }
 
@@ -236,10 +236,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 
 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (__tlb_remove_page(tlb, page)) {
+	if (__tlb_remove_page(tlb, page))
 		tlb_flush_mmu(tlb);
-		__tlb_remove_page(tlb, page);
-	}
 }
 
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
@@ -248,12 +246,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
 	return __tlb_remove_page(tlb, page);
 }
 
-static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
-					 struct page *page)
-{
-	return __tlb_remove_page(tlb, page);
-}
-
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 					struct page *page, int page_size)
 {
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 28b159c87c38..853b2a3d8dee 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -104,12 +104,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
 	return __tlb_remove_page(tlb, page);
 }
 
-static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
-					 struct page *page)
-{
-	return __tlb_remove_page(tlb, page);
-}
-
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 					struct page *page, int page_size)
 {
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 0f988b3e484b..46e0d635e36f 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -118,12 +118,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
 	return __tlb_remove_page(tlb, page);
 }
 
-static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
-					 struct page *page)
-{
-	return __tlb_remove_page(tlb, page);
-}
-
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 					struct page *page, int page_size)
 {
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 8258dd4bb13c..600a2e9bfee2 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -116,12 +116,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
 	return __tlb_remove_page(tlb, page);
 }
 
-static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
-					 struct page *page)
-{
-	return __tlb_remove_page(tlb, page);
-}
-
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 					struct page *page, int page_size)
 {
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 256c9de71fdb..7eed8cf3130a 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -107,11 +107,6 @@ struct mmu_gather {
 	struct mmu_gather_batch local;
 	struct page *__pages[MMU_GATHER_BUNDLE];
 	unsigned int batch_count;
-	/*
-	 * __tlb_adjust_range will track the new addr here,
-	 * that that we can adjust the range after the flush
-	 */
-	unsigned long addr;
 	int page_size;
 };
 
@@ -130,12 +125,6 @@ static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 {
 	tlb->start = min(tlb->start, address);
 	tlb->end = max(tlb->end, address + range_size);
-	/*
-	 * Track the last address with which we adjusted the range. This
-	 * will be used later to adjust again after a mmu_flush due to
-	 * failed __tlb_remove_page
-	 */
-	tlb->addr = address;
 }
 
 static inline void __tlb_reset_range(struct mmu_gather *tlb)
@@ -151,15 +140,11 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 					struct page *page, int page_size)
 {
-	if (__tlb_remove_page_size(tlb, page, page_size)) {
+	if (__tlb_remove_page_size(tlb, page, page_size))
 		tlb_flush_mmu(tlb);
-		tlb->page_size = page_size;
-		__tlb_adjust_range(tlb, tlb->addr, page_size);
-		__tlb_remove_page_size(tlb, page, page_size);
-	}
 }
 
-static bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
 }
@@ -173,15 +158,6 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
 }
 
-static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page)
-{
-	/* active->nr should be zero when we call this */
-	VM_BUG_ON_PAGE(tlb->active->nr, page);
-	tlb->page_size = PAGE_SIZE;
-	__tlb_adjust_range(tlb, tlb->addr, PAGE_SIZE);
-	return __tlb_remove_page(tlb, page);
-}
-
 #ifndef tlb_remove_check_page_size_change
 #define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
 static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
diff --git a/mm/memory.c b/mm/memory.c
index eae20eb66bfc..0a72f821ccdc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -300,15 +300,14 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 	struct mmu_gather_batch *batch;
 
 	VM_BUG_ON(!tlb->end);
-
-	if (!tlb->page_size)
-		tlb->page_size = page_size;
-	else {
-		if (page_size != tlb->page_size)
-			return true;
-	}
+	VM_WARN_ON(tlb->page_size != page_size);
 
 	batch = tlb->active;
+	/*
+	 * Add the page and check if we are full. If so
+	 * force a flush.
+	 */
+	batch->pages[batch->nr++] = page;
 	if (batch->nr == batch->max) {
 		if (!tlb_next_batch(tlb))
 			return true;
@@ -316,7 +315,6 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 	}
 	VM_BUG_ON_PAGE(batch->nr > batch->max, page);
 
-	batch->pages[batch->nr++] = page;
 	return false;
 }
 
@@ -1122,7 +1120,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	pte_t *start_pte;
 	pte_t *pte;
 	swp_entry_t entry;
-	struct page *pending_page = NULL;
 
 	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
 again:
@@ -1177,7 +1174,6 @@ again:
 				print_bad_pte(vma, addr, ptent, page);
 			if (unlikely(__tlb_remove_page(tlb, page))) {
 				force_flush = 1;
-				pending_page = page;
 				addr += PAGE_SIZE;
 				break;
 			}
@@ -1218,11 +1214,6 @@ again:
 	if (force_flush) {
 		force_flush = 0;
 		tlb_flush_mmu_free(tlb);
-		if (pending_page) {
-			/* remove the page with new size */
-			__tlb_remove_pte_page(tlb, pending_page);
-			pending_page = NULL;
-		}
 		if (addr != end)
 			goto again;
 	}