path: root/mm/memory.c
author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>   2016-12-12 19:42:43 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>       2016-12-12 21:55:07 -0500
commit     692a68c1544d6be4ba7c6e929e9c7b2ba0447b91 (patch)
tree       a8d1ee7cb797cb64a260c229b8ce67926657166a /mm/memory.c
parent     07e326610e5634e5038fce32fff370949eb42101 (diff)
mm: remove the page size change check in tlb_remove_page
Now that we check for page size change early in the loop, we can partially revert e9d55e157034a ("mm: change the interface for __tlb_remove_page").

This simplifies the code considerably by removing the need to track the last address with which we adjusted the range. We also go back to the older way of filling the mmu_gather array, i.e., we add an entry and then check whether the gather batch is full.

Link: http://lkml.kernel.org/r/20161026084839.27299-6-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
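The fill-order change described above can be sketched in a few lines of plain C. This is an illustrative userspace sketch under invented names (gather_batch, add_check_then_fill, add_fill_then_check, flush), not the kernel's mmu_gather API; it only contrasts the old "check, then add" order with the new "add, then check" order.

/* Illustrative sketch only: invented names, not the kernel's mmu_gather API. */
#include <stdbool.h>
#include <stdio.h>

#define BATCH_MAX 4

struct gather_batch {
        void *pages[BATCH_MAX];
        int nr;
};

/* Old order: refuse when full, so the caller must remember the rejected
 * page and add it again after flushing. */
static bool add_check_then_fill(struct gather_batch *b, void *page)
{
        if (b->nr == BATCH_MAX)
                return true;            /* page was NOT stored */
        b->pages[b->nr++] = page;
        return false;
}

/* New order: always store the entry, then report whether the batch just
 * became full; a "flush needed" return carries no pending state. */
static bool add_fill_then_check(struct gather_batch *b, void *page)
{
        b->pages[b->nr++] = page;
        return b->nr == BATCH_MAX;      /* true => caller flushes */
}

static void flush(struct gather_batch *b)
{
        printf("flushing %d page(s)\n", b->nr);
        b->nr = 0;
}

int main(void)
{
        struct gather_batch b = { .nr = 0 };
        int pages[10];

        /* Old order: a full batch rejects the page, so it has to be
         * stashed and re-added after the flush. */
        for (int i = 0; i < 10; i++) {
                if (add_check_then_fill(&b, &pages[i])) {
                        void *pending_page = &pages[i];

                        flush(&b);
                        add_check_then_fill(&b, pending_page);
                }
        }
        flush(&b);

        /* New order: the page is already queued, so just flush and go on. */
        for (int i = 0; i < 10; i++) {
                if (add_fill_then_check(&b, &pages[i]))
                        flush(&b);
        }
        if (b.nr)
                flush(&b);
        return 0;
}

Because the new order always stores the page before reporting a full batch, a caller that gets "flush needed" back can simply flush and continue; the pending_page bookkeeping removed from zap_pte_range below existed only to re-add the page that the old order had rejected.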
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 21
1 file changed, 6 insertions(+), 15 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index eae20eb66bfc..0a72f821ccdc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -300,15 +300,14 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
         struct mmu_gather_batch *batch;
 
         VM_BUG_ON(!tlb->end);
-
-        if (!tlb->page_size)
-                tlb->page_size = page_size;
-        else {
-                if (page_size != tlb->page_size)
-                        return true;
-        }
+        VM_WARN_ON(tlb->page_size != page_size);
 
         batch = tlb->active;
+        /*
+         * Add the page and check if we are full. If so
+         * force a flush.
+         */
+        batch->pages[batch->nr++] = page;
         if (batch->nr == batch->max) {
                 if (!tlb_next_batch(tlb))
                         return true;
@@ -316,7 +315,6 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
         }
         VM_BUG_ON_PAGE(batch->nr > batch->max, page);
 
-        batch->pages[batch->nr++] = page;
         return false;
 }
 
@@ -1122,7 +1120,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
         pte_t *start_pte;
         pte_t *pte;
         swp_entry_t entry;
-        struct page *pending_page = NULL;
 
         tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
 again:
@@ -1177,7 +1174,6 @@ again:
                                 print_bad_pte(vma, addr, ptent, page);
                         if (unlikely(__tlb_remove_page(tlb, page))) {
                                 force_flush = 1;
-                                pending_page = page;
                                 addr += PAGE_SIZE;
                                 break;
                         }
@@ -1218,11 +1214,6 @@ again:
         if (force_flush) {
                 force_flush = 0;
                 tlb_flush_mmu_free(tlb);
-                if (pending_page) {
-                        /* remove the page with new size */
-                        __tlb_remove_pte_page(tlb, pending_page);
-                        pending_page = NULL;
-                }
                 if (addr != end)
                         goto again;
         }