summaryrefslogtreecommitdiffstats
path: root/mm/memory.c
diff options
context:
space:
mode:
authorAneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>2016-07-26 18:24:12 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-07-26 19:19:19 -0400
commite77b0852b551ffd8b29fa0225e1ef62c195e3160 (patch)
treea9f49cde2ef4814f78423addd7af592c549a0403 /mm/memory.c
parente9d55e157034a9efd99405c99c1565d64619d82b (diff)
mm/mmu_gather: track page size with mmu gather and force flush if page size change
This allows an arch which needs to do special handling with respect to different page size when flushing tlb to implement the same in mmu gather. Link: http://lkml.kernel.org/r/1465049193-22197-3-git-send-email-aneesh.kumar@linux.vnet.ibm.com Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Hugh Dickins <hughd@google.com> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Mel Gorman <mgorman@suse.de> Cc: David Rientjes <rientjes@google.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Minchan Kim <minchan.kim@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--mm/memory.c10
1 files changed, 9 insertions, 1 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 12f31501c323..a329149e1c54 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -233,6 +233,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
233#ifdef CONFIG_HAVE_RCU_TABLE_FREE 233#ifdef CONFIG_HAVE_RCU_TABLE_FREE
234 tlb->batch = NULL; 234 tlb->batch = NULL;
235#endif 235#endif
236 tlb->page_size = 0;
236 237
237 __tlb_reset_range(tlb); 238 __tlb_reset_range(tlb);
238} 239}
@@ -294,12 +295,19 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
294 * When out of page slots we must call tlb_flush_mmu(). 295 * When out of page slots we must call tlb_flush_mmu().
295 *returns true if the caller should flush. 296 *returns true if the caller should flush.
296 */ 297 */
297bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) 298bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
298{ 299{
299 struct mmu_gather_batch *batch; 300 struct mmu_gather_batch *batch;
300 301
301 VM_BUG_ON(!tlb->end); 302 VM_BUG_ON(!tlb->end);
302 303
304 if (!tlb->page_size)
305 tlb->page_size = page_size;
306 else {
307 if (page_size != tlb->page_size)
308 return true;
309 }
310
303 batch = tlb->active; 311 batch = tlb->active;
304 if (batch->nr == batch->max) { 312 if (batch->nr == batch->max) {
305 if (!tlb_next_batch(tlb)) 313 if (!tlb_next_batch(tlb))