author	Will Deacon <will.deacon@arm.com>	2015-01-12 14:10:55 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-01-12 21:20:40 -0500
commit	721c21c17ab958abf19a8fc611c3bd4743680e38 (patch)
tree	592da7d79c7d01a4a4a1ad0353430dee1f147115 /mm
parent	eaa27f34e91a14cdceed26ed6c6793ec1d186115 (diff)
mm: mmu_gather: use tlb->end != 0 only for TLB invalidation
When batching up address ranges for TLB invalidation, we check tlb->end != 0 to indicate that some pages have actually been unmapped.

As of commit f045bbb9fa1b ("mmu_gather: fix over-eager tlb_flush_mmu_free() calling"), we use the same check for freeing these pages in order to avoid a performance regression where we call free_pages_and_swap_cache even when no pages are actually queued up.

Unfortunately, the range could have been reset (tlb->end = 0) by tlb_end_vma, which has been shown to cause memory leaks on arm64. Furthermore, investigation into these leaks revealed that the fullmm case on task exit no longer invalidates the TLB, by virtue of tlb->end == 0 (in 3.18, need_flush would have been set).

This patch resolves the problem by reverting commit f045bbb9fa1b, using instead tlb->local.nr as the predicate for page freeing in tlb_flush_mmu_free and ensuring that tlb->end is initialised to a non-zero value in the fullmm case.

Tested-by: Mark Langsdorf <mlangsdo@redhat.com>
Tested-by: Dave Hansen <dave@sr71.net>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
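For context, the sketch below summarises the mmu_gather batching state that the two predicates refer to: tlb->end gates TLB invalidation, while the per-batch page count (tlb->local.nr and any chained batches) gates page freeing. The structures are simplified for illustration and are not the kernel's exact definitions.

/*
 * Illustrative sketch only -- simplified from the generic mmu_gather
 * batching state of this era, not the kernel's exact layout.
 */
struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;		/* pages queued in this batch; 0 => nothing to free */
	unsigned int		max;
	struct page		*pages[8];	/* the real code sizes this dynamically */
};

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned long		start;		/* virtual range gathered for TLB invalidation */
	unsigned long		end;		/* end == 0 => nothing unmapped, nothing to invalidate */
	unsigned int		fullmm : 1;	/* tearing down the whole address space (task exit) */
	struct mmu_gather_batch	*active;	/* batch currently being filled */
	struct mmu_gather_batch	local;		/* first, embedded batch: the tlb->local.nr predicate */
};

With these fields in mind, the patch below moves the tlb->end check into tlb_flush_mmu_tlbonly() and makes tlb_flush_mmu_free() walk batches only while batch->nr is non-zero.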
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index c6565f00fb38..54f3a9b00956 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -235,6 +235,9 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 
 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
+	if (!tlb->end)
+		return;
+
 	tlb_flush(tlb);
 	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -247,7 +250,7 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
 	struct mmu_gather_batch *batch;
 
-	for (batch = &tlb->local; batch; batch = batch->next) {
+	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
 		free_pages_and_swap_cache(batch->pages, batch->nr);
 		batch->nr = 0;
 	}
@@ -256,9 +259,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 
 void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-	if (!tlb->end)
-		return;
-
 	tlb_flush_mmu_tlbonly(tlb);
 	tlb_flush_mmu_free(tlb);
 }
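The remaining piece described in the commit message, initialising tlb->end to a non-zero value in the fullmm case, lands outside the mm/ path filter applied to this page and so does not appear in the diff above. A minimal sketch of what that looks like, assuming the range-reset helper is __tlb_reset_range() in the generic mmu_gather header; this is consistent with the commit message rather than quoted verbatim from the patch:

/*
 * Sketch only: reset the gathered range between flushes. For a full
 * address-space teardown (fullmm), keep tlb->end non-zero so the final
 * tlb_flush_mmu_tlbonly() still invalidates the TLB on task exit.
 */
static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0UL;	/* whole address space */
	} else {
		tlb->start = TASK_SIZE;		/* shrinks as unmapped ranges are gathered */
		tlb->end   = 0;			/* nothing unmapped yet */
	}
}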