aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorVineet Gupta <Vineet.Gupta1@synopsys.com>2013-07-03 18:03:31 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-07-03 19:07:34 -0400
commite6c495a96ce02574e765d5140039a64c8d4e8c9e (patch)
tree0a1511f52ece251d786f02088b113b3f742a3b49 /mm
parentf60e2a968e2bebe34986f49251017f72b725d8c0 (diff)
mm: fix the TLB range flushed when __tlb_remove_page() runs out of slots
zap_pte_range loops from @addr to @end. In the middle, if it runs out of batching slots, TLB entries need to be flushed for @start to @interim, NOT @interim to @end. Since the ARC port doesn't use page free batching I can't test it myself, but this seems like the right thing to do. Observed this when working on a fix for the issue at thread: http://www.spinics.net/lists/linux-arch/msg21736.html
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c  9
1 file changed, 6 insertions, 3 deletions
diff --git a/mm/memory.c b/mm/memory.c
index a101bbcacfd7..407533219673 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1101,6 +1101,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	spinlock_t *ptl;
 	pte_t *start_pte;
 	pte_t *pte;
+	unsigned long range_start = addr;

 again:
 	init_rss_vec(rss);
@@ -1206,12 +1207,14 @@ again:
 		force_flush = 0;

 #ifdef HAVE_GENERIC_MMU_GATHER
-		tlb->start = addr;
-		tlb->end = end;
+		tlb->start = range_start;
+		tlb->end = addr;
 #endif
 		tlb_flush_mmu(tlb);
-		if (addr != end)
+		if (addr != end) {
+			range_start = addr;
 			goto again;
+		}
 	}

 	return addr;