path: root/mm/memory.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2013-08-15 14:42:25 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-08-16 11:52:46 -0400
commit		2b047252d087be7f2ba088b4933cd904f92e6fce (patch)
tree		b240af27ca0530f7b26f1314968f01140a72a5f8 /mm/memory.c
parent		f1d6e17f540af37bb1891480143669ba7636c4cf (diff)
Fix TLB gather virtual address range invalidation corner cases
Ben Tebulin reported: "Since v3.7.2 on two independent machines a very specific Git repository fails in 9/10 cases on git-fsck due to an SHA1/memory failures. This only occurs on a very specific repository and can be reproduced stably on two independent laptops. Git mailing list ran out of ideas and for me this looks like some very exotic kernel issue" and bisected the failure to the backport of commit 53a59fc67f97 ("mm: limit mmu_gather batching to fix soft lockups on !CONFIG_PREEMPT").

That commit itself is not actually buggy, but what it does is to make it much more likely to hit the partial TLB invalidation case, since it introduces a new case in tlb_next_batch() that previously only ever happened when running out of memory.

The real bug is that the TLB gather virtual memory range setup is subtly buggered. It was introduced in commit 597e1c3580b7 ("mm/mmu_gather: enable tlb flush range in generic mmu_gather"), and the range handling was already fixed at least once in commit e6c495a96ce0 ("mm: fix the TLB range flushed when __tlb_remove_page() runs out of slots"), but that fix was not complete.

The problem with the TLB gather virtual address range is that it isn't set up by the initial tlb_gather_mmu() initialization (which didn't get the TLB range information), but it is set up ad-hoc later by the functions that actually flush the TLB. And so any such case that forgot to update the TLB range entries would potentially miss TLB invalidates.

Rather than try to figure out exactly which particular ad-hoc range setup was missing (I personally suspect it's the hugetlb case in zap_huge_pmd(), which didn't have the same logic as zap_pte_range() did), this patch just gets rid of the problem at the source: make the TLB range information available to tlb_gather_mmu(), and initialize it when initializing all the other tlb gather fields.

This makes the patch larger, but conceptually much simpler. And the end result is much more understandable; even if you want to play games with partial ranges when invalidating the TLB contents in chunks, now the range information is always there, and anybody who doesn't want to bother with it won't introduce subtle bugs.

Ben verified that this fixes his problem.

Reported-bisected-and-tested-by: Ben Tebulin <tebulin@googlemail.com>
Build-testing-by: Stephen Rothwell <sfr@canb.auug.org.au>
Build-testing-by: Richard Weinberger <richard.weinberger@gmail.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
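To make the shape of the fix concrete, here is a minimal standalone sketch (illustrative only: the struct and gather_init() are simplified stand-ins for the kernel's struct mmu_gather and the new tlb_gather_mmu(), not the real code). The point is that the flush range is handed over once, at initialization, so every later flush sees a valid window even if no ad-hoc code path ever sets it:

    /* Illustrative sketch only -- simplified analogue of struct mmu_gather. */
    #include <stdbool.h>

    struct gather {
            unsigned long start;    /* first virtual address to invalidate */
            unsigned long end;      /* one past the last address */
            bool fullmm;            /* tearing down the whole address space? */
    };

    /* Stand-in for the new tlb_gather_mmu(): the range arrives up front. */
    static void gather_init(struct gather *g, unsigned long start, unsigned long end)
    {
            g->fullmm = !(start | (end + 1));  /* 0..~0 means "everything" */
            g->start  = start;                 /* valid from the very start... */
            g->end    = end;                   /* ...no flusher has to set it up */
    }

Partial flushers can then narrow and restore start/end (as zap_pte_range() does in the hunks below) without ever running with an uninitialized range.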
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	36
1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 40268410732a..af84bc0ec17c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -209,14 +209,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
  * tear-down from @mm. The @fullmm argument is used when @mm is without
  * users and we're going to destroy the full address space (exit/execve).
  */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
 
-       tlb->fullmm = fullmm;
+       /* Is it from 0 to ~0? */
+       tlb->fullmm = !(start | (end+1));
        tlb->need_flush_all = 0;
-       tlb->start = -1UL;
-       tlb->end = 0;
+       tlb->start = start;
+       tlb->end = end;
        tlb->need_flush = 0;
        tlb->local.next = NULL;
        tlb->local.nr = 0;
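A quick sanity check of the new fullmm test above (illustration, not part of the patch): !(start | (end+1)) is true only for the whole-address-space pair start == 0, end == ~0UL, because the unsigned wraparound of end + 1 gives 0; any bounded range leaves a nonzero bit set and is therefore treated as a partial flush.

    /* Illustration of the fullmm test; compiles and runs standalone. */
    #include <assert.h>

    int main(void)
    {
            unsigned long start = 0, end = ~0UL;
            assert(!(start | (end + 1)) == 1);   /* full mm: ~0UL + 1 wraps to 0 */

            start = 0x1000; end = 0x2000;
            assert(!(start | (end + 1)) == 0);   /* bounded range: not fullmm */
            return 0;
    }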
@@ -256,8 +257,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
 {
        struct mmu_gather_batch *batch, *next;
 
-       tlb->start = start;
-       tlb->end = end;
        tlb_flush_mmu(tlb);
 
        /* keep the page table cache within bounds */
@@ -1099,7 +1098,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
        spinlock_t *ptl;
        pte_t *start_pte;
        pte_t *pte;
-       unsigned long range_start = addr;
 
 again:
        init_rss_vec(rss);
@@ -1205,17 +1203,25 @@ again:
         * and page-free while holding it.
         */
        if (force_flush) {
+               unsigned long old_end;
+
                force_flush = 0;
 
-#ifdef HAVE_GENERIC_MMU_GATHER
-               tlb->start = range_start;
+               /*
+                * Flush the TLB just for the previous segment,
+                * then update the range to be the remaining
+                * TLB range.
+                */
+               old_end = tlb->end;
                tlb->end = addr;
-#endif
+
                tlb_flush_mmu(tlb);
-               if (addr != end) {
-                       range_start = addr;
+
+               tlb->start = addr;
+               tlb->end = old_end;
+
+               if (addr != end)
                        goto again;
-               }
        }
 
        return addr;
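The force_flush branch above is a save/narrow/flush/restore pattern on the gather's range. The same pattern as a standalone sketch (hypothetical names throughout; flush_range() merely stands in for tlb_flush_mmu()):

    #include <stdio.h>

    struct range { unsigned long start, end; };

    /* Hypothetical stand-in for tlb_flush_mmu(): just report the window. */
    static void flush_range(const struct range *r)
    {
            printf("flush [%#lx, %#lx)\n", r->start, r->end);
    }

    /* Same shape as the force_flush block: flush the segment already
     * processed, then leave the range pointing at the remaining work. */
    static void partial_flush(struct range *r, unsigned long addr)
    {
            unsigned long old_end = r->end;

            r->end = addr;          /* narrow to the processed segment */
            flush_range(r);

            r->start = addr;        /* remaining range resumes at addr */
            r->end = old_end;
    }

    int main(void)
    {
            struct range r = { 0x1000, 0x9000 };
            partial_flush(&r, 0x4000);      /* prints: flush [0x1000, 0x4000) */
            return 0;
    }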
@@ -1400,7 +1406,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end = start + size;
 
        lru_add_drain();
-       tlb_gather_mmu(&tlb, mm, 0);
+       tlb_gather_mmu(&tlb, mm, start, end);
        update_hiwater_rss(mm);
        mmu_notifier_invalidate_range_start(mm, start, end);
        for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1426,7 +1432,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
        unsigned long end = address + size;
 
        lru_add_drain();
-       tlb_gather_mmu(&tlb, mm, 0);
+       tlb_gather_mmu(&tlb, mm, address, end);
        update_hiwater_rss(mm);
        mmu_notifier_invalidate_range_start(mm, address, end);
        unmap_single_vma(&tlb, vma, address, end, details);