aboutsummaryrefslogtreecommitdiffstats
path: root/mm/rmap.c
diff options
context:
space:
mode:
author: Mel Gorman <mgorman@suse.de> 2015-09-04 18:47:35 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org> 2015-09-04 19:54:41 -0400
commitd950c9477d51f0cefc2ed3cf76e695d46af0d9c1 (patch)
treed6ba89db328ec09b2140a84dcdf1bd8ca5cbeaa4 /mm/rmap.c
parent72b252aed506b8f1a03f7abd29caef4cdf6a043b (diff)
mm: defer flush of writable TLB entries
If a PTE is unmapped and it's dirty then it was writable recently. Due
to deferred TLB flushing, it's best to assume a writable TLB cache entry
exists. With that assumption, the TLB must be flushed before any IO can
start or the page is freed to avoid lost writes or data corruption. This
patch defers flushing of potentially writable TLBs as long as possible.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c | 28
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 326d5d89e45c..0db38e7d0a72 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -626,16 +626,34 @@ void try_to_unmap_flush(void)
626 } 626 }
627 cpumask_clear(&tlb_ubc->cpumask); 627 cpumask_clear(&tlb_ubc->cpumask);
628 tlb_ubc->flush_required = false; 628 tlb_ubc->flush_required = false;
629 tlb_ubc->writable = false;
629 put_cpu(); 630 put_cpu();
630} 631}
631 632
633/* Flush iff there are potentially writable TLB entries that can race with IO */
634void try_to_unmap_flush_dirty(void)
635{
636 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
637
638 if (tlb_ubc->writable)
639 try_to_unmap_flush();
640}
641
632static void set_tlb_ubc_flush_pending(struct mm_struct *mm, 642static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
633 struct page *page) 643 struct page *page, bool writable)
634{ 644{
635 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; 645 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
636 646
637 cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm)); 647 cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
638 tlb_ubc->flush_required = true; 648 tlb_ubc->flush_required = true;
649
650 /*
651 * If the PTE was dirty then it's best to assume it's writable. The
652 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
653 * before the page is queued for IO.
654 */
655 if (writable)
656 tlb_ubc->writable = true;
639} 657}
640 658
641/* 659/*
@@ -658,7 +676,7 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
658} 676}
659#else 677#else
660static void set_tlb_ubc_flush_pending(struct mm_struct *mm, 678static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
661 struct page *page) 679 struct page *page, bool writable)
662{ 680{
663} 681}
664 682
@@ -1315,11 +1333,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1315 */ 1333 */
1316 pteval = ptep_get_and_clear(mm, address, pte); 1334 pteval = ptep_get_and_clear(mm, address, pte);
1317 1335
1318 /* Potentially writable TLBs must be flushed before IO */ 1336 set_tlb_ubc_flush_pending(mm, page, pte_dirty(pteval));
1319 if (pte_dirty(pteval))
1320 flush_tlb_page(vma, address);
1321 else
1322 set_tlb_ubc_flush_pending(mm, page);
1323 } else { 1337 } else {
1324 pteval = ptep_clear_flush(vma, address, pte); 1338 pteval = ptep_clear_flush(vma, address, pte);
1325 } 1339 }