diff options
author | Jérôme Glisse <jglisse@redhat.com> | 2017-08-31 17:17:26 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-08-31 19:12:59 -0400 |
commit | a4d1a885251382250ec315482bdd8ca52dd61e6a (patch) | |
tree | d2222816dada2668a08d2263f410fe8892b0bec2 /fs/dax.c | |
parent | 42ff72cf27027fa28dd79acabe01d9196f1480a7 (diff) |
dax: update to new mmu_notifier semantic
Replace all mmu_notifier_invalidate_page() calls by *_invalidate_range()
and make sure it is bracketed by calls to *_invalidate_range_start()/end().
Note that because we cannot presume the pmd value or pte value we have
to assume the worst and unconditionally report an invalidation as
happening.
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Bernhard Held <berny156@gmx.de>
Cc: Adam Borowski <kilobyte@angband.pl>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Wanpeng Li <kernellwp@gmail.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Takashi Iwai <tiwai@suse.de>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: axie <axie@amd.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/dax.c')
-rw-r--r-- | fs/dax.c | 19 |
1 files changed, 11 insertions, 8 deletions
@@ -646,11 +646,10 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping, | |||
646 | pte_t pte, *ptep = NULL; | 646 | pte_t pte, *ptep = NULL; |
647 | pmd_t *pmdp = NULL; | 647 | pmd_t *pmdp = NULL; |
648 | spinlock_t *ptl; | 648 | spinlock_t *ptl; |
649 | bool changed; | ||
650 | 649 | ||
651 | i_mmap_lock_read(mapping); | 650 | i_mmap_lock_read(mapping); |
652 | vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) { | 651 | vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) { |
653 | unsigned long address; | 652 | unsigned long address, start, end; |
654 | 653 | ||
655 | cond_resched(); | 654 | cond_resched(); |
656 | 655 | ||
@@ -658,8 +657,13 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping, | |||
658 | continue; | 657 | continue; |
659 | 658 | ||
660 | address = pgoff_address(index, vma); | 659 | address = pgoff_address(index, vma); |
661 | changed = false; | 660 | |
662 | if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl)) | 661 | /* |
662 | * Note because we provide start/end to follow_pte_pmd it will | ||
663 | * call mmu_notifier_invalidate_range_start() on our behalf | ||
664 | * before taking any lock. | ||
665 | */ | ||
666 | if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl)) | ||
663 | continue; | 667 | continue; |
664 | 668 | ||
665 | if (pmdp) { | 669 | if (pmdp) { |
@@ -676,7 +680,7 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping, | |||
676 | pmd = pmd_wrprotect(pmd); | 680 | pmd = pmd_wrprotect(pmd); |
677 | pmd = pmd_mkclean(pmd); | 681 | pmd = pmd_mkclean(pmd); |
678 | set_pmd_at(vma->vm_mm, address, pmdp, pmd); | 682 | set_pmd_at(vma->vm_mm, address, pmdp, pmd); |
679 | changed = true; | 683 | mmu_notifier_invalidate_range(vma->vm_mm, start, end); |
680 | unlock_pmd: | 684 | unlock_pmd: |
681 | spin_unlock(ptl); | 685 | spin_unlock(ptl); |
682 | #endif | 686 | #endif |
@@ -691,13 +695,12 @@ unlock_pmd: | |||
691 | pte = pte_wrprotect(pte); | 695 | pte = pte_wrprotect(pte); |
692 | pte = pte_mkclean(pte); | 696 | pte = pte_mkclean(pte); |
693 | set_pte_at(vma->vm_mm, address, ptep, pte); | 697 | set_pte_at(vma->vm_mm, address, ptep, pte); |
694 | changed = true; | 698 | mmu_notifier_invalidate_range(vma->vm_mm, start, end); |
695 | unlock_pte: | 699 | unlock_pte: |
696 | pte_unmap_unlock(ptep, ptl); | 700 | pte_unmap_unlock(ptep, ptl); |
697 | } | 701 | } |
698 | 702 | ||
699 | if (changed) | 703 | mmu_notifier_invalidate_range_end(vma->vm_mm, start, end); |
700 | mmu_notifier_invalidate_page(vma->vm_mm, address); | ||
701 | } | 704 | } |
702 | i_mmap_unlock_read(mapping); | 705 | i_mmap_unlock_read(mapping); |
703 | } | 706 | } |