author    Jérôme Glisse <jglisse@redhat.com>             2018-12-28 03:38:09 -0500
committer Linus Torvalds <torvalds@linux-foundation.org> 2018-12-28 15:11:50 -0500
commit    ac46d4f3c43241ffa23d5bf36153a0830c0e02cc (patch)
tree      8fb9505fd38170afe5c9ba8ff0ed54bbe6caaf4d /fs
parent    5d6527a784f7a6d247961e046e830de8d71b47d1 (diff)
mm/mmu_notifier: use structure for invalidate_range_start/end calls v2
To avoid having to change many call sites every time we want to add a
parameter, use a structure to group all parameters for the mmu_notifier
invalidate_range_start/end calls. No functional changes with this patch.

[akpm@linux-foundation.org: coding style fixes]
Link: http://lkml.kernel.org/r/20181205053628.3210-3-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jan Kara <jack@suse.cz>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <zwisler@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Felix Kuehling <felix.kuehling@amd.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>

From: Jérôme Glisse <jglisse@redhat.com>
Subject: mm/mmu_notifier: use structure for invalidate_range_start/end calls v3

Fix a build warning in migrate.c when CONFIG_MMU_NOTIFIER=n.

Link: http://lkml.kernel.org/r/20181213171330.8489-3-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
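The heart of the change lives in include/linux/mmu_notifier.h, outside the fs/-limited diff below. As a minimal sketch of the pattern only: the structure here is reduced to the three parameters visible at the converted call sites in this diff, and the upstream structure may carry additional fields.

        /* Sketch: field set inferred from the converted call sites below. */
        struct mmu_notifier_range {
                struct mm_struct *mm;   /* address space being invalidated */
                unsigned long start;    /* start of the invalidated range */
                unsigned long end;      /* end of the invalidated range */
        };

        static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
                                                   struct mm_struct *mm,
                                                   unsigned long start,
                                                   unsigned long end)
        {
                range->mm = mm;
                range->start = start;
                range->end = end;
        }

The payoff is exactly the motivation stated above: a future parameter becomes one new structure field plus an update to mmu_notifier_range_init(), instead of an edit to every invalidate_range_start/end call site in the tree.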
Diffstat (limited to 'fs')
-rw-r--r--  fs/dax.c            8
-rw-r--r--  fs/proc/task_mmu.c  7
2 files changed, 10 insertions(+), 5 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index 48132eca3761..262e14f29933 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -779,7 +779,8 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
 
         i_mmap_lock_read(mapping);
         vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
-                unsigned long address, start, end;
+                struct mmu_notifier_range range;
+                unsigned long address;
 
                 cond_resched();
 
@@ -793,7 +794,8 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
                  * call mmu_notifier_invalidate_range_start() on our behalf
                  * before taking any lock.
                  */
-                if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
+                if (follow_pte_pmd(vma->vm_mm, address, &range,
+                                   &ptep, &pmdp, &ptl))
                         continue;
 
                 /*
@@ -835,7 +837,7 @@ unlock_pte:
                         pte_unmap_unlock(ptep, ptl);
                 }
 
-                mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
+                mmu_notifier_invalidate_range_end(&range);
         }
         i_mmap_unlock_read(mapping);
 }
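Note the division of labour in dax.c: follow_pte_pmd() now fills in the range (and, per the comment in the hunk above, issues mmu_notifier_invalidate_range_start() on the caller's behalf), so dax_entry_mkclean() only hands &range to mmu_notifier_invalidate_range_end(). Callers that drive the invalidation themselves follow the shape sketched here (a hypothetical caller; the real usage is the task_mmu.c hunk that follows):

        struct mmu_notifier_range range;

        /* Old convention: repeat the (mm, start, end) triple at every call. */

        /* New convention: initialize once, then pass the structure around. */
        mmu_notifier_range_init(&range, mm, start, end);
        mmu_notifier_invalidate_range_start(&range);
        /* ... modify page tables covering [start, end) ... */
        mmu_notifier_invalidate_range_end(&range);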
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 47c3764c469b..b3ddceb003bc 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1096,6 +1096,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                 return -ESRCH;
         mm = get_task_mm(task);
         if (mm) {
+                struct mmu_notifier_range range;
                 struct clear_refs_private cp = {
                         .type = type,
                 };
@@ -1139,11 +1140,13 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                 downgrade_write(&mm->mmap_sem);
                                 break;
                         }
-                        mmu_notifier_invalidate_range_start(mm, 0, -1);
+
+                        mmu_notifier_range_init(&range, mm, 0, -1UL);
+                        mmu_notifier_invalidate_range_start(&range);
                 }
                 walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
                 if (type == CLEAR_REFS_SOFT_DIRTY)
-                        mmu_notifier_invalidate_range_end(mm, 0, -1);
+                        mmu_notifier_invalidate_range_end(&range);
                 tlb_finish_mmu(&tlb, 0, -1);
                 up_read(&mm->mmap_sem);
 out_mm:
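On the listener side (not part of the fs/ diff shown here), the commit presumably converts the mmu_notifier_ops callbacks to take the structure as well. A sketch of what a registered notifier would see, with the signature inferred from the new calling convention rather than taken from this diff; my_invalidate_range_start is a hypothetical callback name:

        /* Sketch: callback signature inferred, not shown in this diff. */
        static int my_invalidate_range_start(struct mmu_notifier *mn,
                                             const struct mmu_notifier_range *range)
        {
                /*
                 * range->mm, range->start and range->end replace the old
                 * separate arguments; flush secondary TLBs for the range here.
                 */
                return 0;
        }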