author		Joerg Roedel <jroedel@suse.de>	2014-11-12 21:46:09 -0500
committer	Oded Gabbay <oded.gabbay@amd.com>	2014-11-12 21:46:09 -0500
commit		0f0a327fa12cd55de5e7f8c05a70ac3d047f405e (patch)
tree		076ca47f46695a8f8a831e56044880afd87eb900 /mm/mmu_notifier.c
parent		34ee645e83b60ae3d5955f70ab9ab9a159136673 (diff)
mmu_notifier: add the callback for mmu_notifier_invalidate_range()
Now that the mmu_notifier_invalidate_range() calls are in place, add the callback to allow subsystems to register against it.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Jay Cornwall <Jay.Cornwall@amd.com>
Cc: Oded Gabbay <Oded.Gabbay@amd.com>
Cc: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
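For context, a consumer-side sketch (not part of this patch): a driver for a secondary MMU that shares the CPU page tables only needs the new invalidate_range callback to shoot down its own TLB. The names my_dev_flush_tlb, my_mn_ops and my_bind_mm below are hypothetical; the mmu_notifier_register() call and the callback signature match the interface this patch adds.

/*
 * Illustrative sketch only, not part of this patch. A subsystem hooks the
 * new invalidate_range callback and registers its notifier against an mm.
 */
#include <linux/mmu_notifier.h>
#include <linux/mm_types.h>

static void my_dev_flush_tlb(struct mmu_notifier *mn, struct mm_struct *mm,
			     unsigned long start, unsigned long end)
{
	/* flush the device/IOMMU TLB entries covering [start, end) */
}

static const struct mmu_notifier_ops my_mn_ops = {
	.invalidate_range = my_dev_flush_tlb,
};

static struct mmu_notifier my_notifier = {
	.ops = &my_mn_ops,
};

static int my_bind_mm(struct mm_struct *mm)
{
	/* takes mmap_sem and links the notifier into mm->mmu_notifier_mm */
	return mmu_notifier_register(&my_notifier, mm);
}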
Diffstat (limited to 'mm/mmu_notifier.c')
-rw-r--r--	mm/mmu_notifier.c	25
1 file changed, 25 insertions, 0 deletions
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 2c8da9825fe3..3b9b3d0741b2 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -193,6 +193,16 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 
 	id = srcu_read_lock(&srcu);
 	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
+		/*
+		 * Call invalidate_range here too to avoid the need for the
+		 * subsystem of having to register an invalidate_range_end
+		 * call-back when there is invalidate_range already. Usually a
+		 * subsystem registers either invalidate_range_start()/end() or
+		 * invalidate_range(), so this will be no additional overhead
+		 * (besides the pointer check).
+		 */
+		if (mn->ops->invalidate_range)
+			mn->ops->invalidate_range(mn, mm, start, end);
 		if (mn->ops->invalidate_range_end)
 			mn->ops->invalidate_range_end(mn, mm, start, end);
 	}
@@ -200,6 +210,21 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 }
 EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
 
+void __mmu_notifier_invalidate_range(struct mm_struct *mm,
+				     unsigned long start, unsigned long end)
+{
+	struct mmu_notifier *mn;
+	int id;
+
+	id = srcu_read_lock(&srcu);
+	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
+		if (mn->ops->invalidate_range)
+			mn->ops->invalidate_range(mn, mm, start, end);
+	}
+	srcu_read_unlock(&srcu, id);
+}
+EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
+
 static int do_mmu_notifier_register(struct mmu_notifier *mn,
 				    struct mm_struct *mm,
 				    int take_mmap_sem)
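For illustration, a hypothetical producer-side sketch (not taken from the kernel tree): the new hook is meant to fire wherever the CPU TLB is flushed for a range, so secondary TLBs drop their translations at the same point. The real call sites were placed in the VMM by the parent commit 34ee645e83b6; the helper name my_zap_one_page below is invented.

/*
 * Hypothetical caller-side sketch. After clearing a PTE and flushing the
 * CPU TLB, secondary TLBs are notified through the wrapper added earlier
 * in this series, which checks mm_has_notifiers() and then ends up in
 * __mmu_notifier_invalidate_range() above.
 */
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

static void my_zap_one_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;

	ptep_clear_flush(vma, addr, ptep);	/* clears PTE, flushes CPU TLB */
	mmu_notifier_invalidate_range(mm, addr, addr + PAGE_SIZE);
}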