author	Daniel Vetter <daniel.vetter@ffwll.ch>	2019-08-26 16:14:21 -0400
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-09-07 03:27:42 -0400
commit	23b68395c7c78a764e8963fc15a7cfd318bf187f (patch)
tree	2a0ef471f1dc89ea0dacc7108b0feb43ea18111a
parent	f0ade90a8aa1ea523eb366d1d1e8bd3463d9cf8a (diff)
mm/mmu_notifiers: add a lockdep map for invalidate_range_start/end
This is a similar idea to the fs_reclaim fake lockdep lock. It's fairly easy to provoke a specific notifier to be run on a specific range: just prep it, and then munmap() it.

A bit harder, but still doable, is to provoke the mmu notifiers for all the various callchains that might lead to them.

But hitting both at the same time is really hard to do reliably, especially when you want to exercise paths like direct reclaim or compaction, where it's not easy to control what exactly will be unmapped.

By introducing a lockdep map to tie them all together we allow lockdep to see a lot more dependencies, without having to actually hit them in a single callchain while testing.

On Jason's suggestion this is rolled out for both invalidate_range_start and invalidate_range_end. They both have the same calling context, hence we can share the same lockdep map. Note that the annotation for invalidate_range_start is outside of the mm_has_notifiers() check, to make sure lockdep is informed about all paths leading to this context irrespective of whether mmu notifiers are present for a given context. We don't do that on the invalidate_range_end side, to avoid paying the overhead twice; there the lockdep annotation is pushed down behind the mm_has_notifiers() check.

Link: https://lore.kernel.org/r/20190826201425.17547-2-daniel.vetter@ffwll.ch
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
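For illustration, a minimal sketch of the fake-lock pattern this patch applies. The lockdep primitives (CONFIG_LOCKDEP, struct lockdep_map, lock_map_acquire()/lock_map_release()) are the real kernel API used by the patch below; my_invalidate_map, my_invalidate() and do_invalidate() are hypothetical names, not anything in this commit:

/*
 * Sketch of the fs_reclaim-style fake lock: one lockdep_map shared by
 * every path that can reach the callback, so lockdep sees the union of
 * all dependency chains even though no single test run hits them all.
 */
#include <linux/lockdep.h>

#ifdef CONFIG_LOCKDEP
static struct lockdep_map my_invalidate_map = {
	.name = "my_invalidate"
};
#endif

static void my_invalidate(void)		/* hypothetical wrapper */
{
	/* Taken unconditionally, so every caller gets annotated. */
	lock_map_acquire(&my_invalidate_map);
	do_invalidate();		/* hypothetical callback */
	lock_map_release(&my_invalidate_map);
}

Any lock taken inside do_invalidate() on one path now forms a lockdep dependency against every other path through the shared map, which is what the single __mmu_notifier_invalidate_range_start_map below buys for the invalidate_range_start/end pair.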
-rw-r--r--	include/linux/mmu_notifier.h	14
-rw-r--r--	mm/mmu_notifier.c	8
2 files changed, 20 insertions, 2 deletions
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 52929e5ef708..4dfe996dafd2 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -42,6 +42,10 @@ enum mmu_notifier_event {
 
 #ifdef CONFIG_MMU_NOTIFIER
 
+#ifdef CONFIG_LOCKDEP
+extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
+#endif
+
 /*
  * The mmu notifier_mm structure is allocated and installed in
  * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
@@ -339,20 +343,26 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
 static inline void
 mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
 {
+	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
 	if (mm_has_notifiers(range->mm)) {
 		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
 		__mmu_notifier_invalidate_range_start(range);
 	}
+	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
 }
 
 static inline int
 mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
 {
+	int ret = 0;
+
+	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
 	if (mm_has_notifiers(range->mm)) {
 		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
-		return __mmu_notifier_invalidate_range_start(range);
+		ret = __mmu_notifier_invalidate_range_start(range);
 	}
-	return 0;
+	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+	return ret;
 }
 
 static inline void
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 9e2125ae10a5..05d98167da7b 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -21,6 +21,12 @@
 /* global SRCU for all MMs */
 DEFINE_STATIC_SRCU(srcu);
 
+#ifdef CONFIG_LOCKDEP
+struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
+	.name = "mmu_notifier_invalidate_range_start"
+};
+#endif
+
 /*
  * This function can't run concurrently against mmu_notifier_register
  * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
@@ -184,6 +190,7 @@ void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
 	struct mmu_notifier *mn;
 	int id;
 
+	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
 	id = srcu_read_lock(&srcu);
 	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
 		/*
@@ -207,6 +214,7 @@ void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
 			mn->ops->invalidate_range_end(mn, range);
 	}
 	srcu_read_unlock(&srcu, id);
+	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
 }
 
 void __mmu_notifier_invalidate_range(struct mm_struct *mm,