| author | Joerg Roedel <jroedel@suse.de> | 2014-11-12 21:46:09 -0500 |
|---|---|---|
| committer | Oded Gabbay <oded.gabbay@amd.com> | 2014-11-12 21:46:09 -0500 |
| commit | 0f0a327fa12cd55de5e7f8c05a70ac3d047f405e (patch) | |
| tree | 076ca47f46695a8f8a831e56044880afd87eb900 /include/linux | |
| parent | 34ee645e83b60ae3d5955f70ab9ab9a159136673 (diff) | |
mmu_notifier: add the callback for mmu_notifier_invalidate_range()
Now that the mmu_notifier_invalidate_range() calls are in place, add the
callback to allow subsystems to register against it.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Jay Cornwall <Jay.Cornwall@amd.com>
Cc: Oded Gabbay <Oded.Gabbay@amd.com>
Cc: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
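As an illustration of what "register against it" can look like on the driver side, here is a minimal, hypothetical sketch of a subsystem that manages a non-CPU TLB sharing the CPU page tables and therefore only needs the new invalidate_range() hook. The exampletlb_* names and the exampletlb_flush_range() stub are stand-ins and not part of this patch; the sketch assumes the mmu_notifier API as it stands after this change.

```c
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/* Hypothetical per-device state for an external (non-CPU) TLB. */
struct exampletlb_dev {
	struct mmu_notifier	mn;
	/* device-specific flush state would live here */
};

/* Stand-in for a real device flush; hypothetical, not part of the patch. */
static void exampletlb_flush_range(struct exampletlb_dev *dev,
				   unsigned long start, unsigned long end)
{
	/* Write to device MMIO or queue an invalidation command here. */
}

/*
 * Called under the ptl spin-lock: must not sleep.  Flush the external
 * TLB for [start, end) so it drops any cached translations.
 */
static void exampletlb_invalidate_range(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct exampletlb_dev *dev = container_of(mn, struct exampletlb_dev, mn);

	exampletlb_flush_range(dev, start, end);
}

static const struct mmu_notifier_ops exampletlb_mmu_ops = {
	/*
	 * A device whose TLB mirrors the CPU page tables only needs
	 * invalidate_range(); the start()/end() hooks can be left out.
	 */
	.invalidate_range	= exampletlb_invalidate_range,
};

static int exampletlb_bind_mm(struct exampletlb_dev *dev, struct mm_struct *mm)
{
	dev->mn.ops = &exampletlb_mmu_ops;
	return mmu_notifier_register(&dev->mn, mm);	/* process context, may sleep */
}
```

Because invalidate_range() runs under the page-table spin-lock, whatever the device flush does in that callback has to be non-sleeping; registration with mmu_notifier_register() happens in process context and may sleep.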
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/mmu_notifier.h | 37 |
1 file changed, 32 insertions(+), 5 deletions(-)
```diff
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 966da2b4b803..94d19f64cecf 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -98,11 +98,11 @@ struct mmu_notifier_ops {
 	/*
 	 * invalidate_range_start() and invalidate_range_end() must be
 	 * paired and are called only when the mmap_sem and/or the
-	 * locks protecting the reverse maps are held. The subsystem
-	 * must guarantee that no additional references are taken to
-	 * the pages in the range established between the call to
-	 * invalidate_range_start() and the matching call to
-	 * invalidate_range_end().
+	 * locks protecting the reverse maps are held. If the subsystem
+	 * can't guarantee that no additional references are taken to
+	 * the pages in the range, it has to implement the
+	 * invalidate_range() notifier to remove any references taken
+	 * after invalidate_range_start().
 	 *
 	 * Invalidation of multiple concurrent ranges may be
 	 * optionally permitted by the driver. Either way the
@@ -144,6 +144,29 @@ struct mmu_notifier_ops {
 	void (*invalidate_range_end)(struct mmu_notifier *mn,
 				     struct mm_struct *mm,
 				     unsigned long start, unsigned long end);
+
+	/*
+	 * invalidate_range() is either called between
+	 * invalidate_range_start() and invalidate_range_end() when the
+	 * VM has to free pages that where unmapped, but before the
+	 * pages are actually freed, or outside of _start()/_end() when
+	 * a (remote) TLB is necessary.
+	 *
+	 * If invalidate_range() is used to manage a non-CPU TLB with
+	 * shared page-tables, it not necessary to implement the
+	 * invalidate_range_start()/end() notifiers, as
+	 * invalidate_range() alread catches the points in time when an
+	 * external TLB range needs to be flushed.
+	 *
+	 * The invalidate_range() function is called under the ptl
+	 * spin-lock and not allowed to sleep.
+	 *
+	 * Note that this function might be called with just a sub-range
+	 * of what was passed to invalidate_range_start()/end(), if
+	 * called between those functions.
+	 */
+	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
+				 unsigned long start, unsigned long end);
 };
 
 /*
@@ -190,6 +213,8 @@ extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
 				  unsigned long start, unsigned long end);
 extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 				  unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
+				  unsigned long start, unsigned long end);
 
 static inline void mmu_notifier_release(struct mm_struct *mm)
 {
@@ -245,6 +270,8 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 				  unsigned long start, unsigned long end)
 {
+	if (mm_has_notifiers(mm))
+		__mmu_notifier_invalidate_range(mm, start, end);
 }
 
 static inline void mmu_notifier_mm_init(struct mm_struct *mm)
```
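For completeness, a rough sketch of the caller-side pattern the new inline wrapper supports. This is an illustrative fragment, not a verbatim kernel path, and example_unmap_one_page() is hypothetical: after the CPU TLB is flushed for a cleared PTE, the same range is pushed to any registered secondary TLBs, and the mm_has_notifiers() check keeps the cost at a single test when no notifier is registered.

```c
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/*
 * Illustrative only: unmap one page and propagate the invalidation to
 * secondary (non-CPU) TLBs via the new notifier call.
 */
static void example_unmap_one_page(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pteval;

	/* Clear the PTE and flush the CPU TLB for this address. */
	pteval = ptep_clear_flush(vma, address, ptep);

	/*
	 * Tell any registered secondary TLBs about the same range.
	 * This is a no-op when no notifier is registered on this mm.
	 */
	mmu_notifier_invalidate_range(mm, address, address + PAGE_SIZE);

	/* A real caller would now act on pteval (dirty/accessed bits). */
	(void)pteval;
}
```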
