Diffstat (limited to 'include/linux/mmu_notifier.h')
-rw-r--r--	include/linux/mmu_notifier.h	88
1 file changed, 83 insertions(+), 5 deletions(-)
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index ab8564b03468..95243d28a0ee 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -98,11 +98,11 @@ struct mmu_notifier_ops {
 	/*
 	 * invalidate_range_start() and invalidate_range_end() must be
 	 * paired and are called only when the mmap_sem and/or the
-	 * locks protecting the reverse maps are held. The subsystem
-	 * must guarantee that no additional references are taken to
-	 * the pages in the range established between the call to
-	 * invalidate_range_start() and the matching call to
-	 * invalidate_range_end().
+	 * locks protecting the reverse maps are held. If the subsystem
+	 * can't guarantee that no additional references are taken to
+	 * the pages in the range, it has to implement the
+	 * invalidate_range() notifier to remove any references taken
+	 * after invalidate_range_start().
 	 *
 	 * Invalidation of multiple concurrent ranges may be
 	 * optionally permitted by the driver. Either way the
@@ -144,6 +144,29 @@ struct mmu_notifier_ops {
 	void (*invalidate_range_end)(struct mmu_notifier *mn,
 				     struct mm_struct *mm,
 				     unsigned long start, unsigned long end);
+
+	/*
+	 * invalidate_range() is either called between
+	 * invalidate_range_start() and invalidate_range_end() when the
+	 * VM has to free pages that were unmapped, but before the
+	 * pages are actually freed, or outside of _start()/_end() when
+	 * a (remote) TLB flush is necessary.
+	 *
+	 * If invalidate_range() is used to manage a non-CPU TLB with
+	 * shared page-tables, it is not necessary to implement the
+	 * invalidate_range_start()/end() notifiers, as
+	 * invalidate_range() already catches the points in time when
+	 * an external TLB range needs to be flushed.
+	 *
+	 * The invalidate_range() function is called under the ptl
+	 * spin-lock and is not allowed to sleep.
+	 *
+	 * Note that this function might be called with just a sub-range
+	 * of what was passed to invalidate_range_start()/end(), if
+	 * called between those functions.
+	 */
+	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
+				 unsigned long start, unsigned long end);
 };
 
 /*
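For a device whose IOMMU walks the CPU page tables and only caches translations in a device TLB, the comment above says invalidate_range() alone is sufficient. A minimal driver-side sketch of that case; struct my_dev and my_dev_flush_tlb() are hypothetical names for illustration, not part of this patch:

#include <linux/kernel.h>
#include <linux/mmu_notifier.h>

/* Hypothetical device state; only the embedded notifier is real API. */
struct my_dev {
	struct mmu_notifier mn;
	/* ... device-TLB handle, etc. ... */
};

/* Assumed helper: flushes the device TLB for [start, end).  It must not
 * sleep, because invalidate_range() runs under the ptl spin-lock.
 */
void my_dev_flush_tlb(struct my_dev *dev, unsigned long start,
		      unsigned long end);

static void my_dev_invalidate_range(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	struct my_dev *dev = container_of(mn, struct my_dev, mn);

	my_dev_flush_tlb(dev, start, end);
}

static const struct mmu_notifier_ops my_dev_mn_ops = {
	/* No _start()/_end() needed when the device shares the CPU
	 * page tables: invalidate_range() already covers every point
	 * where the external TLB must be flushed.
	 */
	.invalidate_range	= my_dev_invalidate_range,
};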
@@ -190,6 +213,8 @@ extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
 				  unsigned long start, unsigned long end);
 extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 				  unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
+				  unsigned long start, unsigned long end);
 
 static inline void mmu_notifier_release(struct mm_struct *mm)
 {
@@ -242,6 +267,13 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 		__mmu_notifier_invalidate_range_end(mm, start, end);
 }
 
+static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+				  unsigned long start, unsigned long end)
+{
+	if (mm_has_notifiers(mm))
+		__mmu_notifier_invalidate_range(mm, start, end);
+}
+
 static inline void mmu_notifier_mm_init(struct mm_struct *mm)
 {
 	mm->mmu_notifier_mm = NULL;
@@ -279,6 +311,44 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 	__young;							\
 })
 
+#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
+({									\
+	unsigned long ___addr = __address & PAGE_MASK;			\
+	struct mm_struct *___mm = (__vma)->vm_mm;			\
+	pte_t ___pte;							\
+									\
+	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
+	mmu_notifier_invalidate_range(___mm, ___addr,			\
+				      ___addr + PAGE_SIZE);		\
+									\
+	___pte;								\
+})
+
+#define pmdp_clear_flush_notify(__vma, __haddr, __pmd)			\
+({									\
+	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
+	struct mm_struct *___mm = (__vma)->vm_mm;			\
+	pmd_t ___pmd;							\
+									\
+	___pmd = pmdp_clear_flush(__vma, __haddr, __pmd);		\
+	mmu_notifier_invalidate_range(___mm, ___haddr,			\
+				      ___haddr + HPAGE_PMD_SIZE);	\
+									\
+	___pmd;								\
+})
+
+#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd)			\
+({									\
+	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
+	pmd_t ___pmd;							\
+									\
+	___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd);		\
+	mmu_notifier_invalidate_range(__mm, ___haddr,			\
+				      ___haddr + HPAGE_PMD_SIZE);	\
+									\
+	___pmd;								\
+})
+
 /*
  * set_pte_at_notify() sets the pte _after_ running the notifier.
  * This is safe to start by updating the secondary MMUs, because the primary MMU
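At a call site the new _notify macros behave exactly like the primitives they wrap, with the secondary-TLB invalidation folded in. An illustrative helper (not from this patch) that swaps in a new PTE while the caller holds the page-table lock:

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/* Illustrative only: ptep_clear_flush_notify() clears the old entry,
 * flushes the CPU TLB, and then calls mmu_notifier_invalidate_range()
 * for the page, so an external TLB drops its stale translation before
 * the new entry becomes visible.
 */
static void replace_one_pte(struct vm_area_struct *vma, unsigned long addr,
			    pte_t *ptep, pte_t newpte)
{
	ptep_clear_flush_notify(vma, addr, ptep);
	set_pte_at(vma->vm_mm, addr, ptep, newpte);
}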
@@ -342,6 +412,11 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 {
 }
 
+static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+				  unsigned long start, unsigned long end)
+{
+}
+
 static inline void mmu_notifier_mm_init(struct mm_struct *mm)
 {
 }
@@ -352,6 +427,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 
 #define ptep_clear_flush_young_notify ptep_clear_flush_young
 #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
+#define ptep_clear_flush_notify ptep_clear_flush
+#define pmdp_clear_flush_notify pmdp_clear_flush
+#define pmdp_get_and_clear_notify pmdp_get_and_clear
 #define set_pte_at_notify set_pte_at
 
 #endif /* CONFIG_MMU_NOTIFIER */
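Registration itself is unchanged by this patch. Continuing the hypothetical my_dev sketch from above, a driver attaches its notifier with mmu_notifier_register(), which may sleep and takes the mmap_sem internally:

#include <linux/sched.h>

/* Sketch only: hook the hypothetical my_dev notifier into the current
 * process' address space.  After this returns successfully,
 * my_dev_invalidate_range() is called for every range whose
 * translations must be dropped from the device TLB.
 */
static int my_dev_attach(struct my_dev *dev)
{
	dev->mn.ops = &my_dev_mn_ops;

	return mmu_notifier_register(&dev->mn, current->mm);
}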