diff options
Diffstat (limited to 'include/linux/mmu_notifier.h')
| -rw-r--r-- | include/linux/mmu_notifier.h | 60 |
1 file changed, 12 insertions(+), 48 deletions(-)
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 1d1b1e13f79f..bc823c4c028b 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | #include <linux/list.h> | 4 | #include <linux/list.h> |
| 5 | #include <linux/spinlock.h> | 5 | #include <linux/spinlock.h> |
| 6 | #include <linux/mm_types.h> | 6 | #include <linux/mm_types.h> |
| 7 | #include <linux/srcu.h> | ||
| 7 | 8 | ||
| 8 | struct mmu_notifier; | 9 | struct mmu_notifier; |
| 9 | struct mmu_notifier_ops; | 10 | struct mmu_notifier_ops; |
| @@ -245,50 +246,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) | |||
| 245 | __mmu_notifier_mm_destroy(mm); | 246 | __mmu_notifier_mm_destroy(mm); |
| 246 | } | 247 | } |
| 247 | 248 | ||
| 248 | /* | ||
| 249 | * These two macros will sometime replace ptep_clear_flush. | ||
| 250 | * ptep_clear_flush is implemented as macro itself, so this also is | ||
| 251 | * implemented as a macro until ptep_clear_flush will converted to an | ||
| 252 | * inline function, to diminish the risk of compilation failure. The | ||
| 253 | * invalidate_page method over time can be moved outside the PT lock | ||
| 254 | * and these two macros can be later removed. | ||
| 255 | */ | ||
| 256 | #define ptep_clear_flush_notify(__vma, __address, __ptep) \ | ||
| 257 | ({ \ | ||
| 258 | pte_t __pte; \ | ||
| 259 | struct vm_area_struct *___vma = __vma; \ | ||
| 260 | unsigned long ___address = __address; \ | ||
| 261 | __pte = ptep_clear_flush(___vma, ___address, __ptep); \ | ||
| 262 | mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \ | ||
| 263 | __pte; \ | ||
| 264 | }) | ||
| 265 | |||
| 266 | #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \ | ||
| 267 | ({ \ | ||
| 268 | pmd_t __pmd; \ | ||
| 269 | struct vm_area_struct *___vma = __vma; \ | ||
| 270 | unsigned long ___address = __address; \ | ||
| 271 | VM_BUG_ON(__address & ~HPAGE_PMD_MASK); \ | ||
| 272 | mmu_notifier_invalidate_range_start(___vma->vm_mm, ___address, \ | ||
| 273 | (__address)+HPAGE_PMD_SIZE);\ | ||
| 274 | __pmd = pmdp_clear_flush(___vma, ___address, __pmdp); \ | ||
| 275 | mmu_notifier_invalidate_range_end(___vma->vm_mm, ___address, \ | ||
| 276 | (__address)+HPAGE_PMD_SIZE); \ | ||
| 277 | __pmd; \ | ||
| 278 | }) | ||
| 279 | |||
| 280 | #define pmdp_splitting_flush_notify(__vma, __address, __pmdp) \ | ||
| 281 | ({ \ | ||
| 282 | struct vm_area_struct *___vma = __vma; \ | ||
| 283 | unsigned long ___address = __address; \ | ||
| 284 | VM_BUG_ON(__address & ~HPAGE_PMD_MASK); \ | ||
| 285 | mmu_notifier_invalidate_range_start(___vma->vm_mm, ___address, \ | ||
| 286 | (__address)+HPAGE_PMD_SIZE);\ | ||
| 287 | pmdp_splitting_flush(___vma, ___address, __pmdp); \ | ||
| 288 | mmu_notifier_invalidate_range_end(___vma->vm_mm, ___address, \ | ||
| 289 | (__address)+HPAGE_PMD_SIZE); \ | ||
| 290 | }) | ||
| 291 | |||
| 292 | #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ | 249 | #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ |
| 293 | ({ \ | 250 | ({ \ |
| 294 | int __young; \ | 251 | int __young; \ |
| @@ -311,14 +268,24 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) | |||
| 311 | __young; \ | 268 | __young; \ |
| 312 | }) | 269 | }) |
| 313 | 270 | ||
| 271 | /* | ||
| 272 | * set_pte_at_notify() sets the pte _after_ running the notifier. | ||
| 273 | * This is safe to start by updating the secondary MMUs, because the primary MMU | ||
| 274 | * pte invalidate must have already happened with a ptep_clear_flush() before | ||
| 275 | * set_pte_at_notify() has been invoked. Updating the secondary MMUs first is | ||
| 276 | * required when we change both the protection of the mapping from read-only to | ||
| 277 | * read-write and the pfn (like during copy on write page faults). Otherwise the | ||
| 278 | * old page would remain mapped readonly in the secondary MMUs after the new | ||
| 279 | * page is already writable by some CPU through the primary MMU. | ||
| 280 | */ | ||
| 314 | #define set_pte_at_notify(__mm, __address, __ptep, __pte) \ | 281 | #define set_pte_at_notify(__mm, __address, __ptep, __pte) \ |
| 315 | ({ \ | 282 | ({ \ |
| 316 | struct mm_struct *___mm = __mm; \ | 283 | struct mm_struct *___mm = __mm; \ |
| 317 | unsigned long ___address = __address; \ | 284 | unsigned long ___address = __address; \ |
| 318 | pte_t ___pte = __pte; \ | 285 | pte_t ___pte = __pte; \ |
| 319 | \ | 286 | \ |
| 320 | set_pte_at(___mm, ___address, __ptep, ___pte); \ | ||
| 321 | mmu_notifier_change_pte(___mm, ___address, ___pte); \ | 287 | mmu_notifier_change_pte(___mm, ___address, ___pte); \ |
| 288 | set_pte_at(___mm, ___address, __ptep, ___pte); \ | ||
| 322 | }) | 289 | }) |
| 323 | 290 | ||
| 324 | #else /* CONFIG_MMU_NOTIFIER */ | 291 | #else /* CONFIG_MMU_NOTIFIER */ |
| @@ -369,9 +336,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) | |||
| 369 | 336 | ||
| 370 | #define ptep_clear_flush_young_notify ptep_clear_flush_young | 337 | #define ptep_clear_flush_young_notify ptep_clear_flush_young |
| 371 | #define pmdp_clear_flush_young_notify pmdp_clear_flush_young | 338 | #define pmdp_clear_flush_young_notify pmdp_clear_flush_young |
| 372 | #define ptep_clear_flush_notify ptep_clear_flush | ||
| 373 | #define pmdp_clear_flush_notify pmdp_clear_flush | ||
| 374 | #define pmdp_splitting_flush_notify pmdp_splitting_flush | ||
| 375 | #define set_pte_at_notify set_pte_at | 339 | #define set_pte_at_notify set_pte_at |
| 376 | 340 | ||
| 377 | #endif /* CONFIG_MMU_NOTIFIER */ | 341 | #endif /* CONFIG_MMU_NOTIFIER */ |
