-rw-r--r--  include/linux/mmu_notifier.h | 41
-rw-r--r--  kernel/events/uprobes.c      |  2
-rw-r--r--  mm/fremap.c                  |  2
-rw-r--r--  mm/huge_memory.c             |  9
-rw-r--r--  mm/hugetlb.c                 |  7
-rw-r--r--  mm/ksm.c                     |  4
-rw-r--r--  mm/memory.c                  |  3
-rw-r--r--  mm/migrate.c                 |  3
-rw-r--r--  mm/rmap.c                    |  2
9 files changed, 61 insertions(+), 12 deletions(-)
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 17907908d1df..966da2b4b803 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -284,6 +284,44 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 	__young;							\
 })
 
+#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
+({									\
+	unsigned long ___addr = __address & PAGE_MASK;			\
+	struct mm_struct *___mm = (__vma)->vm_mm;			\
+	pte_t ___pte;							\
+									\
+	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
+	mmu_notifier_invalidate_range(___mm, ___addr,			\
+					___addr + PAGE_SIZE);		\
+									\
+	___pte;								\
+})
+
+#define pmdp_clear_flush_notify(__vma, __haddr, __pmd)			\
+({									\
+	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
+	struct mm_struct *___mm = (__vma)->vm_mm;			\
+	pmd_t ___pmd;							\
+									\
+	___pmd = pmdp_clear_flush(__vma, __haddr, __pmd);		\
+	mmu_notifier_invalidate_range(___mm, ___haddr,			\
+				      ___haddr + HPAGE_PMD_SIZE);	\
+									\
+	___pmd;								\
+})
+
+#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd)			\
+({									\
+	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
+	pmd_t ___pmd;							\
+									\
+	___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd);		\
+	mmu_notifier_invalidate_range(__mm, ___haddr,			\
+				      ___haddr + HPAGE_PMD_SIZE);	\
+									\
+	___pmd;								\
+})
+
 /*
  * set_pte_at_notify() sets the pte _after_ running the notifier.
  * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -362,6 +400,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 
 #define ptep_clear_flush_young_notify ptep_clear_flush_young
 #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
+#define ptep_clear_flush_notify ptep_clear_flush
+#define pmdp_clear_flush_notify pmdp_clear_flush
+#define pmdp_get_and_clear_notify pmdp_get_and_clear
 #define set_pte_at_notify set_pte_at
 
 #endif /* CONFIG_MMU_NOTIFIER */
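
The three helpers above bundle a primary-MMU clear/flush with an immediate mmu_notifier_invalidate_range() call, so secondary MMUs that share the CPU page tables (an IOMMU doing ATS/PASID, for example) drop their stale translations before any replacement entry can appear. A rough consumer-side sketch, assuming the invalidate_range callback added by the companion patch in this series; the my_iommu_* names are invented for illustration:

/*
 * Illustration only: a secondary-MMU driver consuming the invalidations
 * emitted by the *_notify helpers above.  The mmu_notifier API is real;
 * the my_iommu_* names and the IOTLB flush helper are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/mm_types.h>
#include <linux/mmu_notifier.h>

struct my_iommu_ctx {
	struct mmu_notifier	mn;	/* one notifier per bound mm */
	/* ... device state ... */
};

/* Assumed device-specific helper: flush the device IOTLB for [start, end). */
void my_iommu_flush_iotlb(struct my_iommu_ctx *ctx,
			  unsigned long start, unsigned long end);

/*
 * Called from mmu_notifier_invalidate_range(); may run under the
 * page-table lock, so it must not sleep.
 */
static void my_iommu_invalidate_range(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	struct my_iommu_ctx *ctx = container_of(mn, struct my_iommu_ctx, mn);

	my_iommu_flush_iotlb(ctx, start, end);
}

static const struct mmu_notifier_ops my_iommu_mn_ops = {
	.invalidate_range	= my_iommu_invalidate_range,
};

static int my_iommu_bind_mm(struct my_iommu_ctx *ctx, struct mm_struct *mm)
{
	ctx->mn.ops = &my_iommu_mn_ops;
	return mmu_notifier_register(&ctx->mn, mm);
}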
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 1d0af8a2c646..bc143cf56cab 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -193,7 +193,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 	}
 
 	flush_cache_page(vma, addr, pte_pfn(*ptep));
-	ptep_clear_flush(vma, addr, ptep);
+	ptep_clear_flush_notify(vma, addr, ptep);
 	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
 
 	page_remove_rmap(page);
diff --git a/mm/fremap.c b/mm/fremap.c
index 72b8fa361433..9129013732d7 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -37,7 +37,7 @@ static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (pte_present(pte)) {
 		flush_cache_page(vma, addr, pte_pfn(pte));
-		pte = ptep_clear_flush(vma, addr, ptep);
+		pte = ptep_clear_flush_notify(vma, addr, ptep);
 		page = vm_normal_page(vma, addr, pte);
 		if (page) {
 			if (pte_dirty(pte))
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index de984159cf0b..1d89526ed531 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1036,7 +1036,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 		goto out_free_pages;
 	VM_BUG_ON_PAGE(!PageHead(page), page);
 
-	pmdp_clear_flush(vma, haddr, pmd);
+	pmdp_clear_flush_notify(vma, haddr, pmd);
 	/* leave pmd empty until pte is filled */
 
 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
@@ -1179,7 +1179,7 @@ alloc:
 		pmd_t entry;
 		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-		pmdp_clear_flush(vma, haddr, pmd);
+		pmdp_clear_flush_notify(vma, haddr, pmd);
 		page_add_new_anon_rmap(new_page, vma, haddr);
 		mem_cgroup_commit_charge(new_page, memcg, false);
 		lru_cache_add_active_or_unevictable(new_page, vma);
@@ -1512,7 +1512,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		pmd_t entry;
 		ret = 1;
 		if (!prot_numa) {
-			entry = pmdp_get_and_clear(mm, addr, pmd);
+			entry = pmdp_get_and_clear_notify(mm, addr, pmd);
 			if (pmd_numa(entry))
 				entry = pmd_mknonnuma(entry);
 			entry = pmd_modify(entry, newprot);
@@ -1644,6 +1644,7 @@ static int __split_huge_page_splitting(struct page *page,
 		 * serialize against split_huge_page*.
 		 */
 		pmdp_splitting_flush(vma, address, pmd);
+
 		ret = 1;
 		spin_unlock(ptl);
 	}
@@ -2834,7 +2835,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
 	pmd_t _pmd;
 	int i;
 
-	pmdp_clear_flush(vma, haddr, pmd);
+	pmdp_clear_flush_notify(vma, haddr, pmd);
 	/* leave pmd empty until pte is filled */
 
 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9fd722769927..2e6add04fa1b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2598,8 +2598,11 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			}
 			set_huge_pte_at(dst, addr, dst_pte, entry);
 		} else {
-			if (cow)
+			if (cow) {
 				huge_ptep_set_wrprotect(src, addr, src_pte);
+				mmu_notifier_invalidate_range(src, mmun_start,
+								   mmun_end);
+			}
 			entry = huge_ptep_get(src_pte);
 			ptepage = pte_page(entry);
 			get_page(ptepage);
@@ -2899,6 +2902,7 @@ retry_avoidcopy:
 
 		/* Break COW */
 		huge_ptep_clear_flush(vma, address, ptep);
+		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
 		set_huge_pte_at(mm, address, ptep,
 				make_huge_pte(vma, new_page, 1));
 		page_remove_rmap(old_page);
@@ -3374,6 +3378,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * and that page table be reused and filled with junk.
 	 */
 	flush_tlb_range(vma, start, end);
+	mmu_notifier_invalidate_range(mm, start, end);
 	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 	mmu_notifier_invalidate_range_end(mm, start, end);
 
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -892,7 +892,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 		 * this assure us that no O_DIRECT can happen after the check
 		 * or in the middle of the check.
 		 */
-		entry = ptep_clear_flush(vma, addr, ptep);
+		entry = ptep_clear_flush_notify(vma, addr, ptep);
 		/*
 		 * Check that no O_DIRECT or similar I/O is in progress on the
 		 * page
@@ -960,7 +960,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	page_add_anon_rmap(kpage, vma, addr);
 
 	flush_cache_page(vma, addr, pte_pfn(*ptep));
-	ptep_clear_flush(vma, addr, ptep);
+	ptep_clear_flush_notify(vma, addr, ptep);
 	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
 
 	page_remove_rmap(page);
diff --git a/mm/memory.c b/mm/memory.c
index 3e503831e042..655fd3d34bb0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -238,6 +238,7 @@ static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	tlb->need_flush = 0;
 	tlb_flush(tlb);
+	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
 #endif
@@ -2234,7 +2235,7 @@ gotten:
 		 * seen in the presence of one thread doing SMC and another
 		 * thread doing COW.
 		 */
-		ptep_clear_flush(vma, address, page_table);
+		ptep_clear_flush_notify(vma, address, page_table);
 		page_add_new_anon_rmap(new_page, vma, address);
 		mem_cgroup_commit_charge(new_page, memcg, false);
 		lru_cache_add_active_or_unevictable(new_page, vma);
diff --git a/mm/migrate.c b/mm/migrate.c
index 01439953abf5..41945cb0ca38 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1854,7 +1854,7 @@ fail_putback:
 	 */
 	flush_cache_range(vma, mmun_start, mmun_end);
 	page_add_anon_rmap(new_page, vma, mmun_start);
-	pmdp_clear_flush(vma, mmun_start, pmd);
+	pmdp_clear_flush_notify(vma, mmun_start, pmd);
 	set_pmd_at(mm, mmun_start, pmd, entry);
 	flush_tlb_range(vma, mmun_start, mmun_end);
 	update_mmu_cache_pmd(vma, address, &entry);
@@ -1862,6 +1862,7 @@ fail_putback:
 	if (page_count(page) != 2) {
 		set_pmd_at(mm, mmun_start, pmd, orig_entry);
 		flush_tlb_range(vma, mmun_start, mmun_end);
+		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
 		update_mmu_cache_pmd(vma, address, &entry);
 		page_remove_rmap(new_page);
 		goto fail_putback;
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1378,7 +1378,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 
 	/* Nuke the page table entry. */
 	flush_cache_page(vma, address, pte_pfn(*pte));
-	pteval = ptep_clear_flush(vma, address, pte);
+	pteval = ptep_clear_flush_notify(vma, address, pte);
 
 	/* If nonlinear, store the file page offset in the pte. */
 	if (page->index != linear_page_index(vma, address)) {
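
Net effect across the files above: every site that removes a primary-MMU translation now clears the entry, flushes the CPU TLB, and calls mmu_notifier_invalidate_range() before any replacement mapping is installed. A minimal open-coded sketch of that ordering at a COW-style site (illustrative only, not a copy of any single call site touched here):

/* Illustrative only: the ordering the *_notify wrappers enforce at COW sites. */
static void cow_replace_pte_sketch(struct vm_area_struct *vma, struct mm_struct *mm,
				   unsigned long addr, pte_t *ptep, pte_t newpte)
{
	flush_cache_page(vma, addr, pte_pfn(*ptep));
	/*
	 * Clears the PTE and flushes the CPU TLB, then invalidates
	 * [addr & PAGE_MASK, (addr & PAGE_MASK) + PAGE_SIZE) in any
	 * secondary MMUs via mmu_notifier_invalidate_range().
	 */
	ptep_clear_flush_notify(vma, addr, ptep);
	/* Only now is it safe to make the replacement page visible. */
	set_pte_at_notify(mm, addr, ptep, newpte);
}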