commit 34ee645e83b60ae3d5955f70ab9ab9a159136673
author:    Joerg Roedel <jroedel@suse.de>    2014-11-12 21:46:09 -0500
committer: Oded Gabbay <oded.gabbay@amd.com> 2014-11-12 21:46:09 -0500
tree:      49aa485f580cd9594f89f0b761dd685b2c2981af /mm
parent:    1897bdc4d33167e9036460631d1349e59d841f2d
mmu_notifier: call mmu_notifier_invalidate_range() from VMM
Add calls to the new mmu_notifier_invalidate_range() function to all
places in the VMM that need it.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Jay Cornwall <Jay.Cornwall@amd.com>
Cc: Oded Gabbay <Oded.Gabbay@amd.com>
Cc: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
Diffstat (limited to 'mm'):
 mm/fremap.c      | 2
 mm/huge_memory.c | 9
 mm/hugetlb.c     | 7
 mm/ksm.c         | 4
 mm/memory.c      | 3
 mm/migrate.c     | 3
 mm/rmap.c        | 2
 7 files changed, 19 insertions, 11 deletions
diff --git a/mm/fremap.c b/mm/fremap.c
index 72b8fa361433..9129013732d7 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -37,7 +37,7 @@ static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (pte_present(pte)) {
 		flush_cache_page(vma, addr, pte_pfn(pte));
-		pte = ptep_clear_flush(vma, addr, ptep);
+		pte = ptep_clear_flush_notify(vma, addr, ptep);
 		page = vm_normal_page(vma, addr, pte);
 		if (page) {
 			if (pte_dirty(pte))
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index de984159cf0b..1d89526ed531 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1036,7 +1036,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 		goto out_free_pages;
 	VM_BUG_ON_PAGE(!PageHead(page), page);
 
-	pmdp_clear_flush(vma, haddr, pmd);
+	pmdp_clear_flush_notify(vma, haddr, pmd);
 	/* leave pmd empty until pte is filled */
 
 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
@@ -1179,7 +1179,7 @@ alloc:
 		pmd_t entry;
 		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-		pmdp_clear_flush(vma, haddr, pmd);
+		pmdp_clear_flush_notify(vma, haddr, pmd);
 		page_add_new_anon_rmap(new_page, vma, haddr);
 		mem_cgroup_commit_charge(new_page, memcg, false);
 		lru_cache_add_active_or_unevictable(new_page, vma);
@@ -1512,7 +1512,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		pmd_t entry;
 		ret = 1;
 		if (!prot_numa) {
-			entry = pmdp_get_and_clear(mm, addr, pmd);
+			entry = pmdp_get_and_clear_notify(mm, addr, pmd);
 			if (pmd_numa(entry))
 				entry = pmd_mknonnuma(entry);
 			entry = pmd_modify(entry, newprot);
@@ -1644,6 +1644,7 @@ static int __split_huge_page_splitting(struct page *page,
 		 * serialize against split_huge_page*.
 		 */
 		pmdp_splitting_flush(vma, address, pmd);
+
 		ret = 1;
 		spin_unlock(ptl);
 	}
@@ -2834,7 +2835,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
 	pmd_t _pmd;
 	int i;
 
-	pmdp_clear_flush(vma, haddr, pmd);
+	pmdp_clear_flush_notify(vma, haddr, pmd);
 	/* leave pmd empty until pte is filled */
 
 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9fd722769927..2e6add04fa1b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2598,8 +2598,11 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			}
 			set_huge_pte_at(dst, addr, dst_pte, entry);
 		} else {
-			if (cow)
+			if (cow) {
 				huge_ptep_set_wrprotect(src, addr, src_pte);
+				mmu_notifier_invalidate_range(src, mmun_start,
+							      mmun_end);
+			}
 			entry = huge_ptep_get(src_pte);
 			ptepage = pte_page(entry);
 			get_page(ptepage);
@@ -2899,6 +2902,7 @@ retry_avoidcopy:
 
 		/* Break COW */
 		huge_ptep_clear_flush(vma, address, ptep);
+		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
 		set_huge_pte_at(mm, address, ptep,
 				make_huge_pte(vma, new_page, 1));
 		page_remove_rmap(old_page);
@@ -3374,6 +3378,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * and that page table be reused and filled with junk.
 	 */
 	flush_tlb_range(vma, start, end);
+	mmu_notifier_invalidate_range(mm, start, end);
 	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 	mmu_notifier_invalidate_range_end(mm, start, end);
 
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -892,7 +892,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 	 * this assure us that no O_DIRECT can happen after the check
 	 * or in the middle of the check.
 	 */
-	entry = ptep_clear_flush(vma, addr, ptep);
+	entry = ptep_clear_flush_notify(vma, addr, ptep);
 	/*
 	 * Check that no O_DIRECT or similar I/O is in progress on the
 	 * page
@@ -960,7 +960,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	page_add_anon_rmap(kpage, vma, addr);
 
 	flush_cache_page(vma, addr, pte_pfn(*ptep));
-	ptep_clear_flush(vma, addr, ptep);
+	ptep_clear_flush_notify(vma, addr, ptep);
 	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
 
 	page_remove_rmap(page);
diff --git a/mm/memory.c b/mm/memory.c
index 3e503831e042..655fd3d34bb0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -238,6 +238,7 @@ static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	tlb->need_flush = 0;
 	tlb_flush(tlb);
+	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
 #endif
@@ -2234,7 +2235,7 @@ gotten:
 		 * seen in the presence of one thread doing SMC and another
 		 * thread doing COW.
 		 */
-		ptep_clear_flush(vma, address, page_table);
+		ptep_clear_flush_notify(vma, address, page_table);
 		page_add_new_anon_rmap(new_page, vma, address);
 		mem_cgroup_commit_charge(new_page, memcg, false);
 		lru_cache_add_active_or_unevictable(new_page, vma);
diff --git a/mm/migrate.c b/mm/migrate.c
index 01439953abf5..41945cb0ca38 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1854,7 +1854,7 @@ fail_putback:
 	 */
 	flush_cache_range(vma, mmun_start, mmun_end);
 	page_add_anon_rmap(new_page, vma, mmun_start);
-	pmdp_clear_flush(vma, mmun_start, pmd);
+	pmdp_clear_flush_notify(vma, mmun_start, pmd);
 	set_pmd_at(mm, mmun_start, pmd, entry);
 	flush_tlb_range(vma, mmun_start, mmun_end);
 	update_mmu_cache_pmd(vma, address, &entry);
@@ -1862,6 +1862,7 @@ fail_putback:
 	if (page_count(page) != 2) {
 		set_pmd_at(mm, mmun_start, pmd, orig_entry);
 		flush_tlb_range(vma, mmun_start, mmun_end);
+		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
 		update_mmu_cache_pmd(vma, address, &entry);
 		page_remove_rmap(new_page);
 		goto fail_putback;
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1378,7 +1378,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 
 	/* Nuke the page table entry. */
 	flush_cache_page(vma, address, pte_pfn(*pte));
-	pteval = ptep_clear_flush(vma, address, pte);
+	pteval = ptep_clear_flush_notify(vma, address, pte);
 
 	/* If nonlinear, store the file page offset in the pte. */
 	if (page->index != linear_page_index(vma, address)) {