author     Jérôme Glisse <jglisse@redhat.com>            2019-05-13 20:20:49 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2019-05-14 12:47:49 -0400
commit     6f4f13e8d9e27cefd2cd88dd4fd80aa6d68b9131 (patch)
tree       e61a4a3df243e6760646954c6c3c3fc8f0fd021f
parent     d87f055b94ea9270c491b5e650dd776ecc30d7c9 (diff)
mm/mmu_notifier: contextual information for event triggering invalidation
A CPU page table update can happen for many reasons, not only as a result
of a syscall (munmap(), mprotect(), mremap(), madvise(), ...) but also as a
result of kernel activities (memory compression, reclaim, migration, ...).

Users of the mmu notifier API track changes to the CPU page table and take
specific action when they happen.  However, the current API only provides
the range of virtual addresses affected by the change, not why the change
is happening.

This patchset does the initial mechanical conversion of all the places that
call mmu_notifier_range_init() to also provide the default MMU_NOTIFY_UNMAP
event as well as the vma if it is known (most invalidations happen against
a given vma).  Passing down the vma allows users of the mmu notifier to
inspect the new vma page protection.

MMU_NOTIFY_UNMAP is always the safe default, as users of the mmu notifier
should assume that everything in the range is going away when that event
happens.  A later patch converts the mm call paths to use more appropriate
events for each call.

This is done as two patches so that no call site is forgotten, especially
as it uses the following coccinelle patch:

%<----------------------------------------------------------------------
@@
identifier I1, I2, I3, I4;
@@
static inline void mmu_notifier_range_init(struct mmu_notifier_range *I1,
+enum mmu_notifier_event event,
+unsigned flags,
+struct vm_area_struct *vma,
struct mm_struct *I2, unsigned long I3, unsigned long I4) { ... }

@@
@@
-#define mmu_notifier_range_init(range, mm, start, end)
+#define mmu_notifier_range_init(range, event, flags, vma, mm, start, end)

@@
expression E1, E3, E4;
identifier I1;
@@
<...
mmu_notifier_range_init(E1,
+MMU_NOTIFY_UNMAP, 0, I1,
I1->vm_mm, E3, E4)
...>

@@
expression E1, E2, E3, E4;
identifier FN, VMA;
@@
FN(..., struct vm_area_struct *VMA, ...) {
<...
mmu_notifier_range_init(E1,
+MMU_NOTIFY_UNMAP, 0, VMA,
E2, E3, E4)
...> }

@@
expression E1, E2, E3, E4;
identifier FN, VMA;
@@
FN(...) {
struct vm_area_struct *VMA;
<...
mmu_notifier_range_init(E1,
+MMU_NOTIFY_UNMAP, 0, VMA,
E2, E3, E4)
...> }

@@
expression E1, E2, E3, E4;
identifier FN;
@@
FN(...) {
<...
mmu_notifier_range_init(E1,
+MMU_NOTIFY_UNMAP, 0, NULL,
E2, E3, E4)
...> }
---------------------------------------------------------------------->%

Applied with:

spatch --all-includes --sp-file mmu-notifier.spatch fs/proc/task_mmu.c --in-place
spatch --sp-file mmu-notifier.spatch --dir kernel/events/ --in-place
spatch --sp-file mmu-notifier.spatch --dir mm --in-place

Link: http://lkml.kernel.org/r/20190326164747.24405-6-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Ross Zwisler <zwisler@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
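To make the new calling convention concrete, here is a minimal sketch of a
converted invalidation sequence. The function example_invalidate_range() is
hypothetical and only illustrates how the event, flags, and vma arguments now
precede the mm and the address range; the real converted call sites are shown
in the diff below.

/*
 * Minimal sketch (not part of this patch): a converted call site passes
 * the event type, flags, and the vma (or NULL when no vma is at hand)
 * ahead of the mm and the address range. MMU_NOTIFY_UNMAP remains the
 * conservative default event until a later patch in the series picks
 * finer-grained events per call path.
 */
static void example_invalidate_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end)
{
        struct mmu_notifier_range range;

        mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma,
                                vma->vm_mm, start, end);
        mmu_notifier_invalidate_range_start(&range);
        /* ... modify the CPU page table for [start, end) here ... */
        mmu_notifier_invalidate_range_end(&range);
}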
-rw-r--r--  fs/proc/task_mmu.c             3
-rw-r--r--  include/linux/mmu_notifier.h   5
-rw-r--r--  kernel/events/uprobes.c        3
-rw-r--r--  mm/huge_memory.c              12
-rw-r--r--  mm/hugetlb.c                  12
-rw-r--r--  mm/khugepaged.c                3
-rw-r--r--  mm/ksm.c                       6
-rw-r--r--  mm/madvise.c                   3
-rw-r--r--  mm/memory.c                   25
-rw-r--r--  mm/migrate.c                   5
-rw-r--r--  mm/mprotect.c                  3
-rw-r--r--  mm/mremap.c                    3
-rw-r--r--  mm/oom_kill.c                  3
-rw-r--r--  mm/rmap.c                      6
14 files changed, 62 insertions, 30 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 95ca1fe7283c..ea464f2b9867 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1169,7 +1169,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 			break;
 		}
 
-		mmu_notifier_range_init(&range, mm, 0, -1UL);
+		mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
+					NULL, mm, 0, -1UL);
 		mmu_notifier_invalidate_range_start(&range);
 	}
 	walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 2386e71ac1b8..62f94cd85455 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -356,6 +356,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 
 
 static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
+					   enum mmu_notifier_event event,
+					   unsigned flags,
+					   struct vm_area_struct *vma,
 					   struct mm_struct *mm,
 					   unsigned long start,
 					   unsigned long end)
@@ -491,7 +494,7 @@ static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
 	range->end = end;
 }
 
-#define mmu_notifier_range_init(range, mm, start, end) \
+#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \
 	_mmu_notifier_range_init(range, start, end)
 
 static inline bool
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 4ca7364c956d..e34b699f3865 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -161,7 +161,8 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 	struct mmu_notifier_range range;
 	struct mem_cgroup *memcg;
 
-	mmu_notifier_range_init(&range, mm, addr, addr + PAGE_SIZE);
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, addr,
+				addr + PAGE_SIZE);
 
 	VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 50c665b12cf1..428b5794f4b8 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1224,7 +1224,8 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
 		cond_resched();
 	}
 
-	mmu_notifier_range_init(&range, vma->vm_mm, haddr,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+				haddr,
 				haddr + HPAGE_PMD_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
 
@@ -1388,7 +1389,8 @@ alloc:
 					vma, HPAGE_PMD_NR);
 	__SetPageUptodate(new_page);
 
-	mmu_notifier_range_init(&range, vma->vm_mm, haddr,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+				haddr,
 				haddr + HPAGE_PMD_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
 
@@ -2064,7 +2066,8 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
 	spinlock_t *ptl;
 	struct mmu_notifier_range range;
 
-	mmu_notifier_range_init(&range, vma->vm_mm, address & HPAGE_PUD_MASK,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+				address & HPAGE_PUD_MASK,
 				(address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
 	ptl = pud_lock(vma->vm_mm, pud);
@@ -2282,7 +2285,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	spinlock_t *ptl;
 	struct mmu_notifier_range range;
 
-	mmu_notifier_range_init(&range, vma->vm_mm, address & HPAGE_PMD_MASK,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+				address & HPAGE_PMD_MASK,
 				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
 	ptl = pmd_lock(vma->vm_mm, pmd);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 98a3c7c224cb..89d206d6ecf3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3294,7 +3294,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 
 	if (cow) {
-		mmu_notifier_range_init(&range, src, vma->vm_start,
+		mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, src,
+					vma->vm_start,
 					vma->vm_end);
 		mmu_notifier_invalidate_range_start(&range);
 	}
@@ -3406,7 +3407,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	/*
 	 * If sharing possible, alert mmu notifiers of worst case.
 	 */
-	mmu_notifier_range_init(&range, mm, start, end);
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
+				end);
 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
 	mmu_notifier_invalidate_range_start(&range);
 	address = start;
@@ -3673,7 +3675,8 @@ retry_avoidcopy:
 			    pages_per_huge_page(h));
 	__SetPageUptodate(new_page);
 
-	mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h));
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, haddr,
+				haddr + huge_page_size(h));
 	mmu_notifier_invalidate_range_start(&range);
 
 	/*
@@ -4408,7 +4411,8 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * start/end. Set range.start/range.end to cover the maximum possible
 	 * range if PMD sharing is possible.
 	 */
-	mmu_notifier_range_init(&range, mm, start, end);
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
+				end);
 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
 
 	BUG_ON(address >= end);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 7ba7a1e4fa79..14581dbf62a5 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1016,7 +1016,8 @@ static void collapse_huge_page(struct mm_struct *mm,
 	pte = pte_offset_map(pmd, address);
 	pte_ptl = pte_lockptr(mm, pmd);
 
-	mmu_notifier_range_init(&range, mm, address, address + HPAGE_PMD_SIZE);
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, NULL, mm,
+				address, address + HPAGE_PMD_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
 	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
 	/*
diff --git a/mm/ksm.c b/mm/ksm.c
index fc64874dc6f4..01f5fe2c90cf 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1066,7 +1066,8 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 
 	BUG_ON(PageTransCompound(page));
 
-	mmu_notifier_range_init(&range, mm, pvmw.address,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm,
+				pvmw.address,
 				pvmw.address + PAGE_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
 
@@ -1154,7 +1155,8 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	if (!pmd)
 		goto out;
 
-	mmu_notifier_range_init(&range, mm, addr, addr + PAGE_SIZE);
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, addr,
+				addr + PAGE_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
 
 	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
diff --git a/mm/madvise.c b/mm/madvise.c
index bb3a4554d5d5..1c52bdf1b696 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -472,7 +472,8 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
 	range.end = min(vma->vm_end, end_addr);
 	if (range.end <= vma->vm_start)
 		return -EINVAL;
-	mmu_notifier_range_init(&range, mm, range.start, range.end);
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm,
+				range.start, range.end);
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, range.start, range.end);
diff --git a/mm/memory.c b/mm/memory.c
index f7d962d7de19..90672674c582 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1010,7 +1010,8 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	is_cow = is_cow_mapping(vma->vm_flags);
 
 	if (is_cow) {
-		mmu_notifier_range_init(&range, src_mm, addr, end);
+		mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma,
+					src_mm, addr, end);
 		mmu_notifier_invalidate_range_start(&range);
 	}
 
@@ -1334,7 +1335,8 @@ void unmap_vmas(struct mmu_gather *tlb,
 {
 	struct mmu_notifier_range range;
 
-	mmu_notifier_range_init(&range, vma->vm_mm, start_addr, end_addr);
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+				start_addr, end_addr);
 	mmu_notifier_invalidate_range_start(&range);
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
 		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
@@ -1356,7 +1358,8 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	struct mmu_gather tlb;
 
 	lru_add_drain();
-	mmu_notifier_range_init(&range, vma->vm_mm, start, start + size);
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+				start, start + size);
 	tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
 	update_hiwater_rss(vma->vm_mm);
 	mmu_notifier_invalidate_range_start(&range);
@@ -1382,7 +1385,8 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	struct mmu_gather tlb;
 
 	lru_add_drain();
-	mmu_notifier_range_init(&range, vma->vm_mm, address, address + size);
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+				address, address + size);
 	tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
 	update_hiwater_rss(vma->vm_mm);
 	mmu_notifier_invalidate_range_start(&range);
@@ -2279,7 +2283,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 
 	__SetPageUptodate(new_page);
 
-	mmu_notifier_range_init(&range, mm, vmf->address & PAGE_MASK,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm,
+				vmf->address & PAGE_MASK,
 				(vmf->address & PAGE_MASK) + PAGE_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
 
@@ -4104,8 +4109,9 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
 		goto out;
 
 	if (range) {
-		mmu_notifier_range_init(range, mm, address & PMD_MASK,
-					(address & PMD_MASK) + PMD_SIZE);
+		mmu_notifier_range_init(range, MMU_NOTIFY_UNMAP, 0,
+					NULL, mm, address & PMD_MASK,
+					(address & PMD_MASK) + PMD_SIZE);
 		mmu_notifier_invalidate_range_start(range);
 	}
 	*ptlp = pmd_lock(mm, pmd);
@@ -4122,8 +4128,9 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
 		goto out;
 
 	if (range) {
-		mmu_notifier_range_init(range, mm, address & PAGE_MASK,
-					(address & PAGE_MASK) + PAGE_SIZE);
+		mmu_notifier_range_init(range, MMU_NOTIFY_UNMAP, 0, NULL, mm,
+					address & PAGE_MASK,
+					(address & PAGE_MASK) + PAGE_SIZE);
 		mmu_notifier_invalidate_range_start(range);
 	}
 	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
diff --git a/mm/migrate.c b/mm/migrate.c
index a1770403ff7f..855bdb3b3333 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2356,7 +2356,8 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
 	mm_walk.mm = migrate->vma->vm_mm;
 	mm_walk.private = migrate;
 
-	mmu_notifier_range_init(&range, mm_walk.mm, migrate->start,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, NULL, mm_walk.mm,
+				migrate->start,
 				migrate->end);
 	mmu_notifier_invalidate_range_start(&range);
 	walk_page_range(migrate->start, migrate->end, &mm_walk);
@@ -2764,6 +2765,8 @@ static void migrate_vma_pages(struct migrate_vma *migrate)
 			notified = true;
 
 			mmu_notifier_range_init(&range,
+						MMU_NOTIFY_UNMAP, 0,
+						NULL,
 						migrate->vma->vm_mm,
 						addr, migrate->end);
 			mmu_notifier_invalidate_range_start(&range);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 028c724dcb1a..b10984052ae9 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -185,7 +185,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 
 	/* invoke the mmu notifier if the pmd is populated */
 	if (!range.start) {
-		mmu_notifier_range_init(&range, vma->vm_mm, addr, end);
+		mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
+					vma, vma->vm_mm, addr, end);
 		mmu_notifier_invalidate_range_start(&range);
 	}
 
diff --git a/mm/mremap.c b/mm/mremap.c
index e3edef6b7a12..fc241d23cd97 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -249,7 +249,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 	old_end = old_addr + len;
 	flush_cache_range(vma, old_addr, old_end);
 
-	mmu_notifier_range_init(&range, vma->vm_mm, old_addr, old_end);
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+				old_addr, old_end);
 	mmu_notifier_invalidate_range_start(&range);
 
 	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 3a2484884cfd..539c91d0b26a 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -531,7 +531,8 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
 			struct mmu_notifier_range range;
 			struct mmu_gather tlb;
 
-			mmu_notifier_range_init(&range, mm, vma->vm_start,
+			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
+						vma, mm, vma->vm_start,
 						vma->vm_end);
 			tlb_gather_mmu(&tlb, mm, range.start, range.end);
 			if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
diff --git a/mm/rmap.c b/mm/rmap.c
index 76c8dfd3ae1c..288e636b7813 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -896,7 +896,8 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 	 * We have to assume the worse case ie pmd for invalidation. Note that
 	 * the page can not be free from this function.
 	 */
-	mmu_notifier_range_init(&range, vma->vm_mm, address,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+				address,
 				min(vma->vm_end, address +
 				    (PAGE_SIZE << compound_order(page))));
 	mmu_notifier_invalidate_range_start(&range);
@@ -1371,7 +1372,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * Note that the page can not be free in this function as call of
 	 * try_to_unmap() must hold a reference on the page.
 	 */
-	mmu_notifier_range_init(&range, vma->vm_mm, address,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+				address,
 				min(vma->vm_end, address +
 				    (PAGE_SIZE << compound_order(page))));
 	if (PageHuge(page)) {