diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-28 19:55:46 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-28 19:55:46 -0500 |
| commit | f346b0becb1bc62e45495f9cdbae3eef35d0b635 (patch) | |
| tree | ae79f3dfb8e031da51d38f0f095f89d7d23f3643 /mm/mremap.c | |
| parent | 00d59fde8532b2d42e80909d2e58678755e04da9 (diff) | |
| parent | 0f4991e8fd48987ae476a92cdee6bfec4aff31b8 (diff) | |
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
- large KASAN update to use arm's "software tag-based mode"
- a few misc things
- sh updates
- ocfs2 updates
- just about all of MM
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (167 commits)
kernel/fork.c: mark 'stack_vm_area' with __maybe_unused
memcg, oom: notify on oom killer invocation from the charge path
mm, swap: fix swapoff with KSM pages
include/linux/gfp.h: fix typo
mm/hmm: fix memremap.h, move dev_page_fault_t callback to hmm
hugetlbfs: Use i_mmap_rwsem to fix page fault/truncate race
hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization
memory_hotplug: add missing newlines to debugging output
mm: remove __hugepage_set_anon_rmap()
include/linux/vmstat.h: remove unused page state adjustment macro
mm/page_alloc.c: allow error injection
mm: migrate: drop unused argument of migrate_page_move_mapping()
blkdev: avoid migration stalls for blkdev pages
mm: migrate: provide buffer_migrate_page_norefs()
mm: migrate: move migrate_page_lock_buffers()
mm: migrate: lock buffers before migrate_page_move_mapping()
mm: migration: factor out code to compute expected number of page references
mm, page_alloc: enable pcpu_drain with zone capability
kmemleak: add config to select auto scan
mm/page_alloc.c: don't call kasan_free_pages() at deferred mem init
...
Diffstat (limited to 'mm/mremap.c')
| -rw-r--r-- | mm/mremap.c | 10 |
1 files changed, 4 insertions, 6 deletions
```diff
diff --git a/mm/mremap.c b/mm/mremap.c
index 7f9f9180e401..def01d86e36f 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -197,16 +197,14 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		bool need_rmap_locks)
 {
 	unsigned long extent, next, old_end;
+	struct mmu_notifier_range range;
 	pmd_t *old_pmd, *new_pmd;
-	unsigned long mmun_start;	/* For mmu_notifiers */
-	unsigned long mmun_end;	/* For mmu_notifiers */
 
 	old_end = old_addr + len;
 	flush_cache_range(vma, old_addr, old_end);
 
-	mmun_start = old_addr;
-	mmun_end = old_end;
-	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
+	mmu_notifier_range_init(&range, vma->vm_mm, old_addr, old_end);
+	mmu_notifier_invalidate_range_start(&range);
 
 	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
 		cond_resched();
@@ -247,7 +245,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			new_pmd, new_addr, need_rmap_locks);
 	}
 
-	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
+	mmu_notifier_invalidate_range_end(&range);
 
 	return len + old_addr - old_end;	/* how much done */
 }
```
