diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-07-14 14:14:33 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-07-14 14:14:33 -0400 |
| commit | f353078f028fbfe9acd4b747b4a19c69ef6846cd (patch) | |
| tree | 3a3c7426d5fe6b42db04559e5631436222e4761a /mm | |
| parent | e181ae0c5db9544de9c53239eb22bc012ce75033 (diff) | |
| parent | fe10e398e860955bac4d28ec031b701d358465e4 (diff) | |
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
"11 fixes"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
reiserfs: fix buffer overflow with long warning messages
checkpatch: fix duplicate invalid vsprintf pointer extension '%p<foo>' messages
mm: do not bug_on on incorrect length in __mm_populate()
mm/memblock.c: do not complain about top-down allocations for !MEMORY_HOTREMOVE
fs, elf: make sure to page align bss in load_elf_library
x86/purgatory: add missing FORCE to Makefile target
net/9p/client.c: put refcount of trans_mod in error case in parse_opts()
mm: allow arch to supply p??_free_tlb functions
autofs: fix slab out of bounds read in getname_kernel()
fs/proc/task_mmu.c: fix Locked field in /proc/pid/smaps*
mm: do not drop unused pages when userfaultfd is running
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/gup.c | 2 | ||||
| -rw-r--r-- | mm/memblock.c | 3 | ||||
| -rw-r--r-- | mm/mmap.c | 29 | ||||
| -rw-r--r-- | mm/rmap.c | 8 |
4 files changed, 21 insertions, 21 deletions
| @@ -1238,8 +1238,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) | |||
| 1238 | int locked = 0; | 1238 | int locked = 0; |
| 1239 | long ret = 0; | 1239 | long ret = 0; |
| 1240 | 1240 | ||
| 1241 | VM_BUG_ON(start & ~PAGE_MASK); | ||
| 1242 | VM_BUG_ON(len != PAGE_ALIGN(len)); | ||
| 1243 | end = start + len; | 1241 | end = start + len; |
| 1244 | 1242 | ||
| 1245 | for (nstart = start; nstart < end; nstart = nend) { | 1243 | for (nstart = start; nstart < end; nstart = nend) { |
diff --git a/mm/memblock.c b/mm/memblock.c index 03d48d8835ba..11e46f83e1ad 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
| @@ -227,7 +227,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, | |||
| 227 | * so we use WARN_ONCE() here to see the stack trace if | 227 | * so we use WARN_ONCE() here to see the stack trace if |
| 228 | * fail happens. | 228 | * fail happens. |
| 229 | */ | 229 | */ |
| 230 | WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n"); | 230 | WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE), |
| 231 | "memblock: bottom-up allocation failed, memory hotremove may be affected\n"); | ||
| 231 | } | 232 | } |
| 232 | 233 | ||
| 233 | return __memblock_find_range_top_down(start, end, size, align, nid, | 234 | return __memblock_find_range_top_down(start, end, size, align, nid, |
| @@ -186,8 +186,8 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) | |||
| 186 | return next; | 186 | return next; |
| 187 | } | 187 | } |
| 188 | 188 | ||
| 189 | static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf); | 189 | static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, |
| 190 | 190 | struct list_head *uf); | |
| 191 | SYSCALL_DEFINE1(brk, unsigned long, brk) | 191 | SYSCALL_DEFINE1(brk, unsigned long, brk) |
| 192 | { | 192 | { |
| 193 | unsigned long retval; | 193 | unsigned long retval; |
| @@ -245,7 +245,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) | |||
| 245 | goto out; | 245 | goto out; |
| 246 | 246 | ||
| 247 | /* Ok, looks good - let it rip. */ | 247 | /* Ok, looks good - let it rip. */ |
| 248 | if (do_brk(oldbrk, newbrk-oldbrk, &uf) < 0) | 248 | if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0) |
| 249 | goto out; | 249 | goto out; |
| 250 | 250 | ||
| 251 | set_brk: | 251 | set_brk: |
| @@ -2929,21 +2929,14 @@ static inline void verify_mm_writelocked(struct mm_struct *mm) | |||
| 2929 | * anonymous maps. eventually we may be able to do some | 2929 | * anonymous maps. eventually we may be able to do some |
| 2930 | * brk-specific accounting here. | 2930 | * brk-specific accounting here. |
| 2931 | */ | 2931 | */ |
| 2932 | static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf) | 2932 | static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf) |
| 2933 | { | 2933 | { |
| 2934 | struct mm_struct *mm = current->mm; | 2934 | struct mm_struct *mm = current->mm; |
| 2935 | struct vm_area_struct *vma, *prev; | 2935 | struct vm_area_struct *vma, *prev; |
| 2936 | unsigned long len; | ||
| 2937 | struct rb_node **rb_link, *rb_parent; | 2936 | struct rb_node **rb_link, *rb_parent; |
| 2938 | pgoff_t pgoff = addr >> PAGE_SHIFT; | 2937 | pgoff_t pgoff = addr >> PAGE_SHIFT; |
| 2939 | int error; | 2938 | int error; |
| 2940 | 2939 | ||
| 2941 | len = PAGE_ALIGN(request); | ||
| 2942 | if (len < request) | ||
| 2943 | return -ENOMEM; | ||
| 2944 | if (!len) | ||
| 2945 | return 0; | ||
| 2946 | |||
| 2947 | /* Until we need other flags, refuse anything except VM_EXEC. */ | 2940 | /* Until we need other flags, refuse anything except VM_EXEC. */ |
| 2948 | if ((flags & (~VM_EXEC)) != 0) | 2941 | if ((flags & (~VM_EXEC)) != 0) |
| 2949 | return -EINVAL; | 2942 | return -EINVAL; |
| @@ -3015,18 +3008,20 @@ out: | |||
| 3015 | return 0; | 3008 | return 0; |
| 3016 | } | 3009 | } |
| 3017 | 3010 | ||
| 3018 | static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf) | 3011 | int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) |
| 3019 | { | ||
| 3020 | return do_brk_flags(addr, len, 0, uf); | ||
| 3021 | } | ||
| 3022 | |||
| 3023 | int vm_brk_flags(unsigned long addr, unsigned long len, unsigned long flags) | ||
| 3024 | { | 3012 | { |
| 3025 | struct mm_struct *mm = current->mm; | 3013 | struct mm_struct *mm = current->mm; |
| 3014 | unsigned long len; | ||
| 3026 | int ret; | 3015 | int ret; |
| 3027 | bool populate; | 3016 | bool populate; |
| 3028 | LIST_HEAD(uf); | 3017 | LIST_HEAD(uf); |
| 3029 | 3018 | ||
| 3019 | len = PAGE_ALIGN(request); | ||
| 3020 | if (len < request) | ||
| 3021 | return -ENOMEM; | ||
| 3022 | if (!len) | ||
| 3023 | return 0; | ||
| 3024 | |||
| 3030 | if (down_write_killable(&mm->mmap_sem)) | 3025 | if (down_write_killable(&mm->mmap_sem)) |
| 3031 | return -EINTR; | 3026 | return -EINTR; |
| 3032 | 3027 | ||
| @@ -64,6 +64,7 @@ | |||
| 64 | #include <linux/backing-dev.h> | 64 | #include <linux/backing-dev.h> |
| 65 | #include <linux/page_idle.h> | 65 | #include <linux/page_idle.h> |
| 66 | #include <linux/memremap.h> | 66 | #include <linux/memremap.h> |
| 67 | #include <linux/userfaultfd_k.h> | ||
| 67 | 68 | ||
| 68 | #include <asm/tlbflush.h> | 69 | #include <asm/tlbflush.h> |
| 69 | 70 | ||
| @@ -1481,11 +1482,16 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |||
| 1481 | set_pte_at(mm, address, pvmw.pte, pteval); | 1482 | set_pte_at(mm, address, pvmw.pte, pteval); |
| 1482 | } | 1483 | } |
| 1483 | 1484 | ||
| 1484 | } else if (pte_unused(pteval)) { | 1485 | } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { |
| 1485 | /* | 1486 | /* |
| 1486 | * The guest indicated that the page content is of no | 1487 | * The guest indicated that the page content is of no |
| 1487 | * interest anymore. Simply discard the pte, vmscan | 1488 | * interest anymore. Simply discard the pte, vmscan |
| 1488 | * will take care of the rest. | 1489 | * will take care of the rest. |
| 1490 | * A future reference will then fault in a new zero | ||
| 1491 | * page. When userfaultfd is active, we must not drop | ||
| 1492 | * this page though, as its main user (postcopy | ||
| 1493 | * migration) will not expect userfaults on already | ||
| 1494 | * copied pages. | ||
| 1489 | */ | 1495 | */ |
| 1490 | dec_mm_counter(mm, mm_counter(page)); | 1496 | dec_mm_counter(mm, mm_counter(page)); |
| 1491 | /* We have to invalidate as we cleared the pte */ | 1497 | /* We have to invalidate as we cleared the pte */ |
