author		Michal Hocko <mhocko@suse.com>		2016-05-23 19:25:30 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-05-23 20:04:14 -0400
commit		9fbeb5ab59a2b2a09cca2eb68283e7a090d4b98d
tree		b4f3ff77c420bdafd5be3aa8293320ad4cb871e4 /mm/util.c
parent		dc0ef0df7b6a90892ec41933212ac701152a254c
mm: make vm_mmap killable
All the callers of vm_mmap seem to check for failure already and bail
out in one way or another on error, which means we can change it to use
the killable version of vm_mmap_pgoff and return -EINTR if the current
task gets killed while waiting for mmap_sem. This also means that
vm_mmap_pgoff can be killable by default and the additional parameter
can be dropped.

This will help in OOM conditions, when the OOM victim might be stuck
waiting for mmap_sem for write, which in turn can block the oom_reaper,
which relies on mmap_sem for read to make forward progress and reclaim
the address space of the victim.

Please note that load_elf_binary ignores the vm_mmap error for the
current->personality & MMAP_PAGE_ZERO case, but that shouldn't be a
problem because the address is not used anywhere and we never return to
userspace if we got killed.

Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
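A minimal caller-side sketch of the contract this change relies on
(illustrative only, not part of the patch; the function name and its
"file"/"len" parameters are placeholders, and the load_elf_binary
fragment is paraphrased from fs/binfmt_elf.c of this era):

	/*
	 * vm_mmap() returns either the mapped address or a negative
	 * errno encoded in an unsigned long; after this patch that
	 * includes -EINTR when the task is killed while waiting for
	 * mmap_sem.
	 */
	static unsigned long map_region(struct file *file, unsigned long len)
	{
		unsigned long addr;

		addr = vm_mmap(file, 0, len, PROT_READ | PROT_WRITE,
			       MAP_SHARED, 0);
		if (IS_ERR_VALUE(addr))
			return addr;	/* bail out, e.g. -EINTR or -ENOMEM */

		return addr;
	}

	/*
	 * The one exception noted above, roughly as in load_elf_binary()
	 * for the SVr4 page-zero emulation, where the return value is
	 * deliberately ignored:
	 *
	 *	if (current->personality & MMAP_PAGE_ZERO)
	 *		error = vm_mmap(NULL, 0, PAGE_SIZE,
	 *				PROT_READ | PROT_EXEC,
	 *				MAP_FIXED | MAP_PRIVATE, 0);
	 *
	 * which is harmless because a killed task never returns to
	 * userspace.
	 */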
Diffstat (limited to 'mm/util.c')
-rw-r--r--	mm/util.c | 13 ++++---------
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/mm/util.c b/mm/util.c
index 03b237746850..917e0e3d0f8e 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -289,7 +289,7 @@ EXPORT_SYMBOL_GPL(get_user_pages_fast);
 
 unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot,
-	unsigned long flag, unsigned long pgoff, bool killable)
+	unsigned long flag, unsigned long pgoff)
 {
 	unsigned long ret;
 	struct mm_struct *mm = current->mm;
@@ -297,12 +297,8 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 
 	ret = security_mmap_file(file, prot, flag);
 	if (!ret) {
-		if (killable) {
-			if (down_write_killable(&mm->mmap_sem))
-				return -EINTR;
-		} else {
-			down_write(&mm->mmap_sem);
-		}
+		if (down_write_killable(&mm->mmap_sem))
+			return -EINTR;
 		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
 				    &populate);
 		up_write(&mm->mmap_sem);
@@ -312,7 +308,6 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 	return ret;
 }
 
-/* XXX are all callers checking an error */
 unsigned long vm_mmap(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot,
 	unsigned long flag, unsigned long offset)
@@ -322,7 +317,7 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
 	if (unlikely(offset_in_page(offset)))
 		return -EINVAL;
 
-	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT, false);
+	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
 }
 EXPORT_SYMBOL(vm_mmap);
 