about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMichal Hocko <mhocko@suse.com>2016-05-23 19:25:30 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-05-23 20:04:14 -0400
commit9fbeb5ab59a2b2a09cca2eb68283e7a090d4b98d (patch)
treeb4f3ff77c420bdafd5be3aa8293320ad4cb871e4
parentdc0ef0df7b6a90892ec41933212ac701152a254c (diff)
mm: make vm_mmap killable
All the callers of vm_mmap seem to check for the failure already and bail out in one way or another on the error, which means that we can change it to use the killable version of vm_mmap_pgoff and return -EINTR if the current task gets killed while waiting for mmap_sem. This also means that vm_mmap_pgoff can be killable by default and drop the additional parameter.

This will help in OOM conditions when the oom victim might be stuck waiting for the mmap_sem for write, which in turn can block oom_reaper, which relies on the mmap_sem for read to make forward progress and reclaim the address space of the victim.

Please note that load_elf_binary is ignoring the vm_mmap error for the current->personality & MMAP_PAGE_ZERO case, but that shouldn't be a problem because the address is not used anywhere and we never return to userspace if we got killed.

Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/mm.h2
-rw-r--r--mm/internal.h3
-rw-r--r--mm/mmap.c2
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/util.c13
5 files changed, 8 insertions, 14 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b530c99e8e81..d5eb8dddd7c0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2013,7 +2013,7 @@ static inline void mm_populate(unsigned long addr, unsigned long len) {}
2013/* These take the mm semaphore themselves */ 2013/* These take the mm semaphore themselves */
2014extern unsigned long vm_brk(unsigned long, unsigned long); 2014extern unsigned long vm_brk(unsigned long, unsigned long);
2015extern int vm_munmap(unsigned long, size_t); 2015extern int vm_munmap(unsigned long, size_t);
2016extern unsigned long vm_mmap(struct file *, unsigned long, 2016extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
2017 unsigned long, unsigned long, 2017 unsigned long, unsigned long,
2018 unsigned long, unsigned long); 2018 unsigned long, unsigned long);
2019 2019
diff --git a/mm/internal.h b/mm/internal.h
index bff7fd702331..a37e5b6f9d25 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -444,8 +444,7 @@ extern u32 hwpoison_filter_enable;
444 444
445extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long, 445extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
446 unsigned long, unsigned long, 446 unsigned long, unsigned long,
447 unsigned long, unsigned long, 447 unsigned long, unsigned long);
448 bool);
449 448
450extern void set_pageblock_order(void); 449extern void set_pageblock_order(void);
451unsigned long reclaim_clean_pages_from_list(struct zone *zone, 450unsigned long reclaim_clean_pages_from_list(struct zone *zone,
diff --git a/mm/mmap.c b/mm/mmap.c
index 11e1f2ca72af..420088682d4a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1333,7 +1333,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1333 1333
1334 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); 1334 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1335 1335
1336 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff, true); 1336 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1337out_fput: 1337out_fput:
1338 if (file) 1338 if (file)
1339 fput(file); 1339 fput(file);
diff --git a/mm/nommu.c b/mm/nommu.c
index b74512746aae..c8bd59a03c71 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1446,7 +1446,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1446 1446
1447 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); 1447 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1448 1448
1449 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff, true); 1449 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1450 1450
1451 if (file) 1451 if (file)
1452 fput(file); 1452 fput(file);
diff --git a/mm/util.c b/mm/util.c
index 03b237746850..917e0e3d0f8e 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -289,7 +289,7 @@ EXPORT_SYMBOL_GPL(get_user_pages_fast);
289 289
290unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr, 290unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
291 unsigned long len, unsigned long prot, 291 unsigned long len, unsigned long prot,
292 unsigned long flag, unsigned long pgoff, bool killable) 292 unsigned long flag, unsigned long pgoff)
293{ 293{
294 unsigned long ret; 294 unsigned long ret;
295 struct mm_struct *mm = current->mm; 295 struct mm_struct *mm = current->mm;
@@ -297,12 +297,8 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
297 297
298 ret = security_mmap_file(file, prot, flag); 298 ret = security_mmap_file(file, prot, flag);
299 if (!ret) { 299 if (!ret) {
300 if (killable) { 300 if (down_write_killable(&mm->mmap_sem))
301 if (down_write_killable(&mm->mmap_sem)) 301 return -EINTR;
302 return -EINTR;
303 } else {
304 down_write(&mm->mmap_sem);
305 }
306 ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff, 302 ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
307 &populate); 303 &populate);
308 up_write(&mm->mmap_sem); 304 up_write(&mm->mmap_sem);
@@ -312,7 +308,6 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
312 return ret; 308 return ret;
313} 309}
314 310
315/* XXX are all callers checking an error */
316unsigned long vm_mmap(struct file *file, unsigned long addr, 311unsigned long vm_mmap(struct file *file, unsigned long addr,
317 unsigned long len, unsigned long prot, 312 unsigned long len, unsigned long prot,
318 unsigned long flag, unsigned long offset) 313 unsigned long flag, unsigned long offset)
@@ -322,7 +317,7 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
322 if (unlikely(offset_in_page(offset))) 317 if (unlikely(offset_in_page(offset)))
323 return -EINVAL; 318 return -EINVAL;
324 319
325 return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT, false); 320 return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
326} 321}
327EXPORT_SYMBOL(vm_mmap); 322EXPORT_SYMBOL(vm_mmap);
328 323