about | summary | refs | log | tree | commit | diff | stats
path: root/mm/mremap.c
diff options
context:
space:
mode:
authorMike Rapoport <rppt@linux.vnet.ibm.com>2017-02-24 17:58:22 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-02-24 20:46:55 -0500
commit897ab3e0c49e24b62e2d54d165c7afec6bbca65b (patch)
tree5fa7e09864d6c959cef33849f6cb10ed04e459e4 /mm/mremap.c
parent846b1a0f1db065a8479159dd8fecddb1ebf30547 (diff)
userfaultfd: non-cooperative: add event for memory unmaps
When a non-cooperative userfaultfd monitor copies pages in the
background, it may encounter regions that were already unmapped.
Addition of UFFD_EVENT_UNMAP allows the uffd monitor to track precisely
changes in the virtual memory layout.

Since there might be different uffd contexts for the affected VMAs, we
first should create a temporary representation for the unmap event for
each uffd context and then notify them one by one to the appropriate
userfault file descriptors.

The event notification occurs after the mmap_sem has been released.

[arnd@arndb.de: fix nommu build]
  Link: http://lkml.kernel.org/r/20170203165141.3665284-1-arnd@arndb.de
[mhocko@suse.com: fix nommu build]
  Link: http://lkml.kernel.org/r/20170202091503.GA22823@dhcp22.suse.cz
Link: http://lkml.kernel.org/r/1485542673-24387-3-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Signed-off-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Pavel Emelyanov <xemul@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mremap.c')
-rw-r--r--mm/mremap.c23
1 file changed, 14 insertions, 9 deletions
diff --git a/mm/mremap.c b/mm/mremap.c
index 8779928d6a70..8233b0105c82 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -252,7 +252,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 static unsigned long move_vma(struct vm_area_struct *vma,
 		unsigned long old_addr, unsigned long old_len,
 		unsigned long new_len, unsigned long new_addr,
-		bool *locked, struct vm_userfaultfd_ctx *uf)
+		bool *locked, struct vm_userfaultfd_ctx *uf,
+		struct list_head *uf_unmap)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *new_vma;
@@ -341,7 +342,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	if (unlikely(vma->vm_flags & VM_PFNMAP))
 		untrack_pfn_moved(vma);

-	if (do_munmap(mm, old_addr, old_len) < 0) {
+	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
 		/* OOM: unable to split vma, just get accounts right */
 		vm_unacct_memory(excess >> PAGE_SHIFT);
 		excess = 0;
@@ -417,7 +418,8 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,

 static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 		unsigned long new_addr, unsigned long new_len, bool *locked,
-		struct vm_userfaultfd_ctx *uf)
+		struct vm_userfaultfd_ctx *uf,
+		struct list_head *uf_unmap)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
@@ -435,12 +437,12 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 	if (addr + old_len > new_addr && new_addr + new_len > addr)
 		goto out;

-	ret = do_munmap(mm, new_addr, new_len);
+	ret = do_munmap(mm, new_addr, new_len, NULL);
 	if (ret)
 		goto out;

 	if (old_len >= new_len) {
-		ret = do_munmap(mm, addr+new_len, old_len - new_len);
+		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
 		if (ret && old_len != new_len)
 			goto out;
 		old_len = new_len;
@@ -462,7 +464,8 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 	if (offset_in_page(ret))
 		goto out1;

-	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, uf);
+	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, uf,
+		       uf_unmap);
 	if (!(offset_in_page(ret)))
 		goto out;
 out1:
@@ -502,6 +505,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	unsigned long charged = 0;
 	bool locked = false;
 	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
+	LIST_HEAD(uf_unmap);

 	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
 		return ret;
@@ -528,7 +532,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,

 	if (flags & MREMAP_FIXED) {
 		ret = mremap_to(addr, old_len, new_addr, new_len,
-				&locked, &uf);
+				&locked, &uf, &uf_unmap);
 		goto out;
 	}

@@ -538,7 +542,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	 * do_munmap does all the needed commit accounting
 	 */
 	if (old_len >= new_len) {
-		ret = do_munmap(mm, addr+new_len, old_len - new_len);
+		ret = do_munmap(mm, addr+new_len, old_len - new_len, &uf_unmap);
 		if (ret && old_len != new_len)
 			goto out;
 		ret = addr;
@@ -598,7 +602,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	}

 		ret = move_vma(vma, addr, old_len, new_len, new_addr,
-			       &locked, &uf);
+			       &locked, &uf, &uf_unmap);
 	}
 out:
 	if (offset_in_page(ret)) {
@@ -609,5 +613,6 @@ out:
 	if (locked && new_len > old_len)
 		mm_populate(new_addr + old_len, new_len - old_len);
 	mremap_userfaultfd_complete(&uf, addr, new_addr, old_len);
+	userfaultfd_unmap_complete(mm, &uf_unmap);
 	return ret;
 }