summaryrefslogtreecommitdiffstats
path: root/ipc/shm.c
diff options
context:
space:
mode:
authorMike Rapoport <rppt@linux.vnet.ibm.com>2017-02-24 17:58:22 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-02-24 20:46:55 -0500
commit897ab3e0c49e24b62e2d54d165c7afec6bbca65b (patch)
tree5fa7e09864d6c959cef33849f6cb10ed04e459e4 /ipc/shm.c
parent846b1a0f1db065a8479159dd8fecddb1ebf30547 (diff)
userfaultfd: non-cooperative: add event for memory unmaps
When a non-cooperative userfaultfd monitor copies pages in the background, it may encounter regions that were already unmapped. Addition of UFFD_EVENT_UNMAP allows the uffd monitor to track precisely changes in the virtual memory layout. Since there might be different uffd contexts for the affected VMAs, we first should create a temporary representation for the unmap event for each uffd context and then notify them one by one to the appropriate userfault file descriptors. The event notification occurs after the mmap_sem has been released. [arnd@arndb.de: fix nommu build] Link: http://lkml.kernel.org/r/20170203165141.3665284-1-arnd@arndb.de [mhocko@suse.com: fix nommu build] Link: http://lkml.kernel.org/r/20170202091503.GA22823@dhcp22.suse.cz Link: http://lkml.kernel.org/r/1485542673-24387-3-git-send-email-rppt@linux.vnet.ibm.com Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com> Signed-off-by: Michal Hocko <mhocko@suse.com> Signed-off-by: Arnd Bergmann <arnd@arndb.de> Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com> Cc: Mike Kravetz <mike.kravetz@oracle.com> Cc: Pavel Emelyanov <xemul@virtuozzo.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'ipc/shm.c')
-rw-r--r--ipc/shm.c8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/ipc/shm.c b/ipc/shm.c
index 7f6537b84ef5..d7805acb44fd 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1222,7 +1222,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
1222 goto invalid; 1222 goto invalid;
1223 } 1223 }
1224 1224
1225 addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate); 1225 addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL);
1226 *raddr = addr; 1226 *raddr = addr;
1227 err = 0; 1227 err = 0;
1228 if (IS_ERR_VALUE(addr)) 1228 if (IS_ERR_VALUE(addr))
@@ -1329,7 +1329,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1329 */ 1329 */
1330 file = vma->vm_file; 1330 file = vma->vm_file;
1331 size = i_size_read(file_inode(vma->vm_file)); 1331 size = i_size_read(file_inode(vma->vm_file));
1332 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); 1332 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1333 /* 1333 /*
1334 * We discovered the size of the shm segment, so 1334 * We discovered the size of the shm segment, so
1335 * break out of here and fall through to the next 1335 * break out of here and fall through to the next
@@ -1356,7 +1356,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1356 if ((vma->vm_ops == &shm_vm_ops) && 1356 if ((vma->vm_ops == &shm_vm_ops) &&
1357 ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) && 1357 ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1358 (vma->vm_file == file)) 1358 (vma->vm_file == file))
1359 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); 1359 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1360 vma = next; 1360 vma = next;
1361 } 1361 }
1362 1362
@@ -1365,7 +1365,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1365 * given 1365 * given
1366 */ 1366 */
1367 if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { 1367 if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1368 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); 1368 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1369 retval = 0; 1369 retval = 0;
1370 } 1370 }
1371 1371