path: root/mm/userfaultfd.c
author    Mike Rapoport <rppt@linux.vnet.ibm.com>  2018-06-07 20:09:25 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-06-07 20:34:38 -0400
commit    df2cc96e77011cf7989208b206da9817e0321028 (patch)
tree      5831fc1b33713f8073c5661a36890f7822a4f484  /mm/userfaultfd.c
parent    be09102b4190561b67e3809b07a7fd29c9774152 (diff)
userfaultfd: prevent non-cooperative events vs mcopy_atomic races
If a process monitored with userfaultfd changes its memory mappings or
forks() at the same time as the uffd monitor fills the process memory
with UFFDIO_COPY, the actual creation of page table entries and copying
of the data in mcopy_atomic may happen either before or after the
memory mapping modifications, and there is no way for the uffd monitor
to maintain a consistent view of the process memory layout.

For instance, let's consider fork() running in parallel with
userfaultfd_copy():

process                          |  uffd monitor
---------------------------------+------------------------------
fork()                           |  userfaultfd_copy()
...                              |  ...
    dup_mmap()                   |    down_read(mmap_sem)
    down_write(mmap_sem)         |    /* create PTEs, copy data */
        dup_uffd()               |    up_read(mmap_sem)
        copy_page_range()        |
        up_write(mmap_sem)       |
        dup_uffd_complete()      |
            /* notify monitor */ |

If userfaultfd_copy() takes the mmap_sem first, the new page(s) will be
present by the time copy_page_range() is called and they will appear in
the child's memory mappings.  However, if fork() is the first to take
the mmap_sem, the new pages won't be mapped in the child's address
space.

If the pages are not present and the child tries to access them, the
monitor will get a page fault notification and everything is fine.
However, if the pages *are present*, the child can access them without
uffd noticing, and if we copy them into the child it'll see the wrong
data.  Since we are talking about a background copy, we'd need to
decide whether the pages should be copied or not regardless of #PF
notifications.

Since the userfaultfd monitor has no way to determine the order, let's
disallow userfaultfd_copy in parallel with non-cooperative events.  In
that case we return -EAGAIN and the uffd monitor can understand that
userfaultfd_copy() clashed with a non-cooperative event and take an
appropriate action.

Link: http://lkml.kernel.org/r/1527061324-19949-1-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Acked-by: Pavel Emelyanov <xemul@virtuozzo.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Andrei Vagin <avagin@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
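From userspace, the clash surfaces as UFFDIO_COPY failing with EAGAIN.
A minimal monitor-side sketch of the retry protocol follows; it is
illustrative and not part of this patch, and handle_one_event() is a
hypothetical placeholder for the monitor's own event bookkeeping.  It
copies a single page, so a partial copy cannot occur:

#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/*
 * Hypothetical placeholder: the kernel keeps refusing copies until the
 * monitor fetches the queued event, so we must read the event queue
 * before retrying.  What "handling" means (tracking the remapped
 * range, picking up the child's uffd after fork, ...) is
 * application-specific.
 */
static int handle_one_event(int uffd)
{
	struct uffd_msg msg;

	if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
		return -1;
	/* ... update the monitor's view based on msg.event ... */
	return 0;
}

/*
 * Copy one page into the monitored process, retrying when the copy
 * clashes with a non-cooperative event (fork, mremap, madvise, munmap).
 */
static int copy_page_retry(int uffd, unsigned long dst, unsigned long src,
			   unsigned long page_size)
{
	struct uffdio_copy copy;

	for (;;) {
		copy.dst = dst;
		copy.src = src;
		copy.len = page_size;
		copy.mode = 0;
		copy.copy = 0;

		if (ioctl(uffd, UFFDIO_COPY, &copy) == 0)
			return 0;
		if (errno != EAGAIN)
			return -1;	/* a real failure */
		/*
		 * Clashed with a non-cooperative event: consume it,
		 * bring our view of the layout up to date, then retry.
		 */
		if (handle_one_event(uffd))
			return -1;
	}
}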
Diffstat (limited to 'mm/userfaultfd.c')
-rw-r--r--  mm/userfaultfd.c  22
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 39791b81ede7..5029f241908f 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -404,7 +404,8 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 			      unsigned long dst_start,
 			      unsigned long src_start,
 			      unsigned long len,
-			      bool zeropage)
+			      bool zeropage,
+			      bool *mmap_changing)
 {
 	struct vm_area_struct *dst_vma;
 	ssize_t err;
@@ -431,6 +432,15 @@ retry:
 	down_read(&dst_mm->mmap_sem);
 
 	/*
+	 * If memory mappings are changing because of non-cooperative
+	 * operation (e.g. mremap) running in parallel, bail out and
+	 * request the user to retry later
+	 */
+	err = -EAGAIN;
+	if (mmap_changing && READ_ONCE(*mmap_changing))
+		goto out_unlock;
+
+	/*
 	 * Make sure the vma is not shared, that the dst range is
 	 * both valid and fully within a single existing vma.
 	 */
@@ -563,13 +573,15 @@ out:
 }
 
 ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
-		     unsigned long src_start, unsigned long len)
+		     unsigned long src_start, unsigned long len,
+		     bool *mmap_changing)
 {
-	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false);
+	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false,
+			      mmap_changing);
 }
 
 ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
-		       unsigned long len)
+		       unsigned long len, bool *mmap_changing)
 {
-	return __mcopy_atomic(dst_mm, start, 0, len, true);
+	return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing);
 }
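The mmap_changing flag itself is driven by this commit's companion
changes in fs/userfaultfd.c, which are outside this file-limited view:
it is raised when a non-cooperative event is queued and lowered once
the monitor has read the event.  Below is a simplified model of that
lifecycle with illustrative names, not the kernel's actual code:

#include <stdbool.h>

/*
 * Illustrative model only; in the kernel the flag is a field of the
 * userfaultfd context and is accessed with READ_ONCE()/WRITE_ONCE().
 */
struct uffd_ctx_model {
	bool mmap_changing;
};

/*
 * A non-cooperative event (fork, mremap, remove, unmap) is queued
 * while the mapping change is in progress.
 */
static void event_queued(struct uffd_ctx_model *ctx)
{
	ctx->mmap_changing = true;
}

/* The monitor has read the event and can resynchronize its view. */
static void event_consumed(struct uffd_ctx_model *ctx)
{
	ctx->mmap_changing = false;
}

/*
 * The check __mcopy_atomic now performs after taking mmap_sem for
 * read (the second hunk above): a copy that could interleave with a
 * pending event bails out with -EAGAIN instead of proceeding.
 */
static bool copy_may_proceed(const struct uffd_ctx_model *ctx)
{
	return !ctx->mmap_changing;
}

With this check in place, a UFFDIO_COPY that races with a mapping
change either completes before the event is queued or fails with
-EAGAIN, so the ambiguous interleaving from the diagram in the commit
message can no longer go unnoticed by the monitor.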