author     Mike Kravetz <mike.kravetz@oracle.com>            2017-02-22 18:42:58 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2017-02-22 19:41:28 -0500
commit     810a56b943e265bbabfcd5a8e54cb8d3b16cd6e4
tree       4d95abf9e7d4efb0fa40cb6f5b1c3552e3bf2f77 /mm/memory.c
parent     60d4d2d2b40e44cd36bfb6049e8d9e2055a24f8a
userfaultfd: hugetlbfs: fix __mcopy_atomic_hugetlb retry/error processing
The new routine copy_huge_page_from_user() uses kmap_atomic() to map
PAGE_SIZE pages. However, this prevents page faults in the subsequent
call to copy_from_user(). This is fine when the routine is called with
mmap_sem held, but in the __mcopy_atomic_hugetlb() retry path, where
mmap_sem has been dropped, page faults must be allowed. So, add a new
argument, allow_pagefault, to indicate whether the routine should allow
page faults. (An illustrative caller sketch follows the sign-off block
below.)
[dan.carpenter@oracle.com: unmap the correct pointer]
Link: http://lkml.kernel.org/r/20170113082608.GA3548@mwanda
[akpm@linux-foundation.org: kunmap() takes a page*, per Hugh]
Link: http://lkml.kernel.org/r/20161216144821.5183-20-aarcange@redhat.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Michael Rapoport <RAPOPORT@il.ibm.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
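As an aside, here is a minimal, hypothetical caller sketch (not the actual
__mcopy_atomic_hugetlb() code) of how the new argument is meant to be used:
pass false while mmap_sem is held, since the kmap_atomic() path cannot
tolerate page faults; if the copy comes up short, drop mmap_sem and retry
with allow_pagefault == true. The helper name copy_huge_src() and its error
handling are illustrative assumptions, not part of this patch.

#include <linux/mm.h>

/*
 * Illustrative only: retry a huge-page copy with page faults allowed once
 * mmap_sem has been dropped. copy_huge_page_from_user() returns the number
 * of bytes left uncopied, so 0 means the whole huge page was copied.
 */
static long copy_huge_src(struct mm_struct *dst_mm, struct page *page,
			  const void __user *src_addr,
			  unsigned int pages_per_huge_page)
{
	long left;

	/* Under mmap_sem: kmap_atomic() path, copy_from_user() cannot fault. */
	left = copy_huge_page_from_user(page, src_addr,
					pages_per_huge_page, false);
	if (!left)
		return 0;

	/* Short copy: release mmap_sem and retry with page faults allowed. */
	up_read(&dst_mm->mmap_sem);
	left = copy_huge_page_from_user(page, src_addr,
					pages_per_huge_page, true);
	down_read(&dst_mm->mmap_sem);

	return left ? -EFAULT : 0;
}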
Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 4ade940d105c..d7676a68c80a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4155,7 +4155,8 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 
 long copy_huge_page_from_user(struct page *dst_page,
 				const void __user *usr_src,
-				unsigned int pages_per_huge_page)
+				unsigned int pages_per_huge_page,
+				bool allow_pagefault)
 {
 	void *src = (void *)usr_src;
 	void *page_kaddr;
@@ -4163,11 +4164,17 @@ long copy_huge_page_from_user(struct page *dst_page,
 	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
 
 	for (i = 0; i < pages_per_huge_page; i++) {
-		page_kaddr = kmap_atomic(dst_page + i);
+		if (allow_pagefault)
+			page_kaddr = kmap(dst_page + i);
+		else
+			page_kaddr = kmap_atomic(dst_page + i);
 		rc = copy_from_user(page_kaddr,
 				(const void __user *)(src + i * PAGE_SIZE),
 				PAGE_SIZE);
-		kunmap_atomic(page_kaddr);
+		if (allow_pagefault)
+			kunmap(dst_page + i);
+		else
+			kunmap_atomic(page_kaddr);
 
 		ret_val -= (PAGE_SIZE - rc);
 		if (rc)
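The two bracketed fixups in the changelog concern the unmap calls in this
loop: kunmap() takes the struct page * that was passed to kmap(), while
kunmap_atomic() takes the kernel virtual address returned by kmap_atomic().
A minimal, hypothetical helper (not part of the patch) distilled from the
patched loop body shows the asymmetry:

#include <linux/highmem.h>
#include <linux/uaccess.h>

/* Hypothetical helper, distilled from the loop above, to show the asymmetry. */
static unsigned long copy_one_subpage(struct page *page,
				      const void __user *src,
				      bool allow_pagefault)
{
	void *page_kaddr;
	unsigned long rc;

	if (allow_pagefault) {
		page_kaddr = kmap(page);	/* sleepable mapping; the copy may fault */
		rc = copy_from_user(page_kaddr, src, PAGE_SIZE);
		kunmap(page);			/* kunmap() takes the struct page * */
	} else {
		page_kaddr = kmap_atomic(page);	/* atomic mapping; page faults disabled */
		rc = copy_from_user(page_kaddr, src, PAGE_SIZE);
		kunmap_atomic(page_kaddr);	/* kunmap_atomic() takes the kaddr */
	}
	return rc;				/* bytes left uncopied */
}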