author      Mike Kravetz <mike.kravetz@oracle.com>           2017-02-22 18:42:49 -0500
committer   Linus Torvalds <torvalds@linux-foundation.org>   2017-02-22 19:41:28 -0500
commit      fa4d75c1de13299c61b5e18a1ae46bc00888b599 (patch)
tree        73ca3cf8aac2fa991fc7310658145d49772f0b49 /mm/memory.c
parent      09fa5296a40d01e014b4851def20b975feb98fd5 (diff)
userfaultfd: hugetlbfs: add copy_huge_page_from_user for hugetlb userfaultfd support
userfaultfd UFFDIO_COPY allows user level code to copy data to a page at
fault time. The data is copied from user space to a newly allocated
huge page. The new routine copy_huge_page_from_user performs this copy.
Link: http://lkml.kernel.org/r/20161216144821.5183-17-aarcange@redhat.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Michael Rapoport <RAPOPORT@il.ibm.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
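For context (not part of the patch): UFFDIO_COPY is the ioctl through which user level code hands the kernel the data to place in the newly allocated page. A minimal user-space sketch follows, assuming `uffd` is a userfaultfd descriptor already registered on a hugetlbfs mapping; the helper name and local variables are illustrative, not from this series.

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <string.h>

/*
 * Hypothetical helper: resolve a fault on a registered hugetlbfs range by
 * asking the kernel to copy huge_page_size bytes from src_buf into a newly
 * allocated huge page mapped at the faulting address.
 */
static int resolve_fault_with_copy(int uffd, const void *src_buf,
				   unsigned long fault_addr,
				   unsigned long huge_page_size)
{
	struct uffdio_copy copy;

	memset(&copy, 0, sizeof(copy));
	copy.dst = fault_addr & ~(huge_page_size - 1);	/* huge-page aligned dst */
	copy.src = (unsigned long)src_buf;
	copy.len = huge_page_size;
	copy.mode = 0;

	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
		return -1;	/* copy.copy reports how many bytes were copied */
	return 0;
}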
Diffstat (limited to 'mm/memory.c')
-rw-r--r--   mm/memory.c   25
1 file changed, 25 insertions, 0 deletions
diff --git a/mm/memory.c b/mm/memory.c
index ececdc4a2892..4ade940d105c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4152,6 +4152,31 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
 	}
 }
+
+long copy_huge_page_from_user(struct page *dst_page,
+				const void __user *usr_src,
+				unsigned int pages_per_huge_page)
+{
+	void *src = (void *)usr_src;
+	void *page_kaddr;
+	unsigned long i, rc = 0;
+	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
+
+	for (i = 0; i < pages_per_huge_page; i++) {
+		page_kaddr = kmap_atomic(dst_page + i);
+		rc = copy_from_user(page_kaddr,
+				(const void __user *)(src + i * PAGE_SIZE),
+				PAGE_SIZE);
+		kunmap_atomic(page_kaddr);
+
+		ret_val -= (PAGE_SIZE - rc);
+		if (rc)
+			break;
+
+		cond_resched();
+	}
+	return ret_val;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
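The return convention visible in the hunk above: ret_val starts at pages_per_huge_page * PAGE_SIZE and is reduced by the bytes each copy_from_user() call managed to copy, so a return of 0 means the whole huge page was filled and any nonzero value is the count of bytes left uncopied. A hedged sketch of a caller checking that convention (page, src_addr, the hstate h and the out label are placeholders, not the actual hugetlb UFFDIO_COPY code):

	ret = copy_huge_page_from_user(page,
			(const void __user *)src_addr,
			pages_per_huge_page(h));
	if (ret) {
		/* Bytes were left uncopied: treat it as a failed user copy. */
		ret = -EFAULT;
		goto out;
	}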