author     Ira Weiny <ira.weiny@intel.com>                 2019-05-13 20:17:11 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-05-14 12:47:46 -0400
commit     73b0140bf0fe9df90fb267c00673c4b9bf285430 (patch)
tree       2076056c96b291bc875064b4e45ee4a6228f79b8
parent     b798bec4741bdd80224214fdd004c8e52698e425 (diff)
mm/gup: change GUP fast to use flags rather than a write 'bool'
To facilitate additional options to get_user_pages_fast(), change the
singular 'write' parameter to be gup_flags.
This patch does not change any functionality. New functionality will
follow in subsequent patches.
Some of the get_user_pages_fast() call sites were unchanged because they
already passed FOLL_WRITE or 0 for the write parameter.
NOTE: It was suggested to change the ordering of the get_user_pages_fast()
arguments to ensure that callers were converted. This breaks the current
GUP call site convention of having the returned pages be the final
parameter. So the suggestion was rejected.
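
The call-site conversion applied throughout this patch is mechanical: a caller
that used to pass a write bool now passes FOLL_WRITE or 0 in gup_flags. The
snippet below is an editor's sketch only, not a hunk from this patch; the
helper example_pin_user_buffer() and its parameter names are invented for
illustration.

	#include <linux/mm.h>		/* get_user_pages_fast(), FOLL_WRITE */
	#include <linux/types.h>	/* bool */

	/*
	 * Illustrative only: pin @nr_pages user pages starting at @uaddr using
	 * the new flags-based convention.  The old bool maps to FOLL_WRITE or
	 * 0, and further FOLL_* bits can later be OR'd in without another
	 * signature change.
	 */
	static int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
					   bool want_write, struct page **pages)
	{
		unsigned int gup_flags = want_write ? FOLL_WRITE : 0;

		/* Old convention: get_user_pages_fast(uaddr, nr_pages, want_write, pages); */
		return get_user_pages_fast(uaddr, nr_pages, gup_flags, pages);
	}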
Link: http://lkml.kernel.org/r/20190328084422.29911-4-ira.weiny@intel.com
Link: http://lkml.kernel.org/r/20190317183438.2057-4-ira.weiny@intel.com
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Mike Marshall <hubcap@omnibond.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
34 files changed, 73 insertions, 57 deletions
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 0d14e0d8eacf..4c2b4483683c 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -235,7 +235,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * get_user_pages_fast() - pin user pages in memory
  * @start: starting user address
  * @nr_pages: number of pages from start to pin
- * @write: whether pages will be written to
+ * @gup_flags: flags modifying pin behaviour
  * @pages: array that receives pointers to the pages pinned.
  *	Should be at least nr_pages long.
  *
@@ -247,8 +247,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * requested. If nr_pages is 0 or negative, returns 0. If no pages
  * were pinned, returns -errno.
  */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages)
+int get_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr, len, end;
@@ -273,7 +273,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			goto slow;
-		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+		if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
+				   pages, &nr))
 			goto slow;
 	} while (pgdp++, addr = next, addr != end);
 	local_irq_enable();
@@ -289,7 +290,7 @@ slow_irqon:
 	pages += nr;
 
 	ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
-				      pages, write ? FOLL_WRITE : 0);
+				      pages, gup_flags);
 
 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index be7bc070eae5..ab3d484c5e2e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -600,7 +600,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	/* If writing != 0, then the HPTE must allow writing, if we get here */
 	write_ok = writing;
 	hva = gfn_to_hva_memslot(memslot, gfn);
-	npages = get_user_pages_fast(hva, 1, writing, pages);
+	npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages);
 	if (npages < 1) {
 		/* Check if it's an I/O mapping */
 		down_read(&current->mm->mmap_sem);
@@ -1193,7 +1193,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
 		goto err;
 	hva = gfn_to_hva_memslot(memslot, gfn);
-	npages = get_user_pages_fast(hva, 1, 1, pages);
+	npages = get_user_pages_fast(hva, 1, FOLL_WRITE, pages);
 	if (npages < 1)
 		goto err;
 	page = pages[0];
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index 24296f4cadc6..e0af53fd78c5 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -783,7 +783,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
 	if (!pages)
 		return -ENOMEM;
 
-	ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
+	ret = get_user_pages_fast(cfg->array, num_pages, FOLL_WRITE, pages);
 	if (ret < 0)
 		goto free_pages;
 
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 37503ae62486..1fd706f6206c 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -2376,7 +2376,7 @@ static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
 		ret = -EFAULT;
 		goto out;
 	}
-	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
+	ret = get_user_pages_fast(map->addr, 1, FOLL_WRITE, &map->page);
 	if (ret < 0)
 		goto out;
 	BUG_ON(ret != 1);
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
index 3e27f6d1f1ec..277c882f7489 100644
--- a/arch/sh/mm/gup.c
+++ b/arch/sh/mm/gup.c
@@ -204,7 +204,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * get_user_pages_fast() - pin user pages in memory
  * @start: starting user address
  * @nr_pages: number of pages from start to pin
- * @write: whether pages will be written to
+ * @gup_flags: flags modifying pin behaviour
  * @pages: array that receives pointers to the pages pinned.
  *	Should be at least nr_pages long.
  *
@@ -216,8 +216,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * requested. If nr_pages is 0 or negative, returns 0. If no pages
  * were pinned, returns -errno.
  */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages)
+int get_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr, len, end;
@@ -241,7 +241,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			goto slow;
-		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+		if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
+				   pages, &nr))
 			goto slow;
 	} while (pgdp++, addr = next, addr != end);
 	local_irq_enable();
@@ -261,7 +262,7 @@ slow_irqon:
 
 	ret = get_user_pages_unlocked(start,
 			(end - start) >> PAGE_SHIFT, pages,
-			write ? FOLL_WRITE : 0);
+			gup_flags);
 
 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index aee6dba83d0e..1e770a517d4a 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -245,8 +245,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	return nr;
 }
 
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages)
+int get_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr, len, end;
@@ -303,7 +303,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			goto slow;
-		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+		if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
+				   pages, &nr))
 			goto slow;
 	} while (pgdp++, addr = next, addr != end);
 
@@ -324,7 +325,7 @@ slow:
 
 	ret = get_user_pages_unlocked(start,
 			(end - start) >> PAGE_SHIFT, pages,
-			write ? FOLL_WRITE : 0);
+			gup_flags);
 
 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6bdca39829bc..08715034e315 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -140,7 +140,7 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 		pt_element_t *table;
 		struct page *page;
 
-		npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
+		npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
 		/* Check if the user is doing something meaningless. */
 		if (unlikely(npages != 1))
 			return -EFAULT;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 406b558abfef..6b92eaf4a3b1 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1805,7 +1805,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 		return NULL;
 
 	/* Pin the user virtual address. */
-	npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+	npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
 	if (npinned != npages) {
 		pr_err("SEV: Failure locking %lu pages.\n", npages);
 		goto err;
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index e18a786fc943..c438722bf4e1 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -102,7 +102,7 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
 		goto unlock_vm;
 	}
 
-	pinned = get_user_pages_fast(region->user_addr, npages, 1,
+	pinned = get_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
 				     region->pages);
 	if (pinned < 0) {
 		ret = pinned;
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 8bf3a7c23ed3..062067438f1d 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -243,7 +243,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
 	if (NULL == vsg->pages)
 		return -ENOMEM;
 	ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
-			vsg->num_pages, vsg->direction == DMA_FROM_DEVICE,
+			vsg->num_pages,
+			vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
 			vsg->pages);
 	if (ret != vsg->num_pages) {
 		if (ret < 0)
diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index 24b592c6522e..78ccacaf97d0 100644
--- a/drivers/infiniband/hw/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
@@ -105,7 +105,8 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
 {
 	int ret;
 
-	ret = get_user_pages_fast(vaddr, npages, writable, pages);
+	ret = get_user_pages_fast(vaddr, npages, writable ? FOLL_WRITE : 0,
+				  pages);
 	if (ret < 0)
 		return ret;
 
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 25265fd0fd6e..89cff9d1012b 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -603,7 +603,7 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
 	/* pin user pages in memory */
 	rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
 				 m->nr_pages,
-				 m->write,		/* readable/writable */
+				 m->write ? FOLL_WRITE : 0,	/* readable/writable */
 				 m->page_list);	/* ptrs to pages */
 	if (rc < 0)
 		goto fail_get_user_pages;
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 997f92543dd4..422d08da3244 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -242,7 +242,7 @@ static int vmci_host_setup_notify(struct vmci_ctx *context,
 	/*
 	 * Lock physical page backing a given user VA.
 	 */
-	retval = get_user_pages_fast(uva, 1, 1, &context->notify_page);
+	retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
 	if (retval != 1) {
 		context->notify_page = NULL;
 		return VMCI_ERROR_GENERIC;
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f5f1aac9d163..1174735f003d 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -659,7 +659,8 @@ static int qp_host_get_user_memory(u64 produce_uva,
 	int err = VMCI_SUCCESS;
 
 	retval = get_user_pages_fast((uintptr_t) produce_uva,
-				     produce_q->kernel_if->num_pages, 1,
+				     produce_q->kernel_if->num_pages,
+				     FOLL_WRITE,
 				     produce_q->kernel_if->u.h.header_page);
 	if (retval < (int)produce_q->kernel_if->num_pages) {
 		pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
@@ -671,7 +672,8 @@ static int qp_host_get_user_memory(u64 produce_uva,
 	}
 
 	retval = get_user_pages_fast((uintptr_t) consume_uva,
-				     consume_q->kernel_if->num_pages, 1,
+				     consume_q->kernel_if->num_pages,
+				     FOLL_WRITE,
 				     consume_q->kernel_if->u.h.header_page);
 	if (retval < (int)consume_q->kernel_if->num_pages) {
 		pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 321bc673c417..cef0133aa47a 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -274,7 +274,8 @@ static int pin_user_pages(unsigned long first_page,
 		*iter_last_page_size = last_page_size;
 	}
 
-	ret = get_user_pages_fast(first_page, requested_pages, !is_write,
+	ret = get_user_pages_fast(first_page, requested_pages,
+				  !is_write ? FOLL_WRITE : 0,
 				  pages);
 	if (ret <= 0)
 		return -EFAULT;
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 1e1f42e210a0..4a4a75fa26d5 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -868,7 +868,9 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 
 		pinned = get_user_pages_fast(
 				(unsigned long)xfer->loc_addr & PAGE_MASK,
-				nr_pages, dir == DMA_FROM_DEVICE, page_list);
+				nr_pages,
+				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
+				page_list);
 
 		if (pinned != nr_pages) {
 			if (pinned < 0) {
diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
index acd9ba40eabe..8090dc9a1514 100644
--- a/drivers/sbus/char/oradax.c
+++ b/drivers/sbus/char/oradax.c
@@ -437,7 +437,7 @@ static int dax_lock_page(void *va, struct page **p)
 
 	dax_dbg("uva %p", va);
 
-	ret = get_user_pages_fast((unsigned long)va, 1, 1, p);
+	ret = get_user_pages_fast((unsigned long)va, 1, FOLL_WRITE, p);
 	if (ret == 1) {
 		dax_dbg("locked page %p, for VA %p", *p, va);
 		return 0;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 19c022e66d63..3c6a18ad9a87 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4922,7 +4922,8 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
 
 	/* Try to fault in all of the necessary pages */
 	/* rw==READ means read from drive, write into memory area */
-	res = get_user_pages_fast(uaddr, nr_pages, rw == READ, pages);
+	res = get_user_pages_fast(uaddr, nr_pages, rw == READ ? FOLL_WRITE : 0,
+				  pages);
 
 	/* Errors and no page mapped should return here */
 	if (res < nr_pages)
diff --git a/drivers/staging/gasket/gasket_page_table.c b/drivers/staging/gasket/gasket_page_table.c
index 600928f63577..d35c4fb19e28 100644
--- a/drivers/staging/gasket/gasket_page_table.c
+++ b/drivers/staging/gasket/gasket_page_table.c
@@ -486,8 +486,8 @@ static int gasket_perform_mapping(struct gasket_page_table *pg_tbl,
 			ptes[i].dma_addr = pg_tbl->coherent_pages[0].paddr +
 					   off + i * PAGE_SIZE;
 		} else {
-			ret = get_user_pages_fast(page_addr - offset, 1, 1,
-						  &page);
+			ret = get_user_pages_fast(page_addr - offset, 1,
+						  FOLL_WRITE, &page);
 
 			if (ret <= 0) {
 				dev_err(pg_tbl->device,
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 0b9ab1d0dd45..49fd7312e2aa 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -273,7 +273,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 			goto err;
 		}
 
-		rc = get_user_pages_fast(start, num_pages, 1, shm->pages);
+		rc = get_user_pages_fast(start, num_pages, FOLL_WRITE, shm->pages);
 		if (rc > 0)
 			shm->num_pages = rc;
 		if (rc != num_pages) {
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 6b64e45a5269..40ddc0c5f677 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -532,7 +532,8 @@ static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
 	enum dma_data_direction direction = iommu_tce_direction(tce);
 
 	if (get_user_pages_fast(tce & PAGE_MASK, 1,
-			direction != DMA_TO_DEVICE, &page) != 1)
+			direction != DMA_TO_DEVICE ? FOLL_WRITE : 0,
+			&page) != 1)
 		return -EFAULT;
 
 	*hpa = __pa((unsigned long) page_address(page));
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 351af88231ad..1e3ed41ae1f3 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1704,7 +1704,7 @@ static int set_bit_to_user(int nr, void __user *addr)
 	int bit = nr + (log % PAGE_SIZE) * 8;
 	int r;
 
-	r = get_user_pages_fast(log, 1, 1, &page);
+	r = get_user_pages_fast(log, 1, FOLL_WRITE, &page);
 	if (r < 0)
 		return r;
 	BUG_ON(r != 1);
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index dfed532ed606..4e4d6a0df978 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -686,7 +686,7 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
 	if (!pages)
 		return -ENOMEM;
 
-	ret = get_user_pages_fast((unsigned long)buf, nr_pages, true, pages);
+	ret = get_user_pages_fast((unsigned long)buf, nr_pages, FOLL_WRITE, pages);
 	if (ret < nr_pages) {
 		nr_pages = ret;
 		ret = -EINVAL;
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 8ba726e600e9..6446bcab4185 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -244,7 +244,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
 
 	/* Get the physical addresses of the source buffer */
 	num_pinned = get_user_pages_fast(param.local_vaddr - lb_offset,
-		num_pages, param.source != -1, pages);
+		num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);
 
 	if (num_pinned != num_pages) {
 		/* get_user_pages() failed */
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 7cf9c51318aa..02bc815982d4 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -852,7 +852,7 @@ static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
 	unsigned long xen_pfn;
 	int ret;
 
-	ret = get_user_pages_fast(addr, 1, writeable, &page);
+	ret = get_user_pages_fast(addr, 1, writeable ? FOLL_WRITE : 0, &page);
 	if (ret < 0)
 		return ret;
 
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
index d4811f981608..2bb916d68576 100644
--- a/fs/orangefs/orangefs-bufmap.c
+++ b/fs/orangefs/orangefs-bufmap.c
@@ -269,7 +269,7 @@ orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
 
 	/* map the pages */
 	ret = get_user_pages_fast((unsigned long)user_desc->ptr,
-			bufmap->page_count, 1, bufmap->page_array);
+			bufmap->page_count, FOLL_WRITE, bufmap->page_array);
 
 	if (ret < 0)
 		return ret;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8bc677ce8f01..c3c73b3c9adc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1505,8 +1505,8 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 		    struct page **pages, unsigned int gup_flags);
 
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages);
+int get_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages);
 
 /* Container for pinned pfns / pages */
 struct frame_vector {
diff --git a/kernel/futex.c b/kernel/futex.c
index 6262f1534ac9..2268b97d5439 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -543,7 +543,7 @@ again:
 	if (unlikely(should_fail_futex(fshared)))
 		return -EFAULT;
 
-	err = get_user_pages_fast(address, 1, 1, &page);
+	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
 	/*
 	 * If write access is not required (eg. FUTEX_WAIT), try
 	 * and get read-only access.
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index b396d328a764..f74fa832f3aa 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1293,7 +1293,9 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
 		len = maxpages * PAGE_SIZE;
 		addr &= ~(PAGE_SIZE - 1);
 		n = DIV_ROUND_UP(len, PAGE_SIZE);
-		res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages);
+		res = get_user_pages_fast(addr, n,
+				iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
+				pages);
 		if (unlikely(res < 0))
 			return res;
 		return (res == n ? len : res * PAGE_SIZE) - *start;
@@ -1374,7 +1376,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
 		p = get_pages_array(n);
 		if (!p)
 			return -ENOMEM;
-		res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p);
+		res = get_user_pages_fast(addr, n,
+				iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
 		if (unlikely(res < 0)) {
 			kvfree(p);
 			return res;
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2062,7 +2062,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * get_user_pages_fast() - pin user pages in memory
  * @start: starting user address
  * @nr_pages: number of pages from start to pin
- * @write: whether pages will be written to
+ * @gup_flags: flags modifying pin behaviour
  * @pages: array that receives pointers to the pages pinned.
  *	Should be at least nr_pages long.
  *
@@ -2074,8 +2074,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * requested. If nr_pages is 0 or negative, returns 0. If no pages
  * were pinned, returns -errno.
  */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages)
+int get_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages)
 {
 	unsigned long addr, len, end;
 	int nr = 0, ret = 0;
@@ -2093,7 +2093,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 
 	if (gup_fast_permitted(start, nr_pages)) {
 		local_irq_disable();
-		gup_pgd_range(addr, end, write ? FOLL_WRITE : 0, pages, &nr);
+		gup_pgd_range(addr, end, gup_flags, pages, &nr);
 		local_irq_enable();
 		ret = nr;
 	}
@@ -2104,7 +2104,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		pages += nr;
 
 		ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
-					      write ? FOLL_WRITE : 0);
+					      gup_flags);
 
 		/* Have to be a bit careful with return values */
 		if (nr > 0) {
diff --git a/mm/util.c b/mm/util.c
--- a/mm/util.c
+++ b/mm/util.c
@@ -318,7 +318,7 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast);
  * get_user_pages_fast() - pin user pages in memory
  * @start: starting user address
  * @nr_pages: number of pages from start to pin
- * @write: whether pages will be written to
+ * @gup_flags: flags modifying pin behaviour
  * @pages: array that receives pointers to the pages pinned.
  *	Should be at least nr_pages long.
  *
@@ -339,10 +339,10 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast);
  * were pinned, returns -errno.
  */
 int __weak get_user_pages_fast(unsigned long start,
-				int nr_pages, int write, struct page **pages)
+				int nr_pages, unsigned int gup_flags,
+				struct page **pages)
 {
-	return get_user_pages_unlocked(start, nr_pages, pages,
-				       write ? FOLL_WRITE : 0);
+	return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
 
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index d3736f5bffec..74cafc0142ea 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -27,7 +27,7 @@ struct page **ceph_get_direct_page_vector(const void __user *data,
 	while (got < num_pages) {
 		rc = get_user_pages_fast(
 		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
-		    num_pages - got, write_page, pages + got);
+		    num_pages - got, write_page ? FOLL_WRITE : 0, pages + got);
 		if (rc < 0)
 			break;
 		BUG_ON(rc == 0);
diff --git a/net/rds/info.c b/net/rds/info.c
index e367a97a18c8..03f6fd56d237 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -193,7 +193,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
 		ret = -ENOMEM;
 		goto out;
 	}
-	ret = get_user_pages_fast(start, nr_pages, 1, pages);
+	ret = get_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
 	if (ret != nr_pages) {
 		if (ret > 0)
 			nr_pages = ret;
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 182ab8430594..b340ed4fc43a 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -158,7 +158,8 @@ static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
 {
 	int ret;
 
-	ret = get_user_pages_fast(user_addr, nr_pages, write, pages);
+	ret = get_user_pages_fast(user_addr, nr_pages, write ? FOLL_WRITE : 0,
+				  pages);
 
 	if (ret >= 0 && ret < nr_pages) {
 		while (ret--)