-rw-r--r--  arch/alpha/kernel/ptrace.c | 9
-rw-r--r--  arch/blackfin/kernel/ptrace.c | 5
-rw-r--r--  arch/cris/arch-v32/drivers/cryptocop.c | 4
-rw-r--r--  arch/cris/arch-v32/kernel/ptrace.c | 4
-rw-r--r--  arch/ia64/kernel/err_inject.c | 2
-rw-r--r--  arch/ia64/kernel/ptrace.c | 14
-rw-r--r--  arch/m32r/kernel/ptrace.c | 15
-rw-r--r--  arch/mips/kernel/ptrace32.c | 5
-rw-r--r--  arch/mips/mm/gup.c | 2
-rw-r--r--  arch/powerpc/kernel/ptrace32.c | 5
-rw-r--r--  arch/s390/mm/gup.c | 3
-rw-r--r--  arch/score/kernel/ptrace.c | 10
-rw-r--r--  arch/sh/mm/gup.c | 3
-rw-r--r--  arch/sparc/kernel/ptrace_64.c | 24
-rw-r--r--  arch/sparc/mm/gup.c | 3
-rw-r--r--  arch/x86/kernel/step.c | 3
-rw-r--r--  arch/x86/mm/gup.c | 2
-rw-r--r--  arch/x86/mm/mpx.c | 5
-rw-r--r--  arch/x86/um/ptrace_32.c | 3
-rw-r--r--  arch/x86/um/ptrace_64.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 7
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 3
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.c | 4
-rw-r--r--  drivers/infiniband/core/umem.c | 6
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 7
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_pages.c | 3
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_uiom.c | 5
-rw-r--r--  drivers/media/pci/ivtv/ivtv-udma.c | 4
-rw-r--r--  drivers/media/pci/ivtv/ivtv-yuv.c | 5
-rw-r--r--  drivers/media/platform/omap/omap_vout.c | 2
-rw-r--r--  drivers/media/v4l2-core/videobuf-dma-sg.c | 7
-rw-r--r--  drivers/media/v4l2-core/videobuf2-memops.c | 6
-rw-r--r--  drivers/misc/mic/scif/scif_rma.c | 3
-rw-r--r--  drivers/misc/sgi-gru/grufault.c | 2
-rw-r--r--  drivers/platform/goldfish/goldfish_pipe.c | 3
-rw-r--r--  drivers/rapidio/devices/rio_mport_cdev.c | 3
-rw-r--r--  drivers/scsi/st.c | 5
-rw-r--r--  drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c | 3
-rw-r--r--  drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 3
-rw-r--r--  drivers/video/fbdev/pvr2fb.c | 4
-rw-r--r--  drivers/virt/fsl_hypervisor.c | 4
-rw-r--r--  fs/exec.c | 9
-rw-r--r--  fs/proc/base.c | 19
-rw-r--r--  include/linux/mm.h | 19
-rw-r--r--  kernel/events/uprobes.c | 6
-rw-r--r--  kernel/ptrace.c | 16
-rw-r--r--  mm/frame_vector.c | 9
-rw-r--r--  mm/gup.c | 64
-rw-r--r--  mm/memory.c | 16
-rw-r--r--  mm/mempolicy.c | 2
-rw-r--r--  mm/nommu.c | 38
-rw-r--r--  mm/process_vm_access.c | 7
-rw-r--r--  mm/util.c | 8
-rw-r--r--  net/ceph/pagevec.c | 2
-rw-r--r--  security/tomoyo/domain.c | 2
-rw-r--r--  virt/kvm/async_pf.c | 3
-rw-r--r--  virt/kvm/kvm_main.c | 11
61 files changed, 273 insertions, 189 deletions
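
Every hunk below applies the same conversion: the separate write/force int arguments of get_user_pages(), get_user_pages_remote(), get_user_pages_unlocked(), get_vaddr_frames(), access_process_vm() and access_remote_vm() are replaced by a single gup_flags bitmask built from FOLL_WRITE and FOLL_FORCE. The sketch below is illustrative only and is not part of the patch; foo_pin_pages() and its write/force parameters are made-up names standing in for a typical caller before conversion.

#include <linux/mm.h>

/* Hypothetical caller showing the old write/force pair mapped onto gup_flags. */
static long foo_pin_pages(unsigned long start, unsigned long nr_pages,
			  int write, int force, struct page **pages)
{
	unsigned int gup_flags = 0;

	if (write)			/* old 'write = 1' argument */
		gup_flags |= FOLL_WRITE;
	if (force)			/* old 'force = 1' argument */
		gup_flags |= FOLL_FORCE;

	/* New prototype from include/linux/mm.h in this patch. */
	return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
}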
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index d9ee81769899..940dfb406591 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -157,14 +157,16 @@ put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
 static inline int
 read_int(struct task_struct *task, unsigned long addr, int * data)
 {
-	int copied = access_process_vm(task, addr, data, sizeof(int), 0);
+	int copied = access_process_vm(task, addr, data, sizeof(int),
+			FOLL_FORCE);
 	return (copied == sizeof(int)) ? 0 : -EIO;
 }

 static inline int
 write_int(struct task_struct *task, unsigned long addr, int data)
 {
-	int copied = access_process_vm(task, addr, &data, sizeof(int), 1);
+	int copied = access_process_vm(task, addr, &data, sizeof(int),
+			FOLL_FORCE | FOLL_WRITE);
 	return (copied == sizeof(int)) ? 0 : -EIO;
 }

@@ -281,7 +283,8 @@ long arch_ptrace(struct task_struct *child, long request,
 	/* When I and D space are separate, these will need to be fixed. */
 	case PTRACE_PEEKTEXT: /* read word at location addr. */
 	case PTRACE_PEEKDATA:
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+		copied = access_process_vm(child, addr, &tmp, sizeof(tmp),
+				FOLL_FORCE);
 		ret = -EIO;
 		if (copied != sizeof(tmp))
 			break;
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 8b8fe671b1a6..8d79286ee4e8 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -271,7 +271,7 @@ long arch_ptrace(struct task_struct *child, long request,
 	case BFIN_MEM_ACCESS_CORE:
 	case BFIN_MEM_ACCESS_CORE_ONLY:
 		copied = access_process_vm(child, addr, &tmp,
-					   to_copy, 0);
+					   to_copy, FOLL_FORCE);
 		if (copied)
 			break;

@@ -324,7 +324,8 @@ long arch_ptrace(struct task_struct *child, long request,
 	case BFIN_MEM_ACCESS_CORE:
 	case BFIN_MEM_ACCESS_CORE_ONLY:
 		copied = access_process_vm(child, addr, &data,
-					   to_copy, 1);
+					   to_copy,
+					   FOLL_FORCE | FOLL_WRITE);
 		break;
 	case BFIN_MEM_ACCESS_DMA:
 		if (safe_dma_memcpy(paddr, &data, to_copy))
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index b5698c876fcc..099e170a93ee 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2722,7 +2722,6 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
 		err = get_user_pages((unsigned long int)(oper.indata + prev_ix),
 				     noinpages,
 				     0,  /* read access only for in data */
-				     0, /* no force */
 				     inpages,
 				     NULL);

@@ -2736,8 +2735,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
 		if (oper.do_cipher){
 			err = get_user_pages((unsigned long int)oper.cipher_outdata,
 					     nooutpages,
-					     1, /* write access for out data */
-					     0, /* no force */
+					     FOLL_WRITE, /* write access for out data */
 					     outpages,
 					     NULL);
 			up_read(&current->mm->mmap_sem);
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index f085229cf870..f0df654ac6fc 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -147,7 +147,7 @@ long arch_ptrace(struct task_struct *child, long request,
 		/* The trampoline page is globally mapped, no page table to traverse.*/
 		tmp = *(unsigned long*)addr;
 	} else {
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE);

 		if (copied != sizeof(tmp))
 			break;
@@ -279,7 +279,7 @@ static int insn_size(struct task_struct *child, unsigned long pc)
 	int opsize = 0;

 	/* Read the opcode at pc (do what PTRACE_PEEKTEXT would do). */
-	copied = access_process_vm(child, pc, &opcode, sizeof(opcode), 0);
+	copied = access_process_vm(child, pc, &opcode, sizeof(opcode), FOLL_FORCE);
 	if (copied != sizeof(opcode))
 		return 0;

diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 09f845793d12..5ed0ea92c5bf 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
 	u64 virt_addr=simple_strtoull(buf, NULL, 16);
 	int ret;

-	ret = get_user_pages(virt_addr, 1, VM_READ, 0, NULL, NULL);
+	ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
 	if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
 		printk("Virtual address %lx is not existing.\n",virt_addr);
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 6f54d511cc50..31aa8c0f68e1 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -453,7 +453,7 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
 			return 0;
 		}
 	}
-	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
+	copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
 	if (copied != sizeof(ret))
 		return -EIO;
 	*val = ret;
@@ -489,7 +489,8 @@ ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
 				*ia64_rse_skip_regs(krbs, regnum) = val;
 			}
 		}
-	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
+	} else if (access_process_vm(child, addr, &val, sizeof(val),
+				FOLL_FORCE | FOLL_WRITE)
 		   != sizeof(val))
 		return -EIO;
 	return 0;
@@ -543,7 +544,8 @@ ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
 		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
 		if (ret < 0)
 			return ret;
-		if (access_process_vm(child, addr, &val, sizeof(val), 1)
+		if (access_process_vm(child, addr, &val, sizeof(val),
+				FOLL_FORCE | FOLL_WRITE)
 		    != sizeof(val))
 			return -EIO;
 	}
@@ -559,7 +561,8 @@ ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,

 	/* now copy word for word from user rbs to kernel rbs: */
 	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
-		if (access_process_vm(child, addr, &val, sizeof(val), 0)
+		if (access_process_vm(child, addr, &val, sizeof(val),
+				FOLL_FORCE)
 		    != sizeof(val))
 			return -EIO;

@@ -1156,7 +1159,8 @@ arch_ptrace (struct task_struct *child, long request,
 	      case PTRACE_PEEKTEXT:
 	      case PTRACE_PEEKDATA:
 		/* read word at location addr */
-		if (access_process_vm(child, addr, &data, sizeof(data), 0)
+		if (access_process_vm(child, addr, &data, sizeof(data),
+				FOLL_FORCE)
 		    != sizeof(data))
 			return -EIO;
 		/* ensure return value is not mistaken for error code */
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 51f5e9aa4901..c145605a981f 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -493,7 +493,8 @@ unregister_all_debug_traps(struct task_struct *child)
 	int i;

 	for (i = 0; i < p->nr_trap; i++)
-		access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]), 1);
+		access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]),
+				  FOLL_FORCE | FOLL_WRITE);
 	p->nr_trap = 0;
 }

@@ -537,7 +538,8 @@ embed_debug_trap(struct task_struct *child, unsigned long next_pc)
 	unsigned long next_insn, code;
 	unsigned long addr = next_pc & ~3;

-	if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), 0)
+	if (access_process_vm(child, addr, &next_insn, sizeof(next_insn),
+			      FOLL_FORCE)
 	    != sizeof(next_insn)) {
 		return -1; /* error */
 	}
@@ -546,7 +548,8 @@ embed_debug_trap(struct task_struct *child, unsigned long next_pc)
 	if (register_debug_trap(child, next_pc, next_insn, &code)) {
 		return -1; /* error */
 	}
-	if (access_process_vm(child, addr, &code, sizeof(code), 1)
+	if (access_process_vm(child, addr, &code, sizeof(code),
+			      FOLL_FORCE | FOLL_WRITE)
 	    != sizeof(code)) {
 		return -1; /* error */
 	}
@@ -562,7 +565,8 @@ withdraw_debug_trap(struct pt_regs *regs)
 	addr = (regs->bpc - 2) & ~3;
 	regs->bpc -= 2;
 	if (unregister_debug_trap(current, addr, &code)) {
-	    access_process_vm(current, addr, &code, sizeof(code), 1);
+	    access_process_vm(current, addr, &code, sizeof(code),
+			      FOLL_FORCE | FOLL_WRITE);
 	    invalidate_cache();
 	}
 }
@@ -589,7 +593,8 @@ void user_enable_single_step(struct task_struct *child)
 	/* Compute next pc.  */
 	pc = get_stack_long(child, PT_BPC);

-	if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
+	if (access_process_vm(child, pc&~3, &insn, sizeof(insn),
+			      FOLL_FORCE)
 	    != sizeof(insn))
 		return;

diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 283b5a1967d1..7e71a4e0281b 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -70,7 +70,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			break;

 		copied = access_process_vm(child, (u64)addrOthers, &tmp,
-				sizeof(tmp), 0);
+				sizeof(tmp), FOLL_FORCE);
 		if (copied != sizeof(tmp))
 			break;
 		ret = put_user(tmp, (u32 __user *) (unsigned long) data);
@@ -179,7 +179,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			break;
 		ret = 0;
 		if (access_process_vm(child, (u64)addrOthers, &data,
-					sizeof(data), 1) == sizeof(data))
+					sizeof(data),
+					FOLL_FORCE | FOLL_WRITE) == sizeof(data))
 			break;
 		ret = -EIO;
 		break;
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 42d124fb6474..d8c3c159289a 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -287,7 +287,7 @@ slow_irqon:
 	pages += nr;

 	ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
-					write, 0, pages);
+				      pages, write ? FOLL_WRITE : 0);

 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index f52b7db327c8..010b7b310237 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -74,7 +74,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			break;

 		copied = access_process_vm(child, (u64)addrOthers, &tmp,
-				sizeof(tmp), 0);
+				sizeof(tmp), FOLL_FORCE);
 		if (copied != sizeof(tmp))
 			break;
 		ret = put_user(tmp, (u32 __user *)data);
@@ -179,7 +179,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			break;
 		ret = 0;
 		if (access_process_vm(child, (u64)addrOthers, &tmp,
-					sizeof(tmp), 1) == sizeof(tmp))
+					sizeof(tmp),
+					FOLL_FORCE | FOLL_WRITE) == sizeof(tmp))
 			break;
 		ret = -EIO;
 		break;
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index adb0c34bf431..18d4107e10ee 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -266,7 +266,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	/* Try to get the remaining pages with get_user_pages */
 	start += nr << PAGE_SHIFT;
 	pages += nr;
-	ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
+	ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
+				      write ? FOLL_WRITE : 0);
 	/* Have to be a bit careful with return values */
 	if (nr > 0)
 		ret = (ret < 0) ? nr : ret + nr;
diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c
index 55836188b217..4f7314d5f334 100644
--- a/arch/score/kernel/ptrace.c
+++ b/arch/score/kernel/ptrace.c
@@ -131,7 +131,7 @@ read_tsk_long(struct task_struct *child,
 {
 	int copied;

-	copied = access_process_vm(child, addr, res, sizeof(*res), 0);
+	copied = access_process_vm(child, addr, res, sizeof(*res), FOLL_FORCE);

 	return copied != sizeof(*res) ? -EIO : 0;
 }
@@ -142,7 +142,7 @@ read_tsk_short(struct task_struct *child,
 {
 	int copied;

-	copied = access_process_vm(child, addr, res, sizeof(*res), 0);
+	copied = access_process_vm(child, addr, res, sizeof(*res), FOLL_FORCE);

 	return copied != sizeof(*res) ? -EIO : 0;
 }
@@ -153,7 +153,8 @@ write_tsk_short(struct task_struct *child,
 {
 	int copied;

-	copied = access_process_vm(child, addr, &val, sizeof(val), 1);
+	copied = access_process_vm(child, addr, &val, sizeof(val),
+				   FOLL_FORCE | FOLL_WRITE);

 	return copied != sizeof(val) ? -EIO : 0;
 }
@@ -164,7 +165,8 @@ write_tsk_long(struct task_struct *child,
 {
 	int copied;

-	copied = access_process_vm(child, addr, &val, sizeof(val), 1);
+	copied = access_process_vm(child, addr, &val, sizeof(val),
+				   FOLL_FORCE | FOLL_WRITE);

 	return copied != sizeof(val) ? -EIO : 0;
 }
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
index 40fa6c8adc43..063c298ba56c 100644
--- a/arch/sh/mm/gup.c
+++ b/arch/sh/mm/gup.c
@@ -258,7 +258,8 @@ slow_irqon:
 	pages += nr;

 	ret = get_user_pages_unlocked(start,
-		(end - start) >> PAGE_SHIFT, write, 0, pages);
+		(end - start) >> PAGE_SHIFT, pages,
+		write ? FOLL_WRITE : 0);

 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 9ddc4928a089..ac082dd8c67d 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -127,7 +127,8 @@ static int get_from_target(struct task_struct *target, unsigned long uaddr,
 		if (copy_from_user(kbuf, (void __user *) uaddr, len))
 			return -EFAULT;
 	} else {
-		int len2 = access_process_vm(target, uaddr, kbuf, len, 0);
+		int len2 = access_process_vm(target, uaddr, kbuf, len,
+				FOLL_FORCE);
 		if (len2 != len)
 			return -EFAULT;
 	}
@@ -141,7 +142,8 @@ static int set_to_target(struct task_struct *target, unsigned long uaddr,
 		if (copy_to_user((void __user *) uaddr, kbuf, len))
 			return -EFAULT;
 	} else {
-		int len2 = access_process_vm(target, uaddr, kbuf, len, 1);
+		int len2 = access_process_vm(target, uaddr, kbuf, len,
+				FOLL_FORCE | FOLL_WRITE);
 		if (len2 != len)
 			return -EFAULT;
 	}
@@ -505,7 +507,8 @@ static int genregs32_get(struct task_struct *target,
 			if (access_process_vm(target,
 					      (unsigned long)
 					      &reg_window[pos],
-					      k, sizeof(*k), 0)
+					      k, sizeof(*k),
+					      FOLL_FORCE)
 			    != sizeof(*k))
 				return -EFAULT;
 			k++;
@@ -531,12 +534,14 @@ static int genregs32_get(struct task_struct *target,
 			if (access_process_vm(target,
 					      (unsigned long)
 					      &reg_window[pos],
-					      &reg, sizeof(reg), 0)
+					      &reg, sizeof(reg),
+					      FOLL_FORCE)
 			    != sizeof(reg))
 				return -EFAULT;
 			if (access_process_vm(target,
 					      (unsigned long) u,
-					      &reg, sizeof(reg), 1)
+					      &reg, sizeof(reg),
+					      FOLL_FORCE | FOLL_WRITE)
 			    != sizeof(reg))
 				return -EFAULT;
 			pos++;
@@ -615,7 +620,8 @@ static int genregs32_set(struct task_struct *target,
 					      (unsigned long)
 					      &reg_window[pos],
 					      (void *) k,
-					      sizeof(*k), 1)
+					      sizeof(*k),
+					      FOLL_FORCE | FOLL_WRITE)
 			    != sizeof(*k))
 				return -EFAULT;
 			k++;
@@ -642,13 +648,15 @@ static int genregs32_set(struct task_struct *target,
 			if (access_process_vm(target,
 					      (unsigned long)
 					      u,
-					      &reg, sizeof(reg), 0)
+					      &reg, sizeof(reg),
+					      FOLL_FORCE)
 			    != sizeof(reg))
 				return -EFAULT;
 			if (access_process_vm(target,
 					      (unsigned long)
 					      &reg_window[pos],
-					      &reg, sizeof(reg), 1)
+					      &reg, sizeof(reg),
+					      FOLL_FORCE | FOLL_WRITE)
 			    != sizeof(reg))
 				return -EFAULT;
 			pos++;
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 4e06750a5d29..cd0e32bbcb1d 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -238,7 +238,8 @@ slow:
 	pages += nr;

 	ret = get_user_pages_unlocked(start,
-		(end - start) >> PAGE_SHIFT, write, 0, pages);
+		(end - start) >> PAGE_SHIFT, pages,
+		write ? FOLL_WRITE : 0);

 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index c9a073866ca7..a23ce84a3f6c 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -57,7 +57,8 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
 	unsigned char opcode[15];
 	unsigned long addr = convert_ip_to_linear(child, regs);

-	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
+	copied = access_process_vm(child, addr, opcode, sizeof(opcode),
+			FOLL_FORCE);
 	for (i = 0; i < copied; i++) {
 		switch (opcode[i]) {
 		/* popf and iret */
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index b8b6a60b32cf..0d4fb3ebbbac 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -435,7 +435,7 @@ slow_irqon:

 	ret = get_user_pages_unlocked(start,
 				      (end - start) >> PAGE_SHIFT,
-				      write, 0, pages);
+				      pages, write ? FOLL_WRITE : 0);

 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 80476878eb4c..e4f800999b32 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -544,10 +544,9 @@ static int mpx_resolve_fault(long __user *addr, int write)
 {
 	long gup_ret;
 	int nr_pages = 1;
-	int force = 0;

-	gup_ret = get_user_pages((unsigned long)addr, nr_pages, write,
-				force, NULL, NULL);
+	gup_ret = get_user_pages((unsigned long)addr, nr_pages,
+				 write ? FOLL_WRITE : 0, NULL, NULL);
 	/*
 	 * get_user_pages() returns number of pages gotten.
 	 * 0 means we failed to fault in and get anything,
diff --git a/arch/x86/um/ptrace_32.c b/arch/x86/um/ptrace_32.c
index 5766ead6fdb9..60a5a5a85505 100644
--- a/arch/x86/um/ptrace_32.c
+++ b/arch/x86/um/ptrace_32.c
@@ -36,7 +36,8 @@ int is_syscall(unsigned long addr)
 	 * slow, but that doesn't matter, since it will be called only
 	 * in case of singlestepping, if copy_from_user failed.
 	 */
-	n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
+	n = access_process_vm(current, addr, &instr, sizeof(instr),
+			FOLL_FORCE);
 	if (n != sizeof(instr)) {
 		printk(KERN_ERR "is_syscall : failed to read "
 		       "instruction from 0x%lx\n", addr);
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index 0b5c184dd5b3..e30202b1716e 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -212,7 +212,8 @@ int is_syscall(unsigned long addr)
 	 * slow, but that doesn't matter, since it will be called only
 	 * in case of singlestepping, if copy_from_user failed.
 	 */
-	n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
+	n = access_process_vm(current, addr, &instr, sizeof(instr),
+			FOLL_FORCE);
 	if (n != sizeof(instr)) {
 		printk("is_syscall : failed to read instruction from "
 		       "0x%lx\n", addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 887483b8b818..dcaf691f56b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -555,10 +555,13 @@ struct amdgpu_ttm_tt {
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+	unsigned int flags = 0;
 	unsigned pinned = 0;
 	int r;

+	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+		flags |= FOLL_WRITE;
+
 	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
 		/* check that we only use anonymous memory
 		   to prevent problems with writeback */
@@ -581,7 +584,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 		list_add(&guptask.list, &gtt->guptasks);
 		spin_unlock(&gtt->guptasklock);

-		r = get_user_pages(userptr, num_pages, write, 0, p, NULL);
+		r = get_user_pages(userptr, num_pages, flags, p, NULL);

 		spin_lock(&gtt->guptasklock);
 		list_del(&guptask.list);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5ce3603e6eac..0370b842d9cc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -748,19 +748,22 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
 	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 	struct page **pvec;
 	uintptr_t ptr;
+	unsigned int flags = 0;

 	pvec = drm_malloc_ab(npages, sizeof(struct page *));
 	if (!pvec)
 		return ERR_PTR(-ENOMEM);

+	if (!etnaviv_obj->userptr.ro)
+		flags |= FOLL_WRITE;
+
 	pinned = 0;
 	ptr = etnaviv_obj->userptr.ptr;

 	down_read(&mm->mmap_sem);
 	while (pinned < npages) {
 		ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
-					    !etnaviv_obj->userptr.ro, 0,
-					    pvec + pinned, NULL);
+					    flags, pvec + pinned, NULL);
 		if (ret < 0)
 			break;

diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index aa92decf4233..fbd13fabdf2d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -488,7 +488,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
 		goto err_free;
 	}

-	ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+	ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+			       g2d_userptr->vec);
 	if (ret != npages) {
 		DRM_ERROR("failed to get user pages from userptr.\n");
 		if (ret < 0)
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index e537930c64b5..c6f780f5abc9 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -508,6 +508,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
 	if (pvec != NULL) {
 		struct mm_struct *mm = obj->userptr.mm->mm;
+		unsigned int flags = 0;
+
+		if (!obj->userptr.read_only)
+			flags |= FOLL_WRITE;

 		ret = -EFAULT;
 		if (atomic_inc_not_zero(&mm->mm_users)) {
@@ -517,7 +521,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 				(work->task, mm,
 				 obj->userptr.ptr + pinned * PAGE_SIZE,
 				 npages - pinned,
-				 !obj->userptr.read_only, 0,
+				 flags,
 				 pvec + pinned, NULL);
 			if (ret < 0)
 				break;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 455268214b89..3de5e6e21662 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -566,7 +566,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
 		struct page **pages = ttm->pages + pinned;

-		r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
+		r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
+				   pages, NULL);
 		if (r < 0)
 			goto release_pages;

diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 7e2a12c4fed2..1a3ad769f8c8 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -241,8 +241,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
 	down_read(&current->mm->mmap_sem);
 	ret = get_user_pages((unsigned long)xfer->mem_addr,
 			     vsg->num_pages,
-			     (vsg->direction == DMA_FROM_DEVICE),
-			     0, vsg->pages, NULL);
+			     (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0,
+			     vsg->pages, NULL);

 	up_read(&current->mm->mmap_sem);
 	if (ret != vsg->num_pages) {
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index c68746ce6624..224ad274ea0b 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -94,6 +94,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	unsigned long dma_attrs = 0;
 	struct scatterlist *sg, *sg_list_start;
 	int need_release = 0;
+	unsigned int gup_flags = FOLL_WRITE;

 	if (dmasync)
 		dma_attrs |= DMA_ATTR_WRITE_BARRIER;
@@ -183,6 +184,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	if (ret)
 		goto out;

+	if (!umem->writable)
+		gup_flags |= FOLL_FORCE;
+
 	need_release = 1;
 	sg_list_start = umem->sg_head.sgl;

@@ -190,7 +194,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		ret = get_user_pages(cur_base,
 				     min_t(unsigned long, npages,
 					   PAGE_SIZE / sizeof (struct page *)),
-				     1, !umem->writable, page_list, vma_list);
+				     gup_flags, page_list, vma_list);

 		if (ret < 0)
 			goto out;
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 75077a018675..1f0fe3217f23 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 	u64 off;
 	int j, k, ret = 0, start_idx, npages = 0;
 	u64 base_virt_addr;
+	unsigned int flags = 0;

 	if (access_mask == 0)
 		return -EINVAL;
@@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 		goto out_put_task;
 	}

+	if (access_mask & ODP_WRITE_ALLOWED_BIT)
+		flags |= FOLL_WRITE;
+
 	start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
 	k = start_idx;

@@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 		 */
 		npages = get_user_pages_remote(owning_process, owning_mm,
 				user_virt, gup_num_pages,
-				access_mask & ODP_WRITE_ALLOWED_BIT,
-				0, local_page_list, NULL);
+				flags, local_page_list, NULL);
 		up_read(&owning_mm->mmap_sem);

 		if (npages < 0)
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 6c00d04b8b28..c6fe89d79248 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 		goto out;
 	}

-	ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL);
+	ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
 	if (ret < 0)
 		goto out;

diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 2d2b94fd3633..75f08624ac05 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -67,7 +67,8 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,

 	for (got = 0; got < num_pages; got += ret) {
 		ret = get_user_pages(start_page + got * PAGE_SIZE,
-				     num_pages - got, 1, 1,
+				     num_pages - got,
+				     FOLL_WRITE | FOLL_FORCE,
 				     p + got, NULL);
 		if (ret < 0)
 			goto bail_release;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index a0b6ebee4d8a..1ccee6ea5bc3 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -111,6 +111,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	int i;
 	int flags;
 	dma_addr_t pa;
+	unsigned int gup_flags;

 	if (!can_do_mlock())
 		return -EPERM;
@@ -135,6 +136,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,

 	flags = IOMMU_READ | IOMMU_CACHE;
 	flags |= (writable) ? IOMMU_WRITE : 0;
+	gup_flags = FOLL_WRITE;
+	gup_flags |= (writable) ? 0 : FOLL_FORCE;
 	cur_base = addr & PAGE_MASK;
 	ret = 0;

@@ -142,7 +145,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 		ret = get_user_pages(cur_base,
 					min_t(unsigned long, npages,
 					PAGE_SIZE / sizeof(struct page *)),
-					1, !writable, page_list, NULL);
+					gup_flags, page_list, NULL);

 		if (ret < 0)
 			goto out;
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 4769469fe842..2c9232ef7baa 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -124,8 +124,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
 	}

 	/* Get user pages for DMA Xfer */
-	err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, 0,
-			1, dma->map);
+	err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
+			dma->map, FOLL_FORCE);

 	if (user_dma.page_count != err) {
 		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index b094054cda6e..f7299d3d8244 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -76,11 +76,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,

 	/* Get user pages for DMA Xfer */
 	y_pages = get_user_pages_unlocked(y_dma.uaddr,
-			y_dma.page_count, 0, 1, &dma->map[0]);
+			y_dma.page_count, &dma->map[0], FOLL_FORCE);
 	uv_pages = 0; /* silence gcc. value is set and consumed only if: */
 	if (y_pages == y_dma.page_count) {
 		uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
-			uv_dma.page_count, 0, 1, &dma->map[y_pages]);
+			uv_dma.page_count, &dma->map[y_pages],
+			FOLL_FORCE);
 	}

 	if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index e668dde6d857..a31b95cb3b09 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -214,7 +214,7 @@ static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp,
 	if (!vec)
 		return -ENOMEM;

-	ret = get_vaddr_frames(virtp, 1, true, false, vec);
+	ret = get_vaddr_frames(virtp, 1, FOLL_WRITE, vec);
 	if (ret != 1) {
 		frame_vector_destroy(vec);
 		return -EINVAL;
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index f300f060b3f3..1db0af6c7f94 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -156,6 +156,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
 {
 	unsigned long first, last;
 	int err, rw = 0;
+	unsigned int flags = FOLL_FORCE;

 	dma->direction = direction;
 	switch (dma->direction) {
@@ -178,12 +179,14 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
 	if (NULL == dma->pages)
 		return -ENOMEM;

+	if (rw == READ)
+		flags |= FOLL_WRITE;
+
 	dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
 		data, size, dma->nr_pages);

 	err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
-			     rw == READ, 1, /* force */
-			     dma->pages, NULL);
+			     flags, dma->pages, NULL);

 	if (err != dma->nr_pages) {
 		dma->nr_pages = (err >= 0) ? err : 0;
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 3c3b517f1d1c..1cd322e939c7 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -42,6 +42,10 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
 	unsigned long first, last;
 	unsigned long nr;
 	struct frame_vector *vec;
+	unsigned int flags = FOLL_FORCE;
+
+	if (write)
+		flags |= FOLL_WRITE;

 	first = start >> PAGE_SHIFT;
 	last = (start + length - 1) >> PAGE_SHIFT;
@@ -49,7 +53,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
 	vec = frame_vector_create(nr);
 	if (!vec)
 		return ERR_PTR(-ENOMEM);
-	ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
+	ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec);
 	if (ret < 0)
 		goto out_destroy;
 	/* We accept only complete set of PFNs */
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index e0203b1a20fd..f806a4471eb9 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -1396,8 +1396,7 @@ retry:
 		pinned_pages->nr_pages = get_user_pages(
 				(u64)addr,
 				nr_pages,
-				!!(prot & SCIF_PROT_WRITE),
-				0,
+				(prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
 				pinned_pages->pages,
 				NULL);
 		up_write(&mm->mmap_sem);
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index a2d97b9b17e3..6fb773dbcd0c 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -198,7 +198,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
 #else
 	*pageshift = PAGE_SHIFT;
 #endif
-	if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0)
+	if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
 		return -EFAULT;
 	*paddr = page_to_phys(page);
 	put_page(page);
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 07462d79d040..1aba2c74160e 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -309,7 +309,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
 		 * much memory to the process.
 		 */
 		down_read(&current->mm->mmap_sem);
-		ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);
+		ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE,
+				&page, NULL);
 		up_read(&current->mm->mmap_sem);
 		if (ret < 0)
 			break;
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 436dfe871d32..9013a585507e 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -892,7 +892,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 		down_read(&current->mm->mmap_sem);
 		pinned = get_user_pages(
 				(unsigned long)xfer->loc_addr & PAGE_MASK,
-				nr_pages, dir == DMA_FROM_DEVICE, 0,
+				nr_pages,
+				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
 				page_list, NULL);
 		up_read(&current->mm->mmap_sem);

diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 7af5226aa55b..618422ea3a41 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4922,9 +4922,8 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
 	res = get_user_pages_unlocked(
 		uaddr,
 		nr_pages,
-		rw == READ,
-		0, /* don't force */
-		pages);
+		pages,
+		rw == READ ? FOLL_WRITE : 0); /* don't force */

 	/* Errors and no page mapped should return here */
 	if (res < nr_pages)
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index c29040fdf9a7..1091b9f1dd07 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -423,8 +423,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
 	actual_pages = get_user_pages(task, task->mm,
 				  (unsigned long)buf & ~(PAGE_SIZE - 1),
 				  num_pages,
-				  (type == PAGELIST_READ) /*Write */ ,
-				  0 /*Force */ ,
+				  (type == PAGELIST_READ) ? FOLL_WRITE : 0,
 				  pages,
 				  NULL /*vmas */);
 	up_read(&task->mm->mmap_sem);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index e11c0e07471b..7b6cd4d80621 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -1477,8 +1477,7 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
 		current->mm,              /* mm */
 		(unsigned long)virt_addr, /* start */
 		num_pages,                /* len */
-		0,                        /* write */
-		0,                        /* force */
+		0,                        /* gup_flags */
 		pages,                    /* pages (array of page pointers) */
 		NULL);                    /* vmas */
 	up_read(&current->mm->mmap_sem);
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 3b1ca4411073..a2564ab91e62 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -686,8 +686,8 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
 	if (!pages)
 		return -ENOMEM;

-	ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, WRITE,
-			0, pages);
+	ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, pages,
+			FOLL_WRITE);

 	if (ret < nr_pages) {
 		nr_pages = ret;
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 60bdad3a689b..150ce2abf6c8 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -245,8 +245,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
 	/* Get the physical addresses of the source buffer */
 	down_read(&current->mm->mmap_sem);
 	num_pinned = get_user_pages(param.local_vaddr - lb_offset,
-		num_pages, (param.source == -1) ? READ : WRITE,
-		0, pages, NULL);
+		num_pages, (param.source == -1) ? 0 : FOLL_WRITE,
+		pages, NULL);
 	up_read(&current->mm->mmap_sem);

 	if (num_pinned != num_pages) {
diff --git a/fs/exec.c b/fs/exec.c
index 6fcfb3f7b137..4e497b9ee71e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -191,6 +191,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 {
 	struct page *page;
 	int ret;
+	unsigned int gup_flags = FOLL_FORCE;

 #ifdef CONFIG_STACK_GROWSUP
 	if (write) {
@@ -199,12 +200,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 			return NULL;
 	}
 #endif
+
+	if (write)
+		gup_flags |= FOLL_WRITE;
+
 	/*
 	 * We are doing an exec().  'current' is the process
 	 * doing the exec and bprm->mm is the new process's mm.
 	 */
-	ret = get_user_pages_remote(current, bprm->mm, pos, 1, write,
-			1, &page, NULL);
+	ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
+			&page, NULL);
 	if (ret <= 0)
 		return NULL;

diff --git a/fs/proc/base.c b/fs/proc/base.c
index c2964d890c9a..8e654468ab67 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -252,7 +252,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
 	 * Inherently racy -- command line shares address space
 	 * with code and data.
 	 */
-	rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
+	rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_FORCE);
 	if (rv <= 0)
 		goto out_free_page;

@@ -270,7 +270,8 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
 			int nr_read;

 			_count = min3(count, len, PAGE_SIZE);
-			nr_read = access_remote_vm(mm, p, page, _count, 0);
+			nr_read = access_remote_vm(mm, p, page, _count,
+					FOLL_FORCE);
 			if (nr_read < 0)
 				rv = nr_read;
 			if (nr_read <= 0)
@@ -305,7 +306,8 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
 			bool final;

 			_count = min3(count, len, PAGE_SIZE);
-			nr_read = access_remote_vm(mm, p, page, _count, 0);
+			nr_read = access_remote_vm(mm, p, page, _count,
+					FOLL_FORCE);
 			if (nr_read < 0)
 				rv = nr_read;
 			if (nr_read <= 0)
@@ -354,7 +356,8 @@ skip_argv:
 		bool final;

 		_count = min3(count, len, PAGE_SIZE);
-		nr_read = access_remote_vm(mm, p, page, _count, 0);
+		nr_read = access_remote_vm(mm, p, page, _count,
+				FOLL_FORCE);
 		if (nr_read < 0)
 			rv = nr_read;
 		if (nr_read <= 0)
@@ -832,6 +835,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
 	unsigned long addr = *ppos;
 	ssize_t copied;
 	char *page;
+	unsigned int flags = FOLL_FORCE;

 	if (!mm)
 		return 0;
@@ -844,6 +848,9 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
 	if (!atomic_inc_not_zero(&mm->mm_users))
 		goto free;

+	if (write)
+		flags |= FOLL_WRITE;
+
 	while (count > 0) {
 		int this_len = min_t(int, count, PAGE_SIZE);

@@ -852,7 +859,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
 			break;
 		}

-		this_len = access_remote_vm(mm, addr, page, this_len, write);
+		this_len = access_remote_vm(mm, addr, page, this_len, flags);
 		if (!this_len) {
 			if (!copied)
 				copied = -EIO;
@@ -965,7 +972,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
 		this_len = min(max_len, this_len);

 		retval = access_remote_vm(mm, (env_start + src),
-				page, this_len, 0);
+				page, this_len, FOLL_FORCE);

 		if (retval <= 0) {
 			ret = retval;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e9caec6a51e9..ffbd72979ee7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1266,9 +1266,10 @@ static inline int fixup_user_fault(struct task_struct *tsk,
 }
 #endif

-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
+		unsigned int gup_flags);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-		void *buf, int len, int write);
+		void *buf, int len, unsigned int gup_flags);

 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		      unsigned long start, unsigned long nr_pages,
@@ -1276,19 +1277,18 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1276 struct vm_area_struct **vmas, int *nonblocking); 1277 struct vm_area_struct **vmas, int *nonblocking);
1277long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, 1278long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1278 unsigned long start, unsigned long nr_pages, 1279 unsigned long start, unsigned long nr_pages,
1279 int write, int force, struct page **pages, 1280 unsigned int gup_flags, struct page **pages,
1280 struct vm_area_struct **vmas); 1281 struct vm_area_struct **vmas);
1281long get_user_pages(unsigned long start, unsigned long nr_pages, 1282long get_user_pages(unsigned long start, unsigned long nr_pages,
1282 int write, int force, struct page **pages, 1283 unsigned int gup_flags, struct page **pages,
1283 struct vm_area_struct **vmas); 1284 struct vm_area_struct **vmas);
1284long get_user_pages_locked(unsigned long start, unsigned long nr_pages, 1285long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
1285 int write, int force, struct page **pages, int *locked); 1286 unsigned int gup_flags, struct page **pages, int *locked);
1286long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, 1287long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
1287 unsigned long start, unsigned long nr_pages, 1288 unsigned long start, unsigned long nr_pages,
1288 int write, int force, struct page **pages, 1289 struct page **pages, unsigned int gup_flags);
1289 unsigned int gup_flags);
1290long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 1290long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
1291 int write, int force, struct page **pages); 1291 struct page **pages, unsigned int gup_flags);
1292int get_user_pages_fast(unsigned long start, int nr_pages, int write, 1292int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1293 struct page **pages); 1293 struct page **pages);
1294 1294
@@ -1306,7 +1306,7 @@ struct frame_vector {
1306struct frame_vector *frame_vector_create(unsigned int nr_frames); 1306struct frame_vector *frame_vector_create(unsigned int nr_frames);
1307void frame_vector_destroy(struct frame_vector *vec); 1307void frame_vector_destroy(struct frame_vector *vec);
1308int get_vaddr_frames(unsigned long start, unsigned int nr_pfns, 1308int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
1309 bool write, bool force, struct frame_vector *vec); 1309 unsigned int gup_flags, struct frame_vector *vec);
1310void put_vaddr_frames(struct frame_vector *vec); 1310void put_vaddr_frames(struct frame_vector *vec);
1311int frame_vector_to_pages(struct frame_vector *vec); 1311int frame_vector_to_pages(struct frame_vector *vec);
1312void frame_vector_to_pfns(struct frame_vector *vec); 1312void frame_vector_to_pfns(struct frame_vector *vec);
@@ -2232,6 +2232,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
2232#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ 2232#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
2233#define FOLL_MLOCK 0x1000 /* lock present pages */ 2233#define FOLL_MLOCK 0x1000 /* lock present pages */
2234#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ 2234#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
2235#define FOLL_COW 0x4000 /* internal GUP flag */
2235 2236
2236typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, 2237typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
2237 void *data); 2238 void *data);
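
The include/linux/mm.h hunk above is the interface change the rest of the series converts callers to: the `int write, int force` pairs in these prototypes collapse into a single `unsigned int gup_flags`, with write mapping to FOLL_WRITE and force to FOLL_FORCE (get_user_pages_fast() keeps its plain `int write` for now). For a driver-style caller the translation is mechanical; a hedged sketch follows, with an invented function name and no claim that such a caller exists in the tree:

    #include <linux/mm.h>

    /*
     * Illustrative only: pin @nr user pages for writing, as a driver might.
     *
     *   old: get_user_pages(start, nr, 1, 0, pages, NULL);    (write=1, force=0)
     *   new: get_user_pages(start, nr, FOLL_WRITE, pages, NULL);
     */
    static long pin_user_buffer(unsigned long start, unsigned long nr,
                                struct page **pages)
    {
            return get_user_pages(start, nr, FOLL_WRITE, pages, NULL);
    }

Note that get_user_pages_unlocked() and __get_user_pages_unlocked() also move gup_flags to the end of the argument list, after the pages array, so those callers need their arguments reordered as well as translated.
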
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index d4129bb05e5d..f9ec9add2164 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -300,7 +300,8 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
300 300
301retry: 301retry:
302 /* Read the page with vaddr into memory */ 302 /* Read the page with vaddr into memory */
303 ret = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma); 303 ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page,
304 &vma);
304 if (ret <= 0) 305 if (ret <= 0)
305 return ret; 306 return ret;
306 307
@@ -1710,7 +1711,8 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
1710 * but we treat this as a 'remote' access since it is 1711 * but we treat this as a 'remote' access since it is
1711 * essentially a kernel access to the memory. 1712 * essentially a kernel access to the memory.
1712 */ 1713 */
1713 result = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &page, NULL); 1714 result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
1715 NULL);
1714 if (result < 0) 1716 if (result < 0)
1715 return result; 1717 return result;
1716 1718
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 2a99027312a6..e6474f7272ec 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -537,7 +537,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
537 int this_len, retval; 537 int this_len, retval;
538 538
539 this_len = (len > sizeof(buf)) ? sizeof(buf) : len; 539 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
540 retval = access_process_vm(tsk, src, buf, this_len, 0); 540 retval = access_process_vm(tsk, src, buf, this_len, FOLL_FORCE);
541 if (!retval) { 541 if (!retval) {
542 if (copied) 542 if (copied)
543 break; 543 break;
@@ -564,7 +564,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
564 this_len = (len > sizeof(buf)) ? sizeof(buf) : len; 564 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
565 if (copy_from_user(buf, src, this_len)) 565 if (copy_from_user(buf, src, this_len))
566 return -EFAULT; 566 return -EFAULT;
567 retval = access_process_vm(tsk, dst, buf, this_len, 1); 567 retval = access_process_vm(tsk, dst, buf, this_len,
568 FOLL_FORCE | FOLL_WRITE);
568 if (!retval) { 569 if (!retval) {
569 if (copied) 570 if (copied)
570 break; 571 break;
@@ -1127,7 +1128,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
1127 unsigned long tmp; 1128 unsigned long tmp;
1128 int copied; 1129 int copied;
1129 1130
1130 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0); 1131 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
1131 if (copied != sizeof(tmp)) 1132 if (copied != sizeof(tmp))
1132 return -EIO; 1133 return -EIO;
1133 return put_user(tmp, (unsigned long __user *)data); 1134 return put_user(tmp, (unsigned long __user *)data);
@@ -1138,7 +1139,8 @@ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
1138{ 1139{
1139 int copied; 1140 int copied;
1140 1141
1141 copied = access_process_vm(tsk, addr, &data, sizeof(data), 1); 1142 copied = access_process_vm(tsk, addr, &data, sizeof(data),
1143 FOLL_FORCE | FOLL_WRITE);
1142 return (copied == sizeof(data)) ? 0 : -EIO; 1144 return (copied == sizeof(data)) ? 0 : -EIO;
1143} 1145}
1144 1146
@@ -1155,7 +1157,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
1155 switch (request) { 1157 switch (request) {
1156 case PTRACE_PEEKTEXT: 1158 case PTRACE_PEEKTEXT:
1157 case PTRACE_PEEKDATA: 1159 case PTRACE_PEEKDATA:
1158 ret = access_process_vm(child, addr, &word, sizeof(word), 0); 1160 ret = access_process_vm(child, addr, &word, sizeof(word),
1161 FOLL_FORCE);
1159 if (ret != sizeof(word)) 1162 if (ret != sizeof(word))
1160 ret = -EIO; 1163 ret = -EIO;
1161 else 1164 else
@@ -1164,7 +1167,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
1164 1167
1165 case PTRACE_POKETEXT: 1168 case PTRACE_POKETEXT:
1166 case PTRACE_POKEDATA: 1169 case PTRACE_POKEDATA:
1167 ret = access_process_vm(child, addr, &data, sizeof(data), 1); 1170 ret = access_process_vm(child, addr, &data, sizeof(data),
1171 FOLL_FORCE | FOLL_WRITE);
1168 ret = (ret != sizeof(data) ? -EIO : 0); 1172 ret = (ret != sizeof(data) ? -EIO : 0);
1169 break; 1173 break;
1170 1174
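
The kernel/ptrace.c conversions reduce to two fixed flag combinations: FOLL_FORCE for the peek paths and FOLL_FORCE | FOLL_WRITE for the poke paths. A small sketch of that pairing, using hypothetical wrapper names (peek_word/poke_word) rather than anything added by the patch:

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/sched.h>

    /* Read one word from the tracee, PTRACE_PEEKDATA style. */
    static int peek_word(struct task_struct *tsk, unsigned long addr,
                         unsigned long *word)
    {
            int copied = access_process_vm(tsk, addr, word, sizeof(*word),
                                           FOLL_FORCE);

            return copied == sizeof(*word) ? 0 : -EIO;
    }

    /* Write one word into the tracee, PTRACE_POKEDATA style. */
    static int poke_word(struct task_struct *tsk, unsigned long addr,
                         unsigned long word)
    {
            int copied = access_process_vm(tsk, addr, &word, sizeof(word),
                                           FOLL_FORCE | FOLL_WRITE);

            return copied == sizeof(word) ? 0 : -EIO;
    }
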
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index 381bb07ed14f..db77dcb38afd 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -11,10 +11,7 @@
11 * get_vaddr_frames() - map virtual addresses to pfns 11 * get_vaddr_frames() - map virtual addresses to pfns
12 * @start: starting user address 12 * @start: starting user address
13 * @nr_frames: number of pages / pfns from start to map 13 * @nr_frames: number of pages / pfns from start to map
14 * @write: whether pages will be written to by the caller 14 * @gup_flags: flags modifying lookup behaviour
15 * @force: whether to force write access even if user mapping is
16 * readonly. See description of the same argument of
17 get_user_pages().
18 * @vec: structure which receives pages / pfns of the addresses mapped. 15 * @vec: structure which receives pages / pfns of the addresses mapped.
19 * It should have space for at least nr_frames entries. 16 * It should have space for at least nr_frames entries.
20 * 17 *
@@ -34,7 +31,7 @@
34 * This function takes care of grabbing mmap_sem as necessary. 31 * This function takes care of grabbing mmap_sem as necessary.
35 */ 32 */
36int get_vaddr_frames(unsigned long start, unsigned int nr_frames, 33int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
37 bool write, bool force, struct frame_vector *vec) 34 unsigned int gup_flags, struct frame_vector *vec)
38{ 35{
39 struct mm_struct *mm = current->mm; 36 struct mm_struct *mm = current->mm;
40 struct vm_area_struct *vma; 37 struct vm_area_struct *vma;
@@ -59,7 +56,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
59 vec->got_ref = true; 56 vec->got_ref = true;
60 vec->is_pfns = false; 57 vec->is_pfns = false;
61 ret = get_user_pages_locked(start, nr_frames, 58 ret = get_user_pages_locked(start, nr_frames,
62 write, force, (struct page **)(vec->ptrs), &locked); 59 gup_flags, (struct page **)(vec->ptrs), &locked);
63 goto out; 60 goto out;
64 } 61 }
65 62
diff --git a/mm/gup.c b/mm/gup.c
index 96b2b2fd0fbd..7aa113c2d373 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -60,6 +60,16 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
60 return -EEXIST; 60 return -EEXIST;
61} 61}
62 62
63/*
64 * FOLL_FORCE can write to even unwritable pte's, but only
65 * after we've gone through a COW cycle and they are dirty.
66 */
67static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
68{
69 return pte_write(pte) ||
70 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
71}
72
63static struct page *follow_page_pte(struct vm_area_struct *vma, 73static struct page *follow_page_pte(struct vm_area_struct *vma,
64 unsigned long address, pmd_t *pmd, unsigned int flags) 74 unsigned long address, pmd_t *pmd, unsigned int flags)
65{ 75{
@@ -95,7 +105,7 @@ retry:
95 } 105 }
96 if ((flags & FOLL_NUMA) && pte_protnone(pte)) 106 if ((flags & FOLL_NUMA) && pte_protnone(pte))
97 goto no_page; 107 goto no_page;
98 if ((flags & FOLL_WRITE) && !pte_write(pte)) { 108 if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
99 pte_unmap_unlock(ptep, ptl); 109 pte_unmap_unlock(ptep, ptl);
100 return NULL; 110 return NULL;
101 } 111 }
@@ -412,7 +422,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
412 * reCOWed by userspace write). 422 * reCOWed by userspace write).
413 */ 423 */
414 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) 424 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
415 *flags &= ~FOLL_WRITE; 425 *flags |= FOLL_COW;
416 return 0; 426 return 0;
417} 427}
418 428
@@ -729,7 +739,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
729 struct mm_struct *mm, 739 struct mm_struct *mm,
730 unsigned long start, 740 unsigned long start,
731 unsigned long nr_pages, 741 unsigned long nr_pages,
732 int write, int force,
733 struct page **pages, 742 struct page **pages,
734 struct vm_area_struct **vmas, 743 struct vm_area_struct **vmas,
735 int *locked, bool notify_drop, 744 int *locked, bool notify_drop,
@@ -747,10 +756,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
747 756
748 if (pages) 757 if (pages)
749 flags |= FOLL_GET; 758 flags |= FOLL_GET;
750 if (write)
751 flags |= FOLL_WRITE;
752 if (force)
753 flags |= FOLL_FORCE;
754 759
755 pages_done = 0; 760 pages_done = 0;
756 lock_dropped = false; 761 lock_dropped = false;
@@ -843,12 +848,12 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
843 * up_read(&mm->mmap_sem); 848 * up_read(&mm->mmap_sem);
844 */ 849 */
845long get_user_pages_locked(unsigned long start, unsigned long nr_pages, 850long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
846 int write, int force, struct page **pages, 851 unsigned int gup_flags, struct page **pages,
847 int *locked) 852 int *locked)
848{ 853{
849 return __get_user_pages_locked(current, current->mm, start, nr_pages, 854 return __get_user_pages_locked(current, current->mm, start, nr_pages,
850 write, force, pages, NULL, locked, true, 855 pages, NULL, locked, true,
851 FOLL_TOUCH); 856 gup_flags | FOLL_TOUCH);
852} 857}
853EXPORT_SYMBOL(get_user_pages_locked); 858EXPORT_SYMBOL(get_user_pages_locked);
854 859
@@ -864,14 +869,14 @@ EXPORT_SYMBOL(get_user_pages_locked);
864 */ 869 */
865__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, 870__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
866 unsigned long start, unsigned long nr_pages, 871 unsigned long start, unsigned long nr_pages,
867 int write, int force, struct page **pages, 872 struct page **pages, unsigned int gup_flags)
868 unsigned int gup_flags)
869{ 873{
870 long ret; 874 long ret;
871 int locked = 1; 875 int locked = 1;
876
872 down_read(&mm->mmap_sem); 877 down_read(&mm->mmap_sem);
873 ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, 878 ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
874 pages, NULL, &locked, false, gup_flags); 879 &locked, false, gup_flags);
875 if (locked) 880 if (locked)
876 up_read(&mm->mmap_sem); 881 up_read(&mm->mmap_sem);
877 return ret; 882 return ret;
@@ -896,10 +901,10 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
896 * "force" parameter). 901 * "force" parameter).
897 */ 902 */
898long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 903long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
899 int write, int force, struct page **pages) 904 struct page **pages, unsigned int gup_flags)
900{ 905{
901 return __get_user_pages_unlocked(current, current->mm, start, nr_pages, 906 return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
902 write, force, pages, FOLL_TOUCH); 907 pages, gup_flags | FOLL_TOUCH);
903} 908}
904EXPORT_SYMBOL(get_user_pages_unlocked); 909EXPORT_SYMBOL(get_user_pages_unlocked);
905 910
@@ -910,9 +915,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
910 * @mm: mm_struct of target mm 915 * @mm: mm_struct of target mm
911 * @start: starting user address 916 * @start: starting user address
912 * @nr_pages: number of pages from start to pin 917 * @nr_pages: number of pages from start to pin
913 * @write: whether pages will be written to by the caller 918 * @gup_flags: flags modifying lookup behaviour
914 * @force: whether to force access even when user mapping is currently
915 * protected (but never forces write access to shared mapping).
916 * @pages: array that receives pointers to the pages pinned. 919 * @pages: array that receives pointers to the pages pinned.
917 * Should be at least nr_pages long. Or NULL, if caller 920 * Should be at least nr_pages long. Or NULL, if caller
918 * only intends to ensure the pages are faulted in. 921 * only intends to ensure the pages are faulted in.
@@ -941,9 +944,9 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
941 * or similar operation cannot guarantee anything stronger anyway because 944 * or similar operation cannot guarantee anything stronger anyway because
942 * locks can't be held over the syscall boundary. 945 * locks can't be held over the syscall boundary.
943 * 946 *
944 * If write=0, the page must not be written to. If the page is written to, 947 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
945 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called 948 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
946 * after the page is finished with, and before put_page is called. 949 * be called after the page is finished with, and before put_page is called.
947 * 950 *
948 * get_user_pages is typically used for fewer-copy IO operations, to get a 951 * get_user_pages is typically used for fewer-copy IO operations, to get a
949 * handle on the memory by some means other than accesses via the user virtual 952 * handle on the memory by some means other than accesses via the user virtual
@@ -960,12 +963,12 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
960 */ 963 */
961long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, 964long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
962 unsigned long start, unsigned long nr_pages, 965 unsigned long start, unsigned long nr_pages,
963 int write, int force, struct page **pages, 966 unsigned int gup_flags, struct page **pages,
964 struct vm_area_struct **vmas) 967 struct vm_area_struct **vmas)
965{ 968{
966 return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, 969 return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
967 pages, vmas, NULL, false, 970 NULL, false,
968 FOLL_TOUCH | FOLL_REMOTE); 971 gup_flags | FOLL_TOUCH | FOLL_REMOTE);
969} 972}
970EXPORT_SYMBOL(get_user_pages_remote); 973EXPORT_SYMBOL(get_user_pages_remote);
971 974
@@ -976,12 +979,12 @@ EXPORT_SYMBOL(get_user_pages_remote);
976 * obviously don't pass FOLL_REMOTE in here. 979 * obviously don't pass FOLL_REMOTE in here.
977 */ 980 */
978long get_user_pages(unsigned long start, unsigned long nr_pages, 981long get_user_pages(unsigned long start, unsigned long nr_pages,
979 int write, int force, struct page **pages, 982 unsigned int gup_flags, struct page **pages,
980 struct vm_area_struct **vmas) 983 struct vm_area_struct **vmas)
981{ 984{
982 return __get_user_pages_locked(current, current->mm, start, nr_pages, 985 return __get_user_pages_locked(current, current->mm, start, nr_pages,
983 write, force, pages, vmas, NULL, false, 986 pages, vmas, NULL, false,
984 FOLL_TOUCH); 987 gup_flags | FOLL_TOUCH);
985} 988}
986EXPORT_SYMBOL(get_user_pages); 989EXPORT_SYMBOL(get_user_pages);
987 990
@@ -1505,7 +1508,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1505 start += nr << PAGE_SHIFT; 1508 start += nr << PAGE_SHIFT;
1506 pages += nr; 1509 pages += nr;
1507 1510
1508 ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages); 1511 ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
1512 write ? FOLL_WRITE : 0);
1509 1513
1510 /* Have to be a bit careful with return values */ 1514 /* Have to be a bit careful with return values */
1511 if (nr > 0) { 1515 if (nr > 0) {
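
Beyond the signature change, the mm/gup.c hunk replaces the old trick of clearing FOLL_WRITE after a forced COW break with the internal FOLL_COW flag: faultin_page() now records that a COW cycle has happened, and follow_page_pte() honours a forced write through an unwritable PTE only once that cycle has left the PTE dirty. Callers never set FOLL_COW themselves; they simply keep passing FOLL_FORCE | FOLL_WRITE, as in this hedged sketch of a debugger-style remote write (the helper name is invented, the cache-flushing copy_to_user_page() step is skipped for brevity, and @len is assumed not to cross a page boundary):

    #include <linux/errno.h>
    #include <linux/highmem.h>
    #include <linux/mm.h>
    #include <linux/sched.h>
    #include <linux/string.h>

    /*
     * Illustrative only: force-write @len bytes at @addr in a foreign @mm,
     * roughly what __access_remote_vm() does one page at a time. The caller
     * must hold a reference on @mm.
     */
    static int poke_remote_page(struct task_struct *tsk, struct mm_struct *mm,
                                unsigned long addr, const void *src, int len)
    {
            struct page *page;
            void *kaddr;
            long ret;

            down_read(&mm->mmap_sem);
            /* FOLL_COW is internal; the forced COW cycle happens under the hood. */
            ret = get_user_pages_remote(tsk, mm, addr & PAGE_MASK, 1,
                                        FOLL_FORCE | FOLL_WRITE, &page, NULL);
            up_read(&mm->mmap_sem);
            if (ret <= 0)
                    return -EFAULT;

            kaddr = kmap(page);
            memcpy(kaddr + (addr & ~PAGE_MASK), src, len);
            kunmap(page);

            set_page_dirty_lock(page);
            put_page(page);
            return len;
    }
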
diff --git a/mm/memory.c b/mm/memory.c
index fc1987dfd8cc..e18c57bdc75c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3869,10 +3869,11 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
3869 * given task for page fault accounting. 3869 * given task for page fault accounting.
3870 */ 3870 */
3871static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, 3871static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
3872 unsigned long addr, void *buf, int len, int write) 3872 unsigned long addr, void *buf, int len, unsigned int gup_flags)
3873{ 3873{
3874 struct vm_area_struct *vma; 3874 struct vm_area_struct *vma;
3875 void *old_buf = buf; 3875 void *old_buf = buf;
3876 int write = gup_flags & FOLL_WRITE;
3876 3877
3877 down_read(&mm->mmap_sem); 3878 down_read(&mm->mmap_sem);
3878 /* ignore errors, just check how much was successfully transferred */ 3879 /* ignore errors, just check how much was successfully transferred */
@@ -3882,7 +3883,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
3882 struct page *page = NULL; 3883 struct page *page = NULL;
3883 3884
3884 ret = get_user_pages_remote(tsk, mm, addr, 1, 3885 ret = get_user_pages_remote(tsk, mm, addr, 1,
3885 write, 1, &page, &vma); 3886 gup_flags, &page, &vma);
3886 if (ret <= 0) { 3887 if (ret <= 0) {
3887#ifndef CONFIG_HAVE_IOREMAP_PROT 3888#ifndef CONFIG_HAVE_IOREMAP_PROT
3888 break; 3889 break;
@@ -3934,14 +3935,14 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
3934 * @addr: start address to access 3935 * @addr: start address to access
3935 * @buf: source or destination buffer 3936 * @buf: source or destination buffer
3936 * @len: number of bytes to transfer 3937 * @len: number of bytes to transfer
3937 * @write: whether the access is a write 3938 * @gup_flags: flags modifying lookup behaviour
3938 * 3939 *
3939 * The caller must hold a reference on @mm. 3940 * The caller must hold a reference on @mm.
3940 */ 3941 */
3941int access_remote_vm(struct mm_struct *mm, unsigned long addr, 3942int access_remote_vm(struct mm_struct *mm, unsigned long addr,
3942 void *buf, int len, int write) 3943 void *buf, int len, unsigned int gup_flags)
3943{ 3944{
3944 return __access_remote_vm(NULL, mm, addr, buf, len, write); 3945 return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
3945} 3946}
3946 3947
3947/* 3948/*
@@ -3950,7 +3951,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
3950 * Do not walk the page table directly, use get_user_pages 3951 * Do not walk the page table directly, use get_user_pages
3951 */ 3952 */
3952int access_process_vm(struct task_struct *tsk, unsigned long addr, 3953int access_process_vm(struct task_struct *tsk, unsigned long addr,
3953 void *buf, int len, int write) 3954 void *buf, int len, unsigned int gup_flags)
3954{ 3955{
3955 struct mm_struct *mm; 3956 struct mm_struct *mm;
3956 int ret; 3957 int ret;
@@ -3959,7 +3960,8 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr,
3959 if (!mm) 3960 if (!mm)
3960 return 0; 3961 return 0;
3961 3962
3962 ret = __access_remote_vm(tsk, mm, addr, buf, len, write); 3963 ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
3964
3963 mmput(mm); 3965 mmput(mm);
3964 3966
3965 return ret; 3967 return ret;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ad1c96ac313c..0b859af06b87 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -850,7 +850,7 @@ static int lookup_node(unsigned long addr)
850 struct page *p; 850 struct page *p;
851 int err; 851 int err;
852 852
853 err = get_user_pages(addr & PAGE_MASK, 1, 0, 0, &p, NULL); 853 err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
854 if (err >= 0) { 854 if (err >= 0) {
855 err = page_to_nid(p); 855 err = page_to_nid(p);
856 put_page(p); 856 put_page(p);
diff --git a/mm/nommu.c b/mm/nommu.c
index 95daf81a4855..db5fd1795298 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -160,33 +160,25 @@ finish_or_fault:
160 * - don't permit access to VMAs that don't support it, such as I/O mappings 160 * - don't permit access to VMAs that don't support it, such as I/O mappings
161 */ 161 */
162long get_user_pages(unsigned long start, unsigned long nr_pages, 162long get_user_pages(unsigned long start, unsigned long nr_pages,
163 int write, int force, struct page **pages, 163 unsigned int gup_flags, struct page **pages,
164 struct vm_area_struct **vmas) 164 struct vm_area_struct **vmas)
165{ 165{
166 int flags = 0; 166 return __get_user_pages(current, current->mm, start, nr_pages,
167 167 gup_flags, pages, vmas, NULL);
168 if (write)
169 flags |= FOLL_WRITE;
170 if (force)
171 flags |= FOLL_FORCE;
172
173 return __get_user_pages(current, current->mm, start, nr_pages, flags,
174 pages, vmas, NULL);
175} 168}
176EXPORT_SYMBOL(get_user_pages); 169EXPORT_SYMBOL(get_user_pages);
177 170
178long get_user_pages_locked(unsigned long start, unsigned long nr_pages, 171long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
179 int write, int force, struct page **pages, 172 unsigned int gup_flags, struct page **pages,
180 int *locked) 173 int *locked)
181{ 174{
182 return get_user_pages(start, nr_pages, write, force, pages, NULL); 175 return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
183} 176}
184EXPORT_SYMBOL(get_user_pages_locked); 177EXPORT_SYMBOL(get_user_pages_locked);
185 178
186long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, 179long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
187 unsigned long start, unsigned long nr_pages, 180 unsigned long start, unsigned long nr_pages,
188 int write, int force, struct page **pages, 181 struct page **pages, unsigned int gup_flags)
189 unsigned int gup_flags)
190{ 182{
191 long ret; 183 long ret;
192 down_read(&mm->mmap_sem); 184 down_read(&mm->mmap_sem);
@@ -198,10 +190,10 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
198EXPORT_SYMBOL(__get_user_pages_unlocked); 190EXPORT_SYMBOL(__get_user_pages_unlocked);
199 191
200long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 192long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
201 int write, int force, struct page **pages) 193 struct page **pages, unsigned int gup_flags)
202{ 194{
203 return __get_user_pages_unlocked(current, current->mm, start, nr_pages, 195 return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
204 write, force, pages, 0); 196 pages, gup_flags);
205} 197}
206EXPORT_SYMBOL(get_user_pages_unlocked); 198EXPORT_SYMBOL(get_user_pages_unlocked);
207 199
@@ -1817,9 +1809,10 @@ void filemap_map_pages(struct fault_env *fe,
1817EXPORT_SYMBOL(filemap_map_pages); 1809EXPORT_SYMBOL(filemap_map_pages);
1818 1810
1819static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, 1811static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
1820 unsigned long addr, void *buf, int len, int write) 1812 unsigned long addr, void *buf, int len, unsigned int gup_flags)
1821{ 1813{
1822 struct vm_area_struct *vma; 1814 struct vm_area_struct *vma;
1815 int write = gup_flags & FOLL_WRITE;
1823 1816
1824 down_read(&mm->mmap_sem); 1817 down_read(&mm->mmap_sem);
1825 1818
@@ -1854,21 +1847,22 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
1854 * @addr: start address to access 1847 * @addr: start address to access
1855 * @buf: source or destination buffer 1848 * @buf: source or destination buffer
1856 * @len: number of bytes to transfer 1849 * @len: number of bytes to transfer
1857 * @write: whether the access is a write 1850 * @gup_flags: flags modifying lookup behaviour
1858 * 1851 *
1859 * The caller must hold a reference on @mm. 1852 * The caller must hold a reference on @mm.
1860 */ 1853 */
1861int access_remote_vm(struct mm_struct *mm, unsigned long addr, 1854int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1862 void *buf, int len, int write) 1855 void *buf, int len, unsigned int gup_flags)
1863{ 1856{
1864 return __access_remote_vm(NULL, mm, addr, buf, len, write); 1857 return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
1865} 1858}
1866 1859
1867/* 1860/*
1868 * Access another process' address space. 1861 * Access another process' address space.
1869 * - source/target buffer must be kernel space 1862 * - source/target buffer must be kernel space
1870 */ 1863 */
1871int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) 1864int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
1865 unsigned int gup_flags)
1872{ 1866{
1873 struct mm_struct *mm; 1867 struct mm_struct *mm;
1874 1868
@@ -1879,7 +1873,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
1879 if (!mm) 1873 if (!mm)
1880 return 0; 1874 return 0;
1881 1875
1882 len = __access_remote_vm(tsk, mm, addr, buf, len, write); 1876 len = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
1883 1877
1884 mmput(mm); 1878 mmput(mm);
1885 return len; 1879 return len;
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 07514d41ebcc..be8dc8d1edb9 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -88,12 +88,16 @@ static int process_vm_rw_single_vec(unsigned long addr,
88 ssize_t rc = 0; 88 ssize_t rc = 0;
89 unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES 89 unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
90 / sizeof(struct pages *); 90 / sizeof(struct pages *);
91 unsigned int flags = FOLL_REMOTE;
91 92
92 /* Work out address and page range required */ 93 /* Work out address and page range required */
93 if (len == 0) 94 if (len == 0)
94 return 0; 95 return 0;
95 nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1; 96 nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
96 97
98 if (vm_write)
99 flags |= FOLL_WRITE;
100
97 while (!rc && nr_pages && iov_iter_count(iter)) { 101 while (!rc && nr_pages && iov_iter_count(iter)) {
98 int pages = min(nr_pages, max_pages_per_loop); 102 int pages = min(nr_pages, max_pages_per_loop);
99 size_t bytes; 103 size_t bytes;
@@ -104,8 +108,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
104 * current/current->mm 108 * current/current->mm
105 */ 109 */
106 pages = __get_user_pages_unlocked(task, mm, pa, pages, 110 pages = __get_user_pages_unlocked(task, mm, pa, pages,
107 vm_write, 0, process_pages, 111 process_pages, flags);
108 FOLL_REMOTE);
109 if (pages <= 0) 112 if (pages <= 0)
110 return -EFAULT; 113 return -EFAULT;
111 114
diff --git a/mm/util.c b/mm/util.c
index 662cddf914af..952cbe7ad7b7 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -283,7 +283,8 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast);
283int __weak get_user_pages_fast(unsigned long start, 283int __weak get_user_pages_fast(unsigned long start,
284 int nr_pages, int write, struct page **pages) 284 int nr_pages, int write, struct page **pages)
285{ 285{
286 return get_user_pages_unlocked(start, nr_pages, write, 0, pages); 286 return get_user_pages_unlocked(start, nr_pages, pages,
287 write ? FOLL_WRITE : 0);
287} 288}
288EXPORT_SYMBOL_GPL(get_user_pages_fast); 289EXPORT_SYMBOL_GPL(get_user_pages_fast);
289 290
@@ -623,7 +624,7 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
623 if (len > buflen) 624 if (len > buflen)
624 len = buflen; 625 len = buflen;
625 626
626 res = access_process_vm(task, arg_start, buffer, len, 0); 627 res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
627 628
628 /* 629 /*
629 * If the nul at the end of args has been overwritten, then 630 * If the nul at the end of args has been overwritten, then
@@ -638,7 +639,8 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
638 if (len > buflen - res) 639 if (len > buflen - res)
639 len = buflen - res; 640 len = buflen - res;
640 res += access_process_vm(task, env_start, 641 res += access_process_vm(task, env_start,
641 buffer+res, len, 0); 642 buffer+res, len,
643 FOLL_FORCE);
642 res = strnlen(buffer, res); 644 res = strnlen(buffer, res);
643 } 645 }
644 } 646 }
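
The mm/util.c hunk shows the idiom used wherever a caller still only has a boolean: `write ? FOLL_WRITE : 0`. The weak get_user_pages_fast() fallback above is essentially that one-liner; the same translation applies to any legacy wrapper that keeps an int write parameter, as in this sketch (the wrapper name is made up):

    #include <linux/mm.h>

    /* Hypothetical legacy-style wrapper that still exposes write as an int. */
    static long pin_pages_legacy(unsigned long start, unsigned long nr_pages,
                                 int write, struct page **pages)
    {
            return get_user_pages_unlocked(start, nr_pages, pages,
                                           write ? FOLL_WRITE : 0);
    }
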
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 00d2601407c5..1a7c9a79a53c 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -26,7 +26,7 @@ struct page **ceph_get_direct_page_vector(const void __user *data,
26 while (got < num_pages) { 26 while (got < num_pages) {
27 rc = get_user_pages_unlocked( 27 rc = get_user_pages_unlocked(
28 (unsigned long)data + ((unsigned long)got * PAGE_SIZE), 28 (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
29 num_pages - got, write_page, 0, pages + got); 29 num_pages - got, pages + got, write_page ? FOLL_WRITE : 0);
30 if (rc < 0) 30 if (rc < 0)
31 break; 31 break;
32 BUG_ON(rc == 0); 32 BUG_ON(rc == 0);
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index ade7c6cad172..682b73af7766 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -881,7 +881,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
881 * the execve(). 881 * the execve().
882 */ 882 */
883 if (get_user_pages_remote(current, bprm->mm, pos, 1, 883 if (get_user_pages_remote(current, bprm->mm, pos, 1,
884 0, 1, &page, NULL) <= 0) 884 FOLL_FORCE, &page, NULL) <= 0)
885 return false; 885 return false;
886#else 886#else
887 page = bprm->page[pos / PAGE_SIZE]; 887 page = bprm->page[pos / PAGE_SIZE];
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index db9668869f6f..8035cc1eb955 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -84,7 +84,8 @@ static void async_pf_execute(struct work_struct *work)
84 * mm and might be done in another context, so we must 84 * mm and might be done in another context, so we must
85 * use FOLL_REMOTE. 85 * use FOLL_REMOTE.
86 */ 86 */
87 __get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL, FOLL_REMOTE); 87 __get_user_pages_unlocked(NULL, mm, addr, 1, NULL,
88 FOLL_WRITE | FOLL_REMOTE);
88 89
89 kvm_async_page_present_sync(vcpu, apf); 90 kvm_async_page_present_sync(vcpu, apf);
90 91
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 81dfc73d3df3..28510e72618a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1416,10 +1416,15 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
1416 down_read(&current->mm->mmap_sem); 1416 down_read(&current->mm->mmap_sem);
1417 npages = get_user_page_nowait(addr, write_fault, page); 1417 npages = get_user_page_nowait(addr, write_fault, page);
1418 up_read(&current->mm->mmap_sem); 1418 up_read(&current->mm->mmap_sem);
1419 } else 1419 } else {
1420 unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
1421
1422 if (write_fault)
1423 flags |= FOLL_WRITE;
1424
1420 npages = __get_user_pages_unlocked(current, current->mm, addr, 1, 1425 npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
1421 write_fault, 0, page, 1426 page, flags);
1422 FOLL_TOUCH|FOLL_HWPOISON); 1427 }
1423 if (npages != 1) 1428 if (npages != 1)
1424 return npages; 1429 return npages;
1425 1430
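
The virt/kvm/kvm_main.c hunk closes out the pattern used by callers that previously split their behaviour between a boolean argument and a hard-coded gup_flags tail: start from the flags that used to be hard-coded (here FOLL_TOUCH | FOLL_HWPOISON) and OR in FOLL_WRITE when the access is a write. A hedged sketch of the same shape outside kvm, with an invented function name:

    #include <linux/mm.h>
    #include <linux/sched.h>

    /*
     * Illustrative only: fault in one page at @addr in the current mm,
     * mirroring the hva_to_pfn_slow() conversion above.
     */
    static long fault_in_one_page(unsigned long addr, bool write_fault,
                                  struct page **page)
    {
            unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;

            if (write_fault)
                    flags |= FOLL_WRITE;

            return __get_user_pages_unlocked(current, current->mm, addr, 1,
                                             page, flags);
    }
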