diff options
-rw-r--r-- | drivers/gpu/drm/etnaviv/etnaviv_gem.c | 7 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_userptr.c | 6 | ||||
-rw-r--r-- | drivers/infiniband/core/umem_odp.c | 7 | ||||
-rw-r--r-- | fs/exec.c | 9 | ||||
-rw-r--r-- | include/linux/mm.h | 2 | ||||
-rw-r--r-- | kernel/events/uprobes.c | 6 | ||||
-rw-r--r-- | mm/gup.c | 22 | ||||
-rw-r--r-- | mm/memory.c | 6 | ||||
-rw-r--r-- | security/tomoyo/domain.c | 2 |
9 files changed, 40 insertions, 27 deletions
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 5ce3603e6eac..0370b842d9cc 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c | |||
@@ -748,19 +748,22 @@ static struct page **etnaviv_gem_userptr_do_get_pages( | |||
748 | int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT; | 748 | int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT; |
749 | struct page **pvec; | 749 | struct page **pvec; |
750 | uintptr_t ptr; | 750 | uintptr_t ptr; |
751 | unsigned int flags = 0; | ||
751 | 752 | ||
752 | pvec = drm_malloc_ab(npages, sizeof(struct page *)); | 753 | pvec = drm_malloc_ab(npages, sizeof(struct page *)); |
753 | if (!pvec) | 754 | if (!pvec) |
754 | return ERR_PTR(-ENOMEM); | 755 | return ERR_PTR(-ENOMEM); |
755 | 756 | ||
757 | if (!etnaviv_obj->userptr.ro) | ||
758 | flags |= FOLL_WRITE; | ||
759 | |||
756 | pinned = 0; | 760 | pinned = 0; |
757 | ptr = etnaviv_obj->userptr.ptr; | 761 | ptr = etnaviv_obj->userptr.ptr; |
758 | 762 | ||
759 | down_read(&mm->mmap_sem); | 763 | down_read(&mm->mmap_sem); |
760 | while (pinned < npages) { | 764 | while (pinned < npages) { |
761 | ret = get_user_pages_remote(task, mm, ptr, npages - pinned, | 765 | ret = get_user_pages_remote(task, mm, ptr, npages - pinned, |
762 | !etnaviv_obj->userptr.ro, 0, | 766 | flags, pvec + pinned, NULL); |
763 | pvec + pinned, NULL); | ||
764 | if (ret < 0) | 767 | if (ret < 0) |
765 | break; | 768 | break; |
766 | 769 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index e537930c64b5..c6f780f5abc9 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c | |||
@@ -508,6 +508,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) | |||
508 | pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY); | 508 | pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY); |
509 | if (pvec != NULL) { | 509 | if (pvec != NULL) { |
510 | struct mm_struct *mm = obj->userptr.mm->mm; | 510 | struct mm_struct *mm = obj->userptr.mm->mm; |
511 | unsigned int flags = 0; | ||
512 | |||
513 | if (!obj->userptr.read_only) | ||
514 | flags |= FOLL_WRITE; | ||
511 | 515 | ||
512 | ret = -EFAULT; | 516 | ret = -EFAULT; |
513 | if (atomic_inc_not_zero(&mm->mm_users)) { | 517 | if (atomic_inc_not_zero(&mm->mm_users)) { |
@@ -517,7 +521,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) | |||
517 | (work->task, mm, | 521 | (work->task, mm, |
518 | obj->userptr.ptr + pinned * PAGE_SIZE, | 522 | obj->userptr.ptr + pinned * PAGE_SIZE, |
519 | npages - pinned, | 523 | npages - pinned, |
520 | !obj->userptr.read_only, 0, | 524 | flags, |
521 | pvec + pinned, NULL); | 525 | pvec + pinned, NULL); |
522 | if (ret < 0) | 526 | if (ret < 0) |
523 | break; | 527 | break; |
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 75077a018675..1f0fe3217f23 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c | |||
@@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, | |||
527 | u64 off; | 527 | u64 off; |
528 | int j, k, ret = 0, start_idx, npages = 0; | 528 | int j, k, ret = 0, start_idx, npages = 0; |
529 | u64 base_virt_addr; | 529 | u64 base_virt_addr; |
530 | unsigned int flags = 0; | ||
530 | 531 | ||
531 | if (access_mask == 0) | 532 | if (access_mask == 0) |
532 | return -EINVAL; | 533 | return -EINVAL; |
@@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, | |||
556 | goto out_put_task; | 557 | goto out_put_task; |
557 | } | 558 | } |
558 | 559 | ||
560 | if (access_mask & ODP_WRITE_ALLOWED_BIT) | ||
561 | flags |= FOLL_WRITE; | ||
562 | |||
559 | start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT; | 563 | start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT; |
560 | k = start_idx; | 564 | k = start_idx; |
561 | 565 | ||
@@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, | |||
574 | */ | 578 | */ |
575 | npages = get_user_pages_remote(owning_process, owning_mm, | 579 | npages = get_user_pages_remote(owning_process, owning_mm, |
576 | user_virt, gup_num_pages, | 580 | user_virt, gup_num_pages, |
577 | access_mask & ODP_WRITE_ALLOWED_BIT, | 581 | flags, local_page_list, NULL); |
578 | 0, local_page_list, NULL); | ||
579 | up_read(&owning_mm->mmap_sem); | 582 | up_read(&owning_mm->mmap_sem); |
580 | 583 | ||
581 | if (npages < 0) | 584 | if (npages < 0) |
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -191,6 +191,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, | |||
191 | { | 191 | { |
192 | struct page *page; | 192 | struct page *page; |
193 | int ret; | 193 | int ret; |
194 | unsigned int gup_flags = FOLL_FORCE; | ||
194 | 195 | ||
195 | #ifdef CONFIG_STACK_GROWSUP | 196 | #ifdef CONFIG_STACK_GROWSUP |
196 | if (write) { | 197 | if (write) { |
@@ -199,12 +200,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, | |||
199 | return NULL; | 200 | return NULL; |
200 | } | 201 | } |
201 | #endif | 202 | #endif |
203 | |||
204 | if (write) | ||
205 | gup_flags |= FOLL_WRITE; | ||
206 | |||
202 | /* | 207 | /* |
203 | * We are doing an exec(). 'current' is the process | 208 | * We are doing an exec(). 'current' is the process |
204 | * doing the exec and bprm->mm is the new process's mm. | 209 | * doing the exec and bprm->mm is the new process's mm. |
205 | */ | 210 | */ |
206 | ret = get_user_pages_remote(current, bprm->mm, pos, 1, write, | 211 | ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags, |
207 | 1, &page, NULL); | 212 | &page, NULL); |
208 | if (ret <= 0) | 213 | if (ret <= 0) |
209 | return NULL; | 214 | return NULL; |
210 | 215 | ||
diff --git a/include/linux/mm.h b/include/linux/mm.h index 30bb5d9631bb..ecc4be7b67e0 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1276,7 +1276,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
1276 | struct vm_area_struct **vmas, int *nonblocking); | 1276 | struct vm_area_struct **vmas, int *nonblocking); |
1277 | long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, | 1277 | long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, |
1278 | unsigned long start, unsigned long nr_pages, | 1278 | unsigned long start, unsigned long nr_pages, |
1279 | int write, int force, struct page **pages, | 1279 | unsigned int gup_flags, struct page **pages, |
1280 | struct vm_area_struct **vmas); | 1280 | struct vm_area_struct **vmas); |
1281 | long get_user_pages(unsigned long start, unsigned long nr_pages, | 1281 | long get_user_pages(unsigned long start, unsigned long nr_pages, |
1282 | unsigned int gup_flags, struct page **pages, | 1282 | unsigned int gup_flags, struct page **pages, |
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index d4129bb05e5d..f9ec9add2164 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c | |||
@@ -300,7 +300,8 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, | |||
300 | 300 | ||
301 | retry: | 301 | retry: |
302 | /* Read the page with vaddr into memory */ | 302 | /* Read the page with vaddr into memory */ |
303 | ret = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma); | 303 | ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page, |
304 | &vma); | ||
304 | if (ret <= 0) | 305 | if (ret <= 0) |
305 | return ret; | 306 | return ret; |
306 | 307 | ||
@@ -1710,7 +1711,8 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) | |||
1710 | * but we treat this as a 'remote' access since it is | 1711 | * but we treat this as a 'remote' access since it is |
1711 | * essentially a kernel access to the memory. | 1712 | * essentially a kernel access to the memory. |
1712 | */ | 1713 | */ |
1713 | result = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &page, NULL); | 1714 | result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page, |
1715 | NULL); | ||
1714 | if (result < 0) | 1716 | if (result < 0) |
1715 | return result; | 1717 | return result; |
1716 | 1718 | ||
diff --git a/mm/gup.c b/mm/gup.c --- a/mm/gup.c +++ b/mm/gup.c | |||
@@ -915,9 +915,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked); | |||
915 | * @mm: mm_struct of target mm | 915 | * @mm: mm_struct of target mm |
916 | * @start: starting user address | 916 | * @start: starting user address |
917 | * @nr_pages: number of pages from start to pin | 917 | * @nr_pages: number of pages from start to pin |
918 | * @write: whether pages will be written to by the caller | 918 | * @gup_flags: flags modifying lookup behaviour |
919 | * @force: whether to force access even when user mapping is currently | ||
920 | * protected (but never forces write access to shared mapping). | ||
921 | * @pages: array that receives pointers to the pages pinned. | 919 | * @pages: array that receives pointers to the pages pinned. |
922 | * Should be at least nr_pages long. Or NULL, if caller | 920 | * Should be at least nr_pages long. Or NULL, if caller |
923 | * only intends to ensure the pages are faulted in. | 921 | * only intends to ensure the pages are faulted in. |
@@ -946,9 +944,9 @@ EXPORT_SYMBOL(get_user_pages_unlocked); | |||
946 | * or similar operation cannot guarantee anything stronger anyway because | 944 | * or similar operation cannot guarantee anything stronger anyway because |
947 | * locks can't be held over the syscall boundary. | 945 | * locks can't be held over the syscall boundary. |
948 | * | 946 | * |
949 | * If write=0, the page must not be written to. If the page is written to, | 947 | * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page |
950 | * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called | 948 | * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must |
951 | * after the page is finished with, and before put_page is called. | 949 | * be called after the page is finished with, and before put_page is called. |
952 | * | 950 | * |
953 | * get_user_pages is typically used for fewer-copy IO operations, to get a | 951 | * get_user_pages is typically used for fewer-copy IO operations, to get a |
954 | * handle on the memory by some means other than accesses via the user virtual | 952 | * handle on the memory by some means other than accesses via the user virtual |
@@ -965,18 +963,12 @@ EXPORT_SYMBOL(get_user_pages_unlocked); | |||
965 | */ | 963 | */ |
966 | long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, | 964 | long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, |
967 | unsigned long start, unsigned long nr_pages, | 965 | unsigned long start, unsigned long nr_pages, |
968 | int write, int force, struct page **pages, | 966 | unsigned int gup_flags, struct page **pages, |
969 | struct vm_area_struct **vmas) | 967 | struct vm_area_struct **vmas) |
970 | { | 968 | { |
971 | unsigned int flags = FOLL_TOUCH | FOLL_REMOTE; | ||
972 | |||
973 | if (write) | ||
974 | flags |= FOLL_WRITE; | ||
975 | if (force) | ||
976 | flags |= FOLL_FORCE; | ||
977 | |||
978 | return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, | 969 | return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, |
979 | NULL, false, flags); | 970 | NULL, false, |
971 | gup_flags | FOLL_TOUCH | FOLL_REMOTE); | ||
980 | } | 972 | } |
981 | EXPORT_SYMBOL(get_user_pages_remote); | 973 | EXPORT_SYMBOL(get_user_pages_remote); |
982 | 974 | ||
diff --git a/mm/memory.c b/mm/memory.c index fc1987dfd8cc..20a9adb7b36e 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -3873,6 +3873,10 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, | |||
3873 | { | 3873 | { |
3874 | struct vm_area_struct *vma; | 3874 | struct vm_area_struct *vma; |
3875 | void *old_buf = buf; | 3875 | void *old_buf = buf; |
3876 | unsigned int flags = FOLL_FORCE; | ||
3877 | |||
3878 | if (write) | ||
3879 | flags |= FOLL_WRITE; | ||
3876 | 3880 | ||
3877 | down_read(&mm->mmap_sem); | 3881 | down_read(&mm->mmap_sem); |
3878 | /* ignore errors, just check how much was successfully transferred */ | 3882 | /* ignore errors, just check how much was successfully transferred */ |
@@ -3882,7 +3886,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, | |||
3882 | struct page *page = NULL; | 3886 | struct page *page = NULL; |
3883 | 3887 | ||
3884 | ret = get_user_pages_remote(tsk, mm, addr, 1, | 3888 | ret = get_user_pages_remote(tsk, mm, addr, 1, |
3885 | write, 1, &page, &vma); | 3889 | flags, &page, &vma); |
3886 | if (ret <= 0) { | 3890 | if (ret <= 0) { |
3887 | #ifndef CONFIG_HAVE_IOREMAP_PROT | 3891 | #ifndef CONFIG_HAVE_IOREMAP_PROT |
3888 | break; | 3892 | break; |
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index ade7c6cad172..682b73af7766 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c | |||
@@ -881,7 +881,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos, | |||
881 | * the execve(). | 881 | * the execve(). |
882 | */ | 882 | */ |
883 | if (get_user_pages_remote(current, bprm->mm, pos, 1, | 883 | if (get_user_pages_remote(current, bprm->mm, pos, 1, |
884 | 0, 1, &page, NULL) <= 0) | 884 | FOLL_FORCE, &page, NULL) <= 0) |
885 | return false; | 885 | return false; |
886 | #else | 886 | #else |
887 | page = bprm->page[pos / PAGE_SIZE]; | 887 | page = bprm->page[pos / PAGE_SIZE]; |