author     Lorenzo Stoakes <lstoakes@gmail.com>              2016-10-12 20:20:12 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2016-10-18 17:13:37 -0400
commit     d4944b0ecec0af882483fe44b66729316e575208
tree       0c16d81fa34c04dc63e4457e363edcfc21bfdfac
parent     859110d7497cdd0e6b21010d6f777049d676382c
mm: remove write/force parameters from __get_user_pages_unlocked()
This removes the redundant 'write' and 'force' parameters from
__get_user_pages_unlocked() to make the use of FOLL_FORCE explicit in
callers, as use of this flag can result in surprising behaviour (and
hence bugs) within the mm subsystem.

Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
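The caller-side migration is the same at every call site: instead of passing
'write' and 'force' integers down for __get_user_pages_unlocked() to translate
internally, each caller now builds its gup_flags mask up front. A minimal
sketch of the pattern, modelled on the get_user_pages_unlocked() wrapper in
mm/gup.c below (the surrounding variable names are illustrative, not part of
the patch):

	/* Before: the helper translated write/force into gup flags itself. */
	npages = __get_user_pages_unlocked(tsk, mm, start, nr_pages,
					   write, force, pages, FOLL_TOUCH);

	/* After: the caller spells out exactly which FOLL_* bits it wants,
	 * so FOLL_FORCE can no longer be added implicitly on its behalf.
	 */
	unsigned int flags = FOLL_TOUCH;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	npages = __get_user_pages_unlocked(tsk, mm, start, nr_pages,
					   pages, flags);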
-rw-r--r--   include/linux/mm.h          3
-rw-r--r--   mm/gup.c                   17
-rw-r--r--   mm/nommu.c                 12
-rw-r--r--   mm/process_vm_access.c      7
-rw-r--r--   virt/kvm/async_pf.c         3
-rw-r--r--   virt/kvm/kvm_main.c        11
6 files changed, 34 insertions(+), 19 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ed85879f47f5..bcdea1f4e98c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1285,8 +1285,7 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 		    int write, int force, struct page **pages, int *locked);
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 			       unsigned long start, unsigned long nr_pages,
-			       int write, int force, struct page **pages,
-			       unsigned int gup_flags);
+			       struct page **pages, unsigned int gup_flags);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 		    int write, int force, struct page **pages);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
diff --git a/mm/gup.c b/mm/gup.c
index 720eb9385204..e997f545b059 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -875,17 +875,11 @@ EXPORT_SYMBOL(get_user_pages_locked);
  */
 __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 					       unsigned long start, unsigned long nr_pages,
-					       int write, int force, struct page **pages,
-					       unsigned int gup_flags)
+					       struct page **pages, unsigned int gup_flags)
 {
 	long ret;
 	int locked = 1;
 
-	if (write)
-		gup_flags |= FOLL_WRITE;
-	if (force)
-		gup_flags |= FOLL_FORCE;
-
 	down_read(&mm->mmap_sem);
 	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
 				      &locked, false, gup_flags);
@@ -915,8 +909,15 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     int write, int force, struct page **pages)
 {
+	unsigned int flags = FOLL_TOUCH;
+
+	if (write)
+		flags |= FOLL_WRITE;
+	if (force)
+		flags |= FOLL_FORCE;
+
 	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
-					 write, force, pages, FOLL_TOUCH);
+					 pages, flags);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
 
diff --git a/mm/nommu.c b/mm/nommu.c
index 95daf81a4855..925dcc1fa2f3 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -185,8 +185,7 @@ EXPORT_SYMBOL(get_user_pages_locked);
 
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 			       unsigned long start, unsigned long nr_pages,
-			       int write, int force, struct page **pages,
-			       unsigned int gup_flags)
+			       struct page **pages, unsigned int gup_flags)
 {
 	long ret;
 	down_read(&mm->mmap_sem);
@@ -200,8 +199,15 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     int write, int force, struct page **pages)
 {
+	unsigned int flags = 0;
+
+	if (write)
+		flags |= FOLL_WRITE;
+	if (force)
+		flags |= FOLL_FORCE;
+
 	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
-					 write, force, pages, 0);
+					 pages, flags);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
 
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 07514d41ebcc..be8dc8d1edb9 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -88,12 +88,16 @@ static int process_vm_rw_single_vec(unsigned long addr,
 	ssize_t rc = 0;
 	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
 		/ sizeof(struct pages *);
+	unsigned int flags = FOLL_REMOTE;
 
 	/* Work out address and page range required */
 	if (len == 0)
 		return 0;
 	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
 
+	if (vm_write)
+		flags |= FOLL_WRITE;
+
 	while (!rc && nr_pages && iov_iter_count(iter)) {
 		int pages = min(nr_pages, max_pages_per_loop);
 		size_t bytes;
@@ -104,8 +108,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
 		 * current/current->mm
 		 */
 		pages = __get_user_pages_unlocked(task, mm, pa, pages,
-						  vm_write, 0, process_pages,
-						  FOLL_REMOTE);
+						  process_pages, flags);
 		if (pages <= 0)
 			return -EFAULT;
 
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index db9668869f6f..8035cc1eb955 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -84,7 +84,8 @@ static void async_pf_execute(struct work_struct *work)
 	 * mm and might be done in another context, so we must
 	 * use FOLL_REMOTE.
 	 */
-	__get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL, FOLL_REMOTE);
+	__get_user_pages_unlocked(NULL, mm, addr, 1, NULL,
+			FOLL_WRITE | FOLL_REMOTE);
 
 	kvm_async_page_present_sync(vcpu, apf);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 81dfc73d3df3..28510e72618a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1416,10 +1416,15 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 		down_read(&current->mm->mmap_sem);
 		npages = get_user_page_nowait(addr, write_fault, page);
 		up_read(&current->mm->mmap_sem);
-	} else
+	} else {
+		unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
+
+		if (write_fault)
+			flags |= FOLL_WRITE;
+
 		npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
-						   write_fault, 0, page,
-						   FOLL_TOUCH|FOLL_HWPOISON);
+						   page, flags);
+	}
 	if (npages != 1)
 		return npages;
 