author		Lorenzo Stoakes <lstoakes@gmail.com>		2016-12-14 18:06:55 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-14 19:04:09 -0500
commit		8b7457ef9a9eb46cd1675d40d8e1fd3c47a38395 (patch)
tree		485def76ab013b2609eb24762e4c39fc531c72d7
parent		5b56d49fc31dbb0487e14ead790fc81ca9fb2c99 (diff)
mm: unexport __get_user_pages_unlocked()
Unexport the low-level __get_user_pages_unlocked() function and replace
invocations with calls to more appropriate higher-level functions.

In hva_to_pfn_slow() we are able to replace __get_user_pages_unlocked()
with get_user_pages_unlocked() since we can now pass gup_flags.

In async_pf_execute() and process_vm_rw_single_vec() we need to pass
different tsk, mm arguments so get_user_pages_remote() is the sane
replacement in these cases (having added manual acquisition and release
of mmap_sem).

Additionally get_user_pages_remote() reintroduces use of the FOLL_TOUCH
flag.  However, this flag was originally silently dropped by commit
1e9877902dc7 ("mm/gup: Introduce get_user_pages_remote()"), so this
appears to have been unintentional and reintroducing it is therefore
not an issue.

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/20161027095141.2569-3-lstoakes@gmail.com
Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
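As a rough caller-side sketch of the convention the remote-access call
sites move to (the identifiers tsk, mm, start, nr_pages, gup_flags and
pages below are placeholders, not code from the patch):
get_user_pages_remote() expects mmap_sem to be held for read and may
drop it internally, reporting that through *locked, so each converted
caller brackets the call like this:

	int locked = 1;
	long ret;

	/* get_user_pages_remote() must be entered with mmap_sem held */
	down_read(&mm->mmap_sem);
	ret = get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags,
				    pages, NULL, &locked);
	/* GUP may have dropped mmap_sem itself, clearing locked */
	if (locked)
		up_read(&mm->mmap_sem);

hva_to_pfn_slow(), by contrast, operates on current->mm, so the simpler
get_user_pages_unlocked(addr, 1, page, flags) suffices there and no
explicit locking is required at the call site.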
-rw-r--r--	include/linux/mm.h	3
-rw-r--r--	mm/gup.c	8
-rw-r--r--	mm/nommu.c	8
-rw-r--r--	mm/process_vm_access.c	12
-rw-r--r--	virt/kvm/async_pf.c	10
-rw-r--r--	virt/kvm/kvm_main.c	5
6 files changed, 25 insertions, 21 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index cc154454675a..7b2d14ed3815 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1280,9 +1280,6 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
 		    struct vm_area_struct **vmas);
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 		    unsigned int gup_flags, struct page **pages, int *locked);
-long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
-			       unsigned long start, unsigned long nr_pages,
-			       struct page **pages, unsigned int gup_flags);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 		    struct page **pages, unsigned int gup_flags);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
diff --git a/mm/gup.c b/mm/gup.c
index b64c907aa4f0..55315555489d 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -865,9 +865,10 @@ EXPORT_SYMBOL(get_user_pages_locked);
  * caller if required (just like with __get_user_pages). "FOLL_GET"
  * is set implicitly if "pages" is non-NULL.
  */
-__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
-					       unsigned long start, unsigned long nr_pages,
-					       struct page **pages, unsigned int gup_flags)
+static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk,
+		struct mm_struct *mm, unsigned long start,
+		unsigned long nr_pages, struct page **pages,
+		unsigned int gup_flags)
 {
 	long ret;
 	int locked = 1;
@@ -879,7 +880,6 @@ __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct m
 	up_read(&mm->mmap_sem);
 	return ret;
 }
-EXPORT_SYMBOL(__get_user_pages_unlocked);
 
 /*
  * get_user_pages_unlocked() is suitable to replace the form:
diff --git a/mm/nommu.c b/mm/nommu.c
index 9720e0bab029..c299a7fbca70 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -176,9 +176,10 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 }
 EXPORT_SYMBOL(get_user_pages_locked);
 
-long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
-			       unsigned long start, unsigned long nr_pages,
-			       struct page **pages, unsigned int gup_flags)
+static long __get_user_pages_unlocked(struct task_struct *tsk,
+			struct mm_struct *mm, unsigned long start,
+			unsigned long nr_pages, struct page **pages,
+			unsigned int gup_flags)
 {
 	long ret;
 	down_read(&mm->mmap_sem);
@@ -187,7 +188,6 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 	up_read(&mm->mmap_sem);
 	return ret;
 }
-EXPORT_SYMBOL(__get_user_pages_unlocked);
 
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     struct page **pages, unsigned int gup_flags)
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index be8dc8d1edb9..84d0c7eada2b 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -88,7 +88,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
 	ssize_t rc = 0;
 	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
 		/ sizeof(struct pages *);
-	unsigned int flags = FOLL_REMOTE;
+	unsigned int flags = 0;
 
 	/* Work out address and page range required */
 	if (len == 0)
@@ -100,15 +100,19 @@ static int process_vm_rw_single_vec(unsigned long addr,
 
 	while (!rc && nr_pages && iov_iter_count(iter)) {
 		int pages = min(nr_pages, max_pages_per_loop);
+		int locked = 1;
 		size_t bytes;
 
 		/*
 		 * Get the pages we're interested in. We must
-		 * add FOLL_REMOTE because task/mm might not
+		 * access remotely because task/mm might not
 		 * current/current->mm
 		 */
-		pages = __get_user_pages_unlocked(task, mm, pa, pages,
-						  process_pages, flags);
+		down_read(&mm->mmap_sem);
+		pages = get_user_pages_remote(task, mm, pa, pages, flags,
+					      process_pages, NULL, &locked);
+		if (locked)
+			up_read(&mm->mmap_sem);
 		if (pages <= 0)
 			return -EFAULT;
 
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index efeceb0a222d..3815e940fbea 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -76,16 +76,20 @@ static void async_pf_execute(struct work_struct *work)
 	struct kvm_vcpu *vcpu = apf->vcpu;
 	unsigned long addr = apf->addr;
 	gva_t gva = apf->gva;
+	int locked = 1;
 
 	might_sleep();
 
 	/*
 	 * This work is run asynchromously to the task which owns
 	 * mm and might be done in another context, so we must
-	 * use FOLL_REMOTE.
+	 * access remotely.
 	 */
-	__get_user_pages_unlocked(NULL, mm, addr, 1, NULL,
-			FOLL_WRITE | FOLL_REMOTE);
+	down_read(&mm->mmap_sem);
+	get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
+			&locked);
+	if (locked)
+		up_read(&mm->mmap_sem);
 
 	kvm_async_page_present_sync(vcpu, apf);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 823544c166be..de102cae7125 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1418,13 +1418,12 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 		npages = get_user_page_nowait(addr, write_fault, page);
 		up_read(&current->mm->mmap_sem);
 	} else {
-		unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
+		unsigned int flags = FOLL_HWPOISON;
 
 		if (write_fault)
 			flags |= FOLL_WRITE;
 
-		npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
-						   page, flags);
+		npages = get_user_pages_unlocked(addr, 1, page, flags);
 	}
 	if (npages != 1)
 		return npages;