author:    Christoph Hellwig <hch@lst.de>                   2019-07-11 23:57:21 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>   2019-07-12 14:05:45 -0400
commit:    050a9adc64383aed3429a31432b4f5a7b0cdc8ac (patch)
tree:      e8bf6fec0f1a4fd0aadbbad9f9834cb648ff1129 /mm
parent:    d3649f68b4336e7ef7aa264cf05ba1265feb0968 (diff)
mm: consolidate the get_user_pages* implementations
Always build mm/gup.c so that we don't have to provide separate nommu
stubs.  Also merge the get_user_pages_fast and __get_user_pages_fast
stubs used when HAVE_FAST_GUP is not set into the main implementations,
which will never call the fast path if HAVE_FAST_GUP is not set.

This also ensures the new put_user_pages* helpers are available for
nommu, as those are currently missing, which would create a problem as
soon as we actually grew users for them.

Link: http://lkml.kernel.org/r/20190625143715.1689-13-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: James Hogan <jhogan@kernel.org>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Khalid Aziz <khalid.aziz@oracle.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
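With gup.c built unconditionally, the same pin/release sequence now works on MMU and nommu kernels alike. A minimal, illustrative sketch of such a caller (demo_pin_user_buffer() is a hypothetical function, not part of this patch; error handling trimmed):

/*
 * Illustrative only: pin a user buffer via the consolidated
 * get_user_pages_fast() entry point and release it with the
 * put_user_pages() helper that this patch also makes available
 * on nommu configurations.
 */
#include <linux/mm.h>
#include <linux/slab.h>

static int demo_pin_user_buffer(unsigned long uaddr, int nr_pages)
{
	struct page **pages;
	int pinned;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* Returns the number of pages pinned, or -errno if none were. */
	pinned = get_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
	if (pinned > 0) {
		/* ... use the pinned pages ... */
		put_user_pages(pages, pinned);
	}

	kfree(pages);
	return pinned < 0 ? pinned : 0;
}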
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig   |  1
-rw-r--r--  mm/Makefile  |  4
-rw-r--r--  mm/gup.c     | 67
-rw-r--r--  mm/nommu.c   | 88
-rw-r--r--  mm/util.c    | 47
5 files changed, 65 insertions(+), 142 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index b5a258d62465..48840b28482b 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -133,6 +133,7 @@ config HAVE_MEMBLOCK_PHYS_MAP
 	bool
 
 config HAVE_FAST_GUP
+	depends on MMU
 	bool
 
 config ARCH_KEEP_MEMBLOCK
diff --git a/mm/Makefile b/mm/Makefile
index ac5e5ba78874..dc0746ca1109 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -22,7 +22,7 @@ KCOV_INSTRUMENT_mmzone.o := n
 KCOV_INSTRUMENT_vmstat.o := n
 
 mmu-y			:= nommu.o
-mmu-$(CONFIG_MMU)	:= gup.o highmem.o memory.o mincore.o \
+mmu-$(CONFIG_MMU)	:= highmem.o memory.o mincore.o \
 			   mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
 			   msync.o page_vma_mapped.o pagewalk.o \
 			   pgtable-generic.o rmap.o vmalloc.o
@@ -39,7 +39,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
 			   mm_init.o mmu_context.o percpu.o slab_common.o \
 			   compaction.o vmacache.o \
 			   interval_tree.o list_lru.o workingset.o \
-			   debug.o $(mmu-y)
+			   debug.o gup.o $(mmu-y)
 
 # Give 'page_alloc' its own module-parameter namespace
 page-alloc-y := page_alloc.o
diff --git a/mm/gup.c b/mm/gup.c
index d36b82c05e79..cc5ddd8869b7 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -134,6 +134,7 @@ void put_user_pages(struct page **pages, unsigned long npages)
 }
 EXPORT_SYMBOL(put_user_pages);
 
+#ifdef CONFIG_MMU
 static struct page *no_page_table(struct vm_area_struct *vma,
 		unsigned int flags)
 {
@@ -1322,6 +1323,51 @@ struct page *get_dump_page(unsigned long addr)
 	return page;
 }
 #endif /* CONFIG_ELF_CORE */
+#else /* CONFIG_MMU */
+static long __get_user_pages_locked(struct task_struct *tsk,
+		struct mm_struct *mm, unsigned long start,
+		unsigned long nr_pages, struct page **pages,
+		struct vm_area_struct **vmas, int *locked,
+		unsigned int foll_flags)
+{
+	struct vm_area_struct *vma;
+	unsigned long vm_flags;
+	int i;
+
+	/* calculate required read or write permissions.
+	 * If FOLL_FORCE is set, we only require the "MAY" flags.
+	 */
+	vm_flags  = (foll_flags & FOLL_WRITE) ?
+			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+	vm_flags &= (foll_flags & FOLL_FORCE) ?
+			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+
+	for (i = 0; i < nr_pages; i++) {
+		vma = find_vma(mm, start);
+		if (!vma)
+			goto finish_or_fault;
+
+		/* protect what we can, including chardevs */
+		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+		    !(vm_flags & vma->vm_flags))
+			goto finish_or_fault;
+
+		if (pages) {
+			pages[i] = virt_to_page(start);
+			if (pages[i])
+				get_page(pages[i]);
+		}
+		if (vmas)
+			vmas[i] = vma;
+		start = (start + PAGE_SIZE) & PAGE_MASK;
+	}
+
+	return i;
+
+finish_or_fault:
+	return i ? : -EFAULT;
+}
+#endif /* !CONFIG_MMU */
 
 #if defined(CONFIG_FS_DAX) || defined (CONFIG_CMA)
 static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
@@ -1484,7 +1530,7 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
 {
 	return nr_pages;
 }
-#endif
+#endif /* CONFIG_CMA */
 
 /*
  * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
@@ -2160,6 +2206,12 @@ static void gup_pgd_range(unsigned long addr, unsigned long end,
 			return;
 	} while (pgdp++, addr = next, addr != end);
 }
+#else
+static inline void gup_pgd_range(unsigned long addr, unsigned long end,
+		unsigned int flags, struct page **pages, int *nr)
+{
+}
+#endif /* CONFIG_HAVE_FAST_GUP */
 
 #ifndef gup_fast_permitted
 /*
@@ -2177,6 +2229,9 @@ static bool gup_fast_permitted(unsigned long start, unsigned long end)
  * the regular GUP.
  * Note a difference with get_user_pages_fast: this always returns the
  * number of pages pinned, 0 if no pages were pinned.
+ *
+ * If the architecture does not support this function, simply return with no
+ * pages pinned.
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			  struct page **pages)
@@ -2206,7 +2261,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	 * block IPIs that come from THPs splitting.
 	 */
 
-	if (gup_fast_permitted(start, end)) {
+	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
+	    gup_fast_permitted(start, end)) {
 		local_irq_save(flags);
 		gup_pgd_range(start, end, write ? FOLL_WRITE : 0, pages, &nr);
 		local_irq_restore(flags);
@@ -2214,6 +2270,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 
 	return nr;
 }
+EXPORT_SYMBOL_GPL(__get_user_pages_fast);
 
 static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
 				   unsigned int gup_flags, struct page **pages)
@@ -2270,7 +2327,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
 	if (unlikely(!access_ok((void __user *)start, len)))
 		return -EFAULT;
 
-	if (gup_fast_permitted(start, end)) {
+	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
+	    gup_fast_permitted(start, end)) {
 		local_irq_disable();
 		gup_pgd_range(addr, end, gup_flags, pages, &nr);
 		local_irq_enable();
@@ -2296,5 +2354,4 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
 
 	return ret;
 }
-
-#endif /* CONFIG_HAVE_GENERIC_GUP */
+EXPORT_SYMBOL_GPL(get_user_pages_fast);
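The two IS_ENABLED(CONFIG_HAVE_FAST_GUP) checks above, together with the empty gup_pgd_range() stub, are what let the old !HAVE_FAST_GUP stubs go away: when the option is off the condition is a compile-time constant zero, the fast-path walk is discarded by the compiler, and both functions fall through reporting zero pages pinned, just like the weak fallbacks removed from mm/util.c below. A stand-alone sketch of that pattern, with hypothetical names and a plain macro standing in for the kernel's IS_ENABLED():

/*
 * Stand-alone illustration (not kernel code): the feature test is an
 * ordinary integer constant, so the untaken branch is still parsed and
 * type-checked but compiles away entirely, unlike code hidden behind
 * an #ifdef.
 */
#include <stdio.h>

#define CONFIG_DEMO_FAST_PATH 0		/* assume the feature is disabled */

static int fast_path_walk(int nr)
{
	return nr;			/* stand-in for the lockless walk */
}

static int pin_pages(int nr)
{
	int pinned = 0;

	if (CONFIG_DEMO_FAST_PATH)	/* constant false: call is eliminated */
		pinned = fast_path_walk(nr);

	return pinned;			/* 0 pinned when the fast path is off */
}

int main(void)
{
	printf("pinned %d pages\n", pin_pages(8));
	return 0;
}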
diff --git a/mm/nommu.c b/mm/nommu.c
index d8c02fbe03b5..07165ad2e548 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -111,94 +111,6 @@ unsigned int kobjsize(const void *objp)
 	return PAGE_SIZE << compound_order(page);
 }
 
-static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-			     unsigned long start, unsigned long nr_pages,
-			     unsigned int foll_flags, struct page **pages,
-			     struct vm_area_struct **vmas, int *nonblocking)
-{
-	struct vm_area_struct *vma;
-	unsigned long vm_flags;
-	int i;
-
-	/* calculate required read or write permissions.
-	 * If FOLL_FORCE is set, we only require the "MAY" flags.
-	 */
-	vm_flags  = (foll_flags & FOLL_WRITE) ?
-			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-	vm_flags &= (foll_flags & FOLL_FORCE) ?
-			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
-
-	for (i = 0; i < nr_pages; i++) {
-		vma = find_vma(mm, start);
-		if (!vma)
-			goto finish_or_fault;
-
-		/* protect what we can, including chardevs */
-		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
-		    !(vm_flags & vma->vm_flags))
-			goto finish_or_fault;
-
-		if (pages) {
-			pages[i] = virt_to_page(start);
-			if (pages[i])
-				get_page(pages[i]);
-		}
-		if (vmas)
-			vmas[i] = vma;
-		start = (start + PAGE_SIZE) & PAGE_MASK;
-	}
-
-	return i;
-
-finish_or_fault:
-	return i ? : -EFAULT;
-}
-
-/*
- * get a list of pages in an address range belonging to the specified process
- * and indicate the VMA that covers each page
- * - this is potentially dodgy as we may end incrementing the page count of a
- *   slab page or a secondary page from a compound page
- * - don't permit access to VMAs that don't support it, such as I/O mappings
- */
-long get_user_pages(unsigned long start, unsigned long nr_pages,
-		    unsigned int gup_flags, struct page **pages,
-		    struct vm_area_struct **vmas)
-{
-	return __get_user_pages(current, current->mm, start, nr_pages,
-				gup_flags, pages, vmas, NULL);
-}
-EXPORT_SYMBOL(get_user_pages);
-
-long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
-			    unsigned int gup_flags, struct page **pages,
-			    int *locked)
-{
-	return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
-}
-EXPORT_SYMBOL(get_user_pages_locked);
-
-static long __get_user_pages_unlocked(struct task_struct *tsk,
-			struct mm_struct *mm, unsigned long start,
-			unsigned long nr_pages, struct page **pages,
-			unsigned int gup_flags)
-{
-	long ret;
-	down_read(&mm->mmap_sem);
-	ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages,
-				NULL, NULL);
-	up_read(&mm->mmap_sem);
-	return ret;
-}
-
-long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
-			    struct page **pages, unsigned int gup_flags)
-{
-	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
-					 pages, gup_flags);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked);
-
 /**
  * follow_pfn - look up PFN at a user virtual address
  * @vma: memory mapping
diff --git a/mm/util.c b/mm/util.c
index 9834c4ab7d8e..68575a315dc5 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -300,53 +300,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 }
 #endif
 
-/*
- * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
- * back to the regular GUP.
- * Note a difference with get_user_pages_fast: this always returns the
- * number of pages pinned, 0 if no pages were pinned.
- * If the architecture does not support this function, simply return with no
- * pages pinned.
- */
-int __weak __get_user_pages_fast(unsigned long start,
-				 int nr_pages, int write, struct page **pages)
-{
-	return 0;
-}
-EXPORT_SYMBOL_GPL(__get_user_pages_fast);
-
-/**
- * get_user_pages_fast() - pin user pages in memory
- * @start:	starting user address
- * @nr_pages:	number of pages from start to pin
- * @gup_flags:	flags modifying pin behaviour
- * @pages:	array that receives pointers to the pages pinned.
- *		Should be at least nr_pages long.
- *
- * get_user_pages_fast provides equivalent functionality to get_user_pages,
- * operating on current and current->mm, with force=0 and vma=NULL. However
- * unlike get_user_pages, it must be called without mmap_sem held.
- *
- * get_user_pages_fast may take mmap_sem and page table locks, so no
- * assumptions can be made about lack of locking. get_user_pages_fast is to be
- * implemented in a way that is advantageous (vs get_user_pages()) when the
- * user memory area is already faulted in and present in ptes. However if the
- * pages have to be faulted in, it may turn out to be slightly slower so
- * callers need to carefully consider what to use. On many architectures,
- * get_user_pages_fast simply falls back to get_user_pages.
- *
- * Return: number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
- */
-int __weak get_user_pages_fast(unsigned long start,
-			       int nr_pages, unsigned int gup_flags,
-			       struct page **pages)
-{
-	return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
-}
-EXPORT_SYMBOL_GPL(get_user_pages_fast);
-
 unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 			unsigned long len, unsigned long prot,
 			unsigned long flag, unsigned long pgoff)