author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2017-07-06 18:38:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-06 19:24:33 -0400
commit		080dbb618b4bc25883a7654c1e92b2c49e3d6d63 (patch)
tree		7bbdeb0ffa98c460d871ada0b87dd5e95202b07f /mm/gup.c
parent		383321ab8578dfe3bbcc0bc5604c0f8ae08a5c98 (diff)
mm/follow_page_mask: split follow_page_mask to smaller functions.
Makes code reading easy. No functional changes in this patch. In a
followup patch, we will be updating the follow_page_mask to handle
hugetlb hugepd format so that archs like ppc64 can switch to the
generic version. This split helps in doing that nicely.

Link: http://lkml.kernel.org/r/1494926612-23928-3-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Mike Kravetz <kravetz@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/gup.c')
 mm/gup.c | 148 ++++++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 91 insertions(+), 57 deletions(-)
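
The split turns one long multi-level walk into a chain of per-level helpers,
each handling the none/huge/devmap/bad cases for its own page-table level
before descending. A condensed sketch of the resulting call structure
(bodies elided; the full definitions are in the diff below):

	/*
	 * Condensed sketch, not the literal code: follow_page_mask() keeps
	 * the follow_huge_addr() fast path and the pgd-level checks, then
	 * delegates one level at a time.
	 */
	struct page *follow_page_mask(struct vm_area_struct *vma,
			unsigned long address, unsigned int flags,
			unsigned int *page_mask)
	{
		pgd_t *pgd = pgd_offset(vma->vm_mm, address);

		/* ... pgd-level none/bad checks ... */
		return follow_p4d_mask(vma, address, pgd, flags, page_mask);
		/* -> follow_pud_mask() -> follow_pmd_mask() -> follow_page_pte() */
	}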
diff --git a/mm/gup.c b/mm/gup.c
index 3ab78dc3db7d..bf68e21d7a3a 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -208,68 +208,16 @@ no_page:
 	return no_page_table(vma, flags);
 }
 
-/**
- * follow_page_mask - look up a page descriptor from a user-virtual address
- * @vma: vm_area_struct mapping @address
- * @address: virtual address to look up
- * @flags: flags modifying lookup behaviour
- * @page_mask: on output, *page_mask is set according to the size of the page
- *
- * @flags can have FOLL_ flags set, defined in <linux/mm.h>
- *
- * Returns the mapped (struct page *), %NULL if no mapping exists, or
- * an error pointer if there is a mapping to something not represented
- * by a page descriptor (see also vm_normal_page()).
- */
-struct page *follow_page_mask(struct vm_area_struct *vma,
-		unsigned long address, unsigned int flags,
-		unsigned int *page_mask)
+static struct page *follow_pmd_mask(struct vm_area_struct *vma,
+		unsigned long address, pud_t *pudp,
+		unsigned int flags, unsigned int *page_mask)
 {
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
 	pmd_t *pmd;
 	spinlock_t *ptl;
 	struct page *page;
 	struct mm_struct *mm = vma->vm_mm;
 
-	*page_mask = 0;
-
-	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
-	if (!IS_ERR(page)) {
-		BUG_ON(flags & FOLL_GET);
-		return page;
-	}
-
-	pgd = pgd_offset(mm, address);
-	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		return no_page_table(vma, flags);
-	p4d = p4d_offset(pgd, address);
-	if (p4d_none(*p4d))
-		return no_page_table(vma, flags);
-	BUILD_BUG_ON(p4d_huge(*p4d));
-	if (unlikely(p4d_bad(*p4d)))
-		return no_page_table(vma, flags);
-	pud = pud_offset(p4d, address);
-	if (pud_none(*pud))
-		return no_page_table(vma, flags);
-	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
-		page = follow_huge_pud(mm, address, pud, flags);
-		if (page)
-			return page;
-		return no_page_table(vma, flags);
-	}
-	if (pud_devmap(*pud)) {
-		ptl = pud_lock(mm, pud);
-		page = follow_devmap_pud(vma, address, pud, flags);
-		spin_unlock(ptl);
-		if (page)
-			return page;
-	}
-	if (unlikely(pud_bad(*pud)))
-		return no_page_table(vma, flags);
-
-	pmd = pmd_offset(pud, address);
+	pmd = pmd_offset(pudp, address);
 	if (pmd_none(*pmd))
 		return no_page_table(vma, flags);
 	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
@@ -319,13 +267,99 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 		return ret ? ERR_PTR(ret) :
 			follow_page_pte(vma, address, pmd, flags);
 	}
-
 	page = follow_trans_huge_pmd(vma, address, pmd, flags);
 	spin_unlock(ptl);
 	*page_mask = HPAGE_PMD_NR - 1;
 	return page;
 }
 
+
+static struct page *follow_pud_mask(struct vm_area_struct *vma,
+		unsigned long address, p4d_t *p4dp,
+		unsigned int flags, unsigned int *page_mask)
+{
+	pud_t *pud;
+	spinlock_t *ptl;
+	struct page *page;
+	struct mm_struct *mm = vma->vm_mm;
+
+	pud = pud_offset(p4dp, address);
+	if (pud_none(*pud))
+		return no_page_table(vma, flags);
+	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
+		page = follow_huge_pud(mm, address, pud, flags);
+		if (page)
+			return page;
+		return no_page_table(vma, flags);
+	}
+	if (pud_devmap(*pud)) {
+		ptl = pud_lock(mm, pud);
+		page = follow_devmap_pud(vma, address, pud, flags);
+		spin_unlock(ptl);
+		if (page)
+			return page;
+	}
+	if (unlikely(pud_bad(*pud)))
+		return no_page_table(vma, flags);
+
+	return follow_pmd_mask(vma, address, pud, flags, page_mask);
+}
+
+
+static struct page *follow_p4d_mask(struct vm_area_struct *vma,
+		unsigned long address, pgd_t *pgdp,
+		unsigned int flags, unsigned int *page_mask)
+{
+	p4d_t *p4d;
+
+	p4d = p4d_offset(pgdp, address);
+	if (p4d_none(*p4d))
+		return no_page_table(vma, flags);
+	BUILD_BUG_ON(p4d_huge(*p4d));
+	if (unlikely(p4d_bad(*p4d)))
+		return no_page_table(vma, flags);
+
+	return follow_pud_mask(vma, address, p4d, flags, page_mask);
+}
+
+/**
+ * follow_page_mask - look up a page descriptor from a user-virtual address
+ * @vma: vm_area_struct mapping @address
+ * @address: virtual address to look up
+ * @flags: flags modifying lookup behaviour
+ * @page_mask: on output, *page_mask is set according to the size of the page
+ *
+ * @flags can have FOLL_ flags set, defined in <linux/mm.h>
+ *
+ * Returns the mapped (struct page *), %NULL if no mapping exists, or
+ * an error pointer if there is a mapping to something not represented
+ * by a page descriptor (see also vm_normal_page()).
+ */
+struct page *follow_page_mask(struct vm_area_struct *vma,
+		unsigned long address, unsigned int flags,
+		unsigned int *page_mask)
+{
+	pgd_t *pgd;
+	struct page *page;
+	struct mm_struct *mm = vma->vm_mm;
+
+	*page_mask = 0;
+
+	/* make this handle hugepd */
+	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
+	if (!IS_ERR(page)) {
+		BUG_ON(flags & FOLL_GET);
+		return page;
+	}
+
+	pgd = pgd_offset(mm, address);
+
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+		return no_page_table(vma, flags);
+
+	return follow_p4d_mask(vma, address, pgd, flags, page_mask);
+}
+
 static int get_gate_page(struct mm_struct *mm, unsigned long address,
 		unsigned int gup_flags, struct vm_area_struct **vma,
 		struct page **page)
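
For context on the @page_mask output parameter: its consumer is
__get_user_pages(), later in this same file and untouched by this patch.
Roughly, paraphrasing the contemporaneous caller (a sketch, not part of
the diff):

	/*
	 * When follow_page_mask() maps a huge page, *page_mask lets the
	 * GUP loop advance past all remaining subpages of that huge page
	 * in one step instead of re-walking the page tables per 4K page.
	 */
	page = follow_page_mask(vma, start, foll_flags, &page_mask);
	/* ... fault-in and bookkeeping elided ... */
	page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
	if (page_increm > nr_pages)
		page_increm = nr_pages;
	i += page_increm;
	start += page_increm * PAGE_SIZE;
	nr_pages -= page_increm;

Since the PMD path sets *page_mask to HPAGE_PMD_NR - 1, a single walk can
satisfy up to 512 pages at once on x86-64 with 4KiB base pages.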