author     Keith Busch <keith.busch@intel.com>            2018-10-26 18:10:28 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-10-26 19:38:15 -0400
commit     df06b37ffe5a442503b7095b77b0a970df515459 (patch)
tree       2fdb301eba4b4c2240595040496096b3a6489c79
parent     9fd61bc95130d4971568b89c9548b5e0a4e18e0e (diff)
mm/gup: cache dev_pagemap while pinning pages
Getting pages from ZONE_DEVICE memory needs to check the backing device's
liveness, which is tracked in the device's dev_pagemap metadata. This
metadata is stored in a radix tree, and looking it up adds measurable
software overhead.

This patch avoids repeating that relatively costly lookup by caching the
last dev_pagemap while getting user pages. The gup_benchmark kernel
self-test reports that this cuts the time to get user pages to as little
as one third of the previous time.
Link: http://lkml.kernel.org/r/20181012173040.15669-1-keith.busch@intel.com
Signed-off-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
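The change threads a cached dev_pagemap through the follow_page_* helpers so
that get_dev_pagemap() can reuse it while successive pfns fall inside the same
mapping, and the reference is dropped once by the outermost caller (the new
follow_page() wrapper and the out: path in __get_user_pages()) rather than once
per page. Below is a minimal userspace sketch of that caching pattern; the
names are illustrative only and are not the kernel API.

/*
 * Minimal sketch of the caching idea: keep the last mapping found and reuse
 * it while successive pfns still fall inside its range, falling back to the
 * expensive lookup only on a miss.
 */
#include <stddef.h>
#include <stdio.h>

struct pagemap {
        unsigned long start_pfn;
        unsigned long nr_pages;
};

/* stand-in for the radix-tree-backed lookup the kernel does */
static struct pagemap maps[] = {
        { .start_pfn = 0x1000, .nr_pages = 0x100 },
        { .start_pfn = 0x2000, .nr_pages = 0x200 },
};

static struct pagemap *slow_lookup(unsigned long pfn)
{
        for (size_t i = 0; i < sizeof(maps) / sizeof(maps[0]); i++)
                if (pfn - maps[i].start_pfn < maps[i].nr_pages)
                        return &maps[i];
        return NULL;
}

/*
 * Analogous to get_dev_pagemap(pfn, pgmap): reuse the cached entry if it
 * still covers pfn, otherwise do the slow lookup.
 */
static struct pagemap *lookup_cached(unsigned long pfn, struct pagemap *cached)
{
        if (cached && pfn - cached->start_pfn < cached->nr_pages)
                return cached;
        return slow_lookup(pfn);
}

int main(void)
{
        struct pagemap *pgmap = NULL;
        unsigned long pfn;

        for (pfn = 0x1000; pfn < 0x1010; pfn++) {
                pgmap = lookup_cached(pfn, pgmap); /* hits the cache after the first pfn */
                if (pgmap)
                        printf("pfn %#lx -> map at %#lx\n", pfn, pgmap->start_pfn);
        }
        return 0;
}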
-rw-r--r--  include/linux/huge_mm.h |   8
-rw-r--r--  include/linux/mm.h      |  12
-rw-r--r--  mm/gup.c                | 110
-rw-r--r--  mm/huge_memory.c        |  16
-rw-r--r--  mm/nommu.c              |   6
5 files changed, 79 insertions, 73 deletions
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index fdcb45999b26..4663ee96cf59 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -213,9 +213,9 @@ static inline int hpage_nr_pages(struct page *page)
 }
 
 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
-                pmd_t *pmd, int flags);
+                pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
-                pud_t *pud, int flags);
+                pud_t *pud, int flags, struct dev_pagemap **pgmap);
 
 extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
 
@@ -344,13 +344,13 @@ static inline void mm_put_huge_zero_page(struct mm_struct *mm)
 }
 
 static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
-        unsigned long addr, pmd_t *pmd, int flags)
+        unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
 {
         return NULL;
 }
 
 static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
-        unsigned long addr, pud_t *pud, int flags)
+        unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
 {
         return NULL;
 }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a023c5ce71fa..1e52b8fd1685 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2536,16 +2536,8 @@ static inline vm_fault_t vmf_error(int err)
         return VM_FAULT_SIGBUS;
 }
 
-struct page *follow_page_mask(struct vm_area_struct *vma,
-                unsigned long address, unsigned int foll_flags,
-                unsigned int *page_mask);
-
-static inline struct page *follow_page(struct vm_area_struct *vma,
-                unsigned long address, unsigned int foll_flags)
-{
-        unsigned int unused_page_mask;
-        return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
-}
+struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
+                unsigned int foll_flags);
 
 #define FOLL_WRITE      0x01    /* check pte is writable */
 #define FOLL_TOUCH      0x02    /* mark page accessed */
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -20,6 +20,11 @@
 
 #include "internal.h"
 
+struct follow_page_context {
+        struct dev_pagemap *pgmap;
+        unsigned int page_mask;
+};
+
 static struct page *no_page_table(struct vm_area_struct *vma,
                 unsigned int flags)
 {
@@ -71,10 +76,10 @@ static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
 }
 
 static struct page *follow_page_pte(struct vm_area_struct *vma,
-                unsigned long address, pmd_t *pmd, unsigned int flags)
+                unsigned long address, pmd_t *pmd, unsigned int flags,
+                struct dev_pagemap **pgmap)
 {
         struct mm_struct *mm = vma->vm_mm;
-        struct dev_pagemap *pgmap = NULL;
         struct page *page;
         spinlock_t *ptl;
         pte_t *ptep, pte;
@@ -116,8 +121,8 @@ retry:
                  * Only return device mapping pages in the FOLL_GET case since
                  * they are only valid while holding the pgmap reference.
                  */
-                pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
-                if (pgmap)
+                *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
+                if (*pgmap)
                         page = pte_page(pte);
                 else
                         goto no_page;
@@ -152,15 +157,8 @@ retry:
                 goto retry;
         }
 
-        if (flags & FOLL_GET) {
+        if (flags & FOLL_GET)
                 get_page(page);
-
-                /* drop the pgmap reference now that we hold the page */
-                if (pgmap) {
-                        put_dev_pagemap(pgmap);
-                        pgmap = NULL;
-                }
-        }
         if (flags & FOLL_TOUCH) {
                 if ((flags & FOLL_WRITE) &&
                     !pte_dirty(pte) && !PageDirty(page))
@@ -210,7 +208,8 @@ no_page:
 
 static struct page *follow_pmd_mask(struct vm_area_struct *vma,
                                     unsigned long address, pud_t *pudp,
-                                    unsigned int flags, unsigned int *page_mask)
+                                    unsigned int flags,
+                                    struct follow_page_context *ctx)
 {
         pmd_t *pmd, pmdval;
         spinlock_t *ptl;
@@ -258,13 +257,13 @@ retry:
         }
         if (pmd_devmap(pmdval)) {
                 ptl = pmd_lock(mm, pmd);
-                page = follow_devmap_pmd(vma, address, pmd, flags);
+                page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
                 spin_unlock(ptl);
                 if (page)
                         return page;
         }
         if (likely(!pmd_trans_huge(pmdval)))
-                return follow_page_pte(vma, address, pmd, flags);
+                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 
         if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
                 return no_page_table(vma, flags);
@@ -284,7 +283,7 @@ retry_locked:
         }
         if (unlikely(!pmd_trans_huge(*pmd))) {
                 spin_unlock(ptl);
-                return follow_page_pte(vma, address, pmd, flags);
+                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
         }
         if (flags & FOLL_SPLIT) {
                 int ret;
@@ -307,18 +306,18 @@ retry_locked:
                 }
 
                 return ret ? ERR_PTR(ret) :
-                        follow_page_pte(vma, address, pmd, flags);
+                        follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
         }
         page = follow_trans_huge_pmd(vma, address, pmd, flags);
         spin_unlock(ptl);
-        *page_mask = HPAGE_PMD_NR - 1;
+        ctx->page_mask = HPAGE_PMD_NR - 1;
         return page;
 }
 
-
 static struct page *follow_pud_mask(struct vm_area_struct *vma,
                                     unsigned long address, p4d_t *p4dp,
-                                    unsigned int flags, unsigned int *page_mask)
+                                    unsigned int flags,
+                                    struct follow_page_context *ctx)
 {
         pud_t *pud;
         spinlock_t *ptl;
@@ -344,7 +343,7 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
         }
         if (pud_devmap(*pud)) {
                 ptl = pud_lock(mm, pud);
-                page = follow_devmap_pud(vma, address, pud, flags);
+                page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
                 spin_unlock(ptl);
                 if (page)
                         return page;
@@ -352,13 +351,13 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
         if (unlikely(pud_bad(*pud)))
                 return no_page_table(vma, flags);
 
-        return follow_pmd_mask(vma, address, pud, flags, page_mask);
+        return follow_pmd_mask(vma, address, pud, flags, ctx);
 }
 
-
 static struct page *follow_p4d_mask(struct vm_area_struct *vma,
                                     unsigned long address, pgd_t *pgdp,
-                                    unsigned int flags, unsigned int *page_mask)
+                                    unsigned int flags,
+                                    struct follow_page_context *ctx)
 {
         p4d_t *p4d;
         struct page *page;
@@ -378,7 +377,7 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
                         return page;
                 return no_page_table(vma, flags);
         }
-        return follow_pud_mask(vma, address, p4d, flags, page_mask);
+        return follow_pud_mask(vma, address, p4d, flags, ctx);
 }
 
 /**
@@ -396,13 +395,13 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
  */
 struct page *follow_page_mask(struct vm_area_struct *vma,
                               unsigned long address, unsigned int flags,
-                              unsigned int *page_mask)
+                              struct follow_page_context *ctx)
 {
         pgd_t *pgd;
         struct page *page;
         struct mm_struct *mm = vma->vm_mm;
 
-        *page_mask = 0;
+        ctx->page_mask = 0;
 
         /* make this handle hugepd */
         page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
@@ -431,7 +430,19 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
                 return no_page_table(vma, flags);
         }
 
-        return follow_p4d_mask(vma, address, pgd, flags, page_mask);
+        return follow_p4d_mask(vma, address, pgd, flags, ctx);
+}
+
+struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
+                         unsigned int foll_flags)
+{
+        struct follow_page_context ctx = { NULL };
+        struct page *page;
+
+        page = follow_page_mask(vma, address, foll_flags, &ctx);
+        if (ctx.pgmap)
+                put_dev_pagemap(ctx.pgmap);
+        return page;
 }
 
 static int get_gate_page(struct mm_struct *mm, unsigned long address,
@@ -659,9 +670,9 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                 unsigned int gup_flags, struct page **pages,
                 struct vm_area_struct **vmas, int *nonblocking)
 {
-        long i = 0;
-        unsigned int page_mask;
+        long ret = 0, i = 0;
         struct vm_area_struct *vma = NULL;
+        struct follow_page_context ctx = { NULL };
 
         if (!nr_pages)
                 return 0;
@@ -691,12 +702,14 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                                 pages ? &pages[i] : NULL);
                                 if (ret)
                                         return i ? : ret;
-                                page_mask = 0;
+                                ctx.page_mask = 0;
                                 goto next_page;
                         }
 
-                if (!vma || check_vma_flags(vma, gup_flags))
-                        return i ? : -EFAULT;
+                if (!vma || check_vma_flags(vma, gup_flags)) {
+                        ret = -EFAULT;
+                        goto out;
+                }
                 if (is_vm_hugetlb_page(vma)) {
                         i = follow_hugetlb_page(mm, vma, pages, vmas,
                                         &start, &nr_pages, i,
@@ -709,23 +722,26 @@ retry:
                  * If we have a pending SIGKILL, don't keep faulting pages and
                  * potentially allocating memory.
                  */
-                if (unlikely(fatal_signal_pending(current)))
-                        return i ? i : -ERESTARTSYS;
+                if (unlikely(fatal_signal_pending(current))) {
+                        ret = -ERESTARTSYS;
+                        goto out;
+                }
                 cond_resched();
-                page = follow_page_mask(vma, start, foll_flags, &page_mask);
+
+                page = follow_page_mask(vma, start, foll_flags, &ctx);
                 if (!page) {
-                        int ret;
                         ret = faultin_page(tsk, vma, start, &foll_flags,
                                         nonblocking);
                         switch (ret) {
                         case 0:
                                 goto retry;
+                        case -EBUSY:
+                                ret = 0;
+                                /* FALLTHRU */
                         case -EFAULT:
                         case -ENOMEM:
                         case -EHWPOISON:
-                                return i ? i : ret;
-                        case -EBUSY:
-                                return i;
+                                goto out;
                         case -ENOENT:
                                 goto next_page;
                         }
@@ -737,27 +753,31 @@ retry:
                          */
                         goto next_page;
                 } else if (IS_ERR(page)) {
-                        return i ? i : PTR_ERR(page);
+                        ret = PTR_ERR(page);
+                        goto out;
                 }
                 if (pages) {
                         pages[i] = page;
                         flush_anon_page(vma, page, start);
                         flush_dcache_page(page);
-                        page_mask = 0;
+                        ctx.page_mask = 0;
                 }
 next_page:
                 if (vmas) {
                         vmas[i] = vma;
-                        page_mask = 0;
+                        ctx.page_mask = 0;
                 }
-                page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+                page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
                 if (page_increm > nr_pages)
                         page_increm = nr_pages;
                 i += page_increm;
                 start += page_increm * PAGE_SIZE;
                 nr_pages -= page_increm;
         } while (nr_pages);
-        return i;
+out:
+        if (ctx.pgmap)
+                put_dev_pagemap(ctx.pgmap);
+        return i ? i : ret;
 }
 
 static bool vma_permits_fault(struct vm_area_struct *vma,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8ea1b36bd452..25c7d7509cf4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -852,11 +852,10 @@ static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
 }
 
 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
-                pmd_t *pmd, int flags)
+                pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
 {
         unsigned long pfn = pmd_pfn(*pmd);
         struct mm_struct *mm = vma->vm_mm;
-        struct dev_pagemap *pgmap;
         struct page *page;
 
         assert_spin_locked(pmd_lockptr(mm, pmd));
@@ -886,12 +885,11 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
                 return ERR_PTR(-EEXIST);
 
         pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
-        pgmap = get_dev_pagemap(pfn, NULL);
-        if (!pgmap)
+        *pgmap = get_dev_pagemap(pfn, *pgmap);
+        if (!*pgmap)
                 return ERR_PTR(-EFAULT);
         page = pfn_to_page(pfn);
         get_page(page);
-        put_dev_pagemap(pgmap);
 
         return page;
 }
@@ -1000,11 +998,10 @@ static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
 }
 
 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
-                pud_t *pud, int flags)
+                pud_t *pud, int flags, struct dev_pagemap **pgmap)
 {
         unsigned long pfn = pud_pfn(*pud);
         struct mm_struct *mm = vma->vm_mm;
-        struct dev_pagemap *pgmap;
         struct page *page;
 
         assert_spin_locked(pud_lockptr(mm, pud));
@@ -1028,12 +1025,11 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
                 return ERR_PTR(-EEXIST);
 
         pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
-        pgmap = get_dev_pagemap(pfn, NULL);
-        if (!pgmap)
+        *pgmap = get_dev_pagemap(pfn, *pgmap);
+        if (!*pgmap)
                 return ERR_PTR(-EFAULT);
         page = pfn_to_page(pfn);
         get_page(page);
-        put_dev_pagemap(pgmap);
 
         return page;
 }
diff --git a/mm/nommu.c b/mm/nommu.c
index e4aac33216ae..749276beb109 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1709,11 +1709,9 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
         return ret;
 }
 
-struct page *follow_page_mask(struct vm_area_struct *vma,
-                unsigned long address, unsigned int flags,
-                unsigned int *page_mask)
+struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
+                unsigned int foll_flags)
 {
-        *page_mask = 0;
         return NULL;
 }
 