 mm/gup.c   | 118 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 mm/mlock.c | 118 ------------------------------------------------------------
 2 files changed, 118 insertions(+), 118 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -819,6 +819,124 @@ long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 EXPORT_SYMBOL(get_user_pages);
 
 /**
+ * populate_vma_page_range() - populate a range of pages in the vma.
+ * @vma:   target vma
+ * @start: start address
+ * @end:   end address
+ * @nonblocking:
+ *
+ * This takes care of mlocking the pages too if VM_LOCKED is set.
+ *
+ * return 0 on success, negative error code on error.
+ *
+ * vma->vm_mm->mmap_sem must be held.
+ *
+ * If @nonblocking is NULL, it may be held for read or write and will
+ * be unperturbed.
+ *
+ * If @nonblocking is non-NULL, it must held for read only and may be
+ * released.  If it's released, *@nonblocking will be set to 0.
+ */
+long populate_vma_page_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end, int *nonblocking)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long nr_pages = (end - start) / PAGE_SIZE;
+	int gup_flags;
+
+	VM_BUG_ON(start & ~PAGE_MASK);
+	VM_BUG_ON(end & ~PAGE_MASK);
+	VM_BUG_ON_VMA(start < vma->vm_start, vma);
+	VM_BUG_ON_VMA(end > vma->vm_end, vma);
+	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+
+	gup_flags = FOLL_TOUCH | FOLL_POPULATE;
+	/*
+	 * We want to touch writable mappings with a write fault in order
+	 * to break COW, except for shared mappings because these don't COW
+	 * and we would not want to dirty them for nothing.
+	 */
+	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
+		gup_flags |= FOLL_WRITE;
+
+	/*
+	 * We want mlock to succeed for regions that have any permissions
+	 * other than PROT_NONE.
+	 */
+	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
+		gup_flags |= FOLL_FORCE;
+
+	/*
+	 * We made sure addr is within a VMA, so the following will
+	 * not result in a stack expansion that recurses back here.
+	 */
+	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
+				NULL, NULL, nonblocking);
+}
+
+/*
+ * __mm_populate - populate and/or mlock pages within a range of address space.
+ *
+ * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
+ * flags. VMAs must be already marked with the desired vm_flags, and
+ * mmap_sem must not be held.
+ */
+int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long end, nstart, nend;
+	struct vm_area_struct *vma = NULL;
+	int locked = 0;
+	long ret = 0;
+
+	VM_BUG_ON(start & ~PAGE_MASK);
+	VM_BUG_ON(len != PAGE_ALIGN(len));
+	end = start + len;
+
+	for (nstart = start; nstart < end; nstart = nend) {
+		/*
+		 * We want to fault in pages for [nstart; end) address range.
+		 * Find first corresponding VMA.
+		 */
+		if (!locked) {
+			locked = 1;
+			down_read(&mm->mmap_sem);
+			vma = find_vma(mm, nstart);
+		} else if (nstart >= vma->vm_end)
+			vma = vma->vm_next;
+		if (!vma || vma->vm_start >= end)
+			break;
+		/*
+		 * Set [nstart; nend) to intersection of desired address
+		 * range with the first VMA. Also, skip undesirable VMA types.
+		 */
+		nend = min(end, vma->vm_end);
+		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+			continue;
+		if (nstart < vma->vm_start)
+			nstart = vma->vm_start;
+		/*
+		 * Now fault in a range of pages. populate_vma_page_range()
+		 * double checks the vma flags, so that it won't mlock pages
+		 * if the vma was already munlocked.
+		 */
+		ret = populate_vma_page_range(vma, nstart, nend, &locked);
+		if (ret < 0) {
+			if (ignore_errors) {
+				ret = 0;
+				continue;	/* continue at next VMA */
+			}
+			break;
+		}
+		nend = nstart + ret * PAGE_SIZE;
+		ret = 0;
+	}
+	if (locked)
+		up_read(&mm->mmap_sem);
+	return ret;	/* 0 or negative error code */
+}
+
+/**
  * get_dump_page() - pin user page in memory while writing it to core dump
  * @addr: user address
  *
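For orientation, and not part of the patch above: once __mm_populate() lives in mm/gup.c, mmap() with MAP_POPULATE or MAP_LOCKED still reaches it through the small mm_populate() wrapper in include/linux/mm.h, which runs in ignore-errors mode. A rough sketch of that existing wrapper, quoted from memory:

/*
 * Sketch of the include/linux/mm.h helper (assumed unchanged by this
 * patch); MAP_POPULATE / MAP_LOCKED callers populate the new mapping
 * after mmap_sem has been dropped and deliberately ignore any error.
 */
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}

The mlock() syscall is the other caller and passes ignore_errors == 0; see the sketch after the mm/mlock.c hunks below.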
diff --git a/mm/mlock.c b/mm/mlock.c
index 0214263fca45..6fd2cf15e868 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -205,62 +205,6 @@ out:
 	return nr_pages - 1;
 }
 
-/**
- * populate_vma_page_range() - populate a range of pages in the vma.
- * @vma:   target vma
- * @start: start address
- * @end:   end address
- * @nonblocking:
- *
- * This takes care of mlocking the pages too if VM_LOCKED is set.
- *
- * return 0 on success, negative error code on error.
- *
- * vma->vm_mm->mmap_sem must be held.
- *
- * If @nonblocking is NULL, it may be held for read or write and will
- * be unperturbed.
- *
- * If @nonblocking is non-NULL, it must held for read only and may be
- * released.  If it's released, *@nonblocking will be set to 0.
- */
-long populate_vma_page_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end, int *nonblocking)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	unsigned long nr_pages = (end - start) / PAGE_SIZE;
-	int gup_flags;
-
-	VM_BUG_ON(start & ~PAGE_MASK);
-	VM_BUG_ON(end & ~PAGE_MASK);
-	VM_BUG_ON_VMA(start < vma->vm_start, vma);
-	VM_BUG_ON_VMA(end > vma->vm_end, vma);
-	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
-
-	gup_flags = FOLL_TOUCH | FOLL_POPULATE;
-	/*
-	 * We want to touch writable mappings with a write fault in order
-	 * to break COW, except for shared mappings because these don't COW
-	 * and we would not want to dirty them for nothing.
-	 */
-	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
-		gup_flags |= FOLL_WRITE;
-
-	/*
-	 * We want mlock to succeed for regions that have any permissions
-	 * other than PROT_NONE.
-	 */
-	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
-		gup_flags |= FOLL_FORCE;
-
-	/*
-	 * We made sure addr is within a VMA, so the following will
-	 * not result in a stack expansion that recurses back here.
-	 */
-	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
-				NULL, NULL, nonblocking);
-}
-
 /*
  * convert get_user_pages() return value to posix mlock() error
  */
@@ -660,68 +604,6 @@ static int do_mlock(unsigned long start, size_t len, int on)
 	return error;
 }
 
-/*
- * __mm_populate - populate and/or mlock pages within a range of address space.
- *
- * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
- * flags. VMAs must be already marked with the desired vm_flags, and
- * mmap_sem must not be held.
- */
-int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long end, nstart, nend;
-	struct vm_area_struct *vma = NULL;
-	int locked = 0;
-	long ret = 0;
-
-	VM_BUG_ON(start & ~PAGE_MASK);
-	VM_BUG_ON(len != PAGE_ALIGN(len));
-	end = start + len;
-
-	for (nstart = start; nstart < end; nstart = nend) {
-		/*
-		 * We want to fault in pages for [nstart; end) address range.
-		 * Find first corresponding VMA.
-		 */
-		if (!locked) {
-			locked = 1;
-			down_read(&mm->mmap_sem);
-			vma = find_vma(mm, nstart);
-		} else if (nstart >= vma->vm_end)
-			vma = vma->vm_next;
-		if (!vma || vma->vm_start >= end)
-			break;
-		/*
-		 * Set [nstart; nend) to intersection of desired address
-		 * range with the first VMA. Also, skip undesirable VMA types.
-		 */
-		nend = min(end, vma->vm_end);
-		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
-			continue;
-		if (nstart < vma->vm_start)
-			nstart = vma->vm_start;
-		/*
-		 * Now fault in a range of pages. populate_vma_page_range()
-		 * double checks the vma flags, so that it won't mlock pages
-		 * if the vma was already munlocked.
-		 */
-		ret = populate_vma_page_range(vma, nstart, nend, &locked);
-		if (ret < 0) {
-			if (ignore_errors) {
-				ret = 0;
-				continue;	/* continue at next VMA */
-			}
-			break;
-		}
-		nend = nstart + ret * PAGE_SIZE;
-		ret = 0;
-	}
-	if (locked)
-		up_read(&mm->mmap_sem);
-	return ret;	/* 0 or negative error code */
-}
-
 SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
 {
 	unsigned long locked;
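The truncated context above is the start of the mlock() syscall, the other consumer of __mm_populate(): do_mlock() marks the VMAs VM_LOCKED under mmap_sem, the lock is dropped, and the range is then populated with ignore_errors == 0 so that failures are converted to POSIX mlock() errors by __mlock_posix_error_return() (the helper behind the "convert get_user_pages() return value to posix mlock() error" comment left in place by this patch). A rough, paraphrased sketch of that tail, unchanged by the patch and quoted from memory:

	/* Paraphrased end of SYSCALL_DEFINE2(mlock, ...); not part of this diff. */
	error = do_mlock(start, len, 1);	/* mark VMAs VM_LOCKED */
	up_write(&current->mm->mmap_sem);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);	/* fault pages in now; report failures */
	if (error)
		return __mlock_posix_error_return(error);
	return 0;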