author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2015-04-14 18:44:45 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>		2015-04-14 19:49:00 -0400
commit		acc3c8d15eed6b68c7edf5bfaea884753aaa8e85 (patch)
tree		dc5b43408fd72cfd45ac82d1af7c99af7e4e3466 /mm/gup.c
parent		c561259ca79a88be540a75e84b45d49123014aa4 (diff)
mm: move mm_populate()-related code to mm/gup.c
It's odd that we have populate_vma_page_range() and __mm_populate() in
mm/mlock.c. Their implementation is generic memory population; mlocking
is only a possible side effect, applied when VM_LOCKED is set.
Since __get_user_pages() is the core of the implementation, let's move
the code into mm/gup.c.
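
[Editor's note, for orientation only and not part of the patch: callers reach
__mm_populate() either from mlock(), which wants errors reported, or from
mmap() with MAP_POPULATE / MAP_LOCKED via a small inline wrapper that
deliberately ignores errors. A rough sketch of that wrapper, as it appears in
include/linux/mm.h in kernels of this era:

	/*
	 * Context sketch only, not part of this commit: the MAP_POPULATE /
	 * MAP_LOCKED path prefaults through this wrapper, while the mlock()
	 * syscall calls __mm_populate(start, len, 0) so that failures are
	 * reported to userspace.
	 */
	static inline void mm_populate(unsigned long addr, unsigned long len)
	{
		/* Population here is best effort; errors are ignored */
		(void) __mm_populate(addr, len, 1);
	}
]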
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/gup.c')
-rw-r--r--	mm/gup.c	118
1 file changed, 118 insertions(+), 0 deletions(-)
@@ -819,6 +819,124 @@ long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 EXPORT_SYMBOL(get_user_pages);
 
 /**
+ * populate_vma_page_range() - populate a range of pages in the vma.
+ * @vma: target vma
+ * @start: start address
+ * @end: end address
+ * @nonblocking:
+ *
+ * This takes care of mlocking the pages too if VM_LOCKED is set.
+ *
+ * return 0 on success, negative error code on error.
+ *
+ * vma->vm_mm->mmap_sem must be held.
+ *
+ * If @nonblocking is NULL, it may be held for read or write and will
+ * be unperturbed.
+ *
+ * If @nonblocking is non-NULL, it must held for read only and may be
+ * released. If it's released, *@nonblocking will be set to 0.
+ */
+long populate_vma_page_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end, int *nonblocking)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long nr_pages = (end - start) / PAGE_SIZE;
+	int gup_flags;
+
+	VM_BUG_ON(start & ~PAGE_MASK);
+	VM_BUG_ON(end & ~PAGE_MASK);
+	VM_BUG_ON_VMA(start < vma->vm_start, vma);
+	VM_BUG_ON_VMA(end > vma->vm_end, vma);
+	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+
+	gup_flags = FOLL_TOUCH | FOLL_POPULATE;
+	/*
+	 * We want to touch writable mappings with a write fault in order
+	 * to break COW, except for shared mappings because these don't COW
+	 * and we would not want to dirty them for nothing.
+	 */
+	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
+		gup_flags |= FOLL_WRITE;
+
+	/*
+	 * We want mlock to succeed for regions that have any permissions
+	 * other than PROT_NONE.
+	 */
+	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
+		gup_flags |= FOLL_FORCE;
+
+	/*
+	 * We made sure addr is within a VMA, so the following will
+	 * not result in a stack expansion that recurses back here.
+	 */
+	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
+				NULL, NULL, nonblocking);
+}
+
+/*
+ * __mm_populate - populate and/or mlock pages within a range of address space.
+ *
+ * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
+ * flags. VMAs must be already marked with the desired vm_flags, and
+ * mmap_sem must not be held.
+ */
+int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long end, nstart, nend;
+	struct vm_area_struct *vma = NULL;
+	int locked = 0;
+	long ret = 0;
+
+	VM_BUG_ON(start & ~PAGE_MASK);
+	VM_BUG_ON(len != PAGE_ALIGN(len));
+	end = start + len;
+
+	for (nstart = start; nstart < end; nstart = nend) {
+		/*
+		 * We want to fault in pages for [nstart; end) address range.
+		 * Find first corresponding VMA.
+		 */
+		if (!locked) {
+			locked = 1;
+			down_read(&mm->mmap_sem);
+			vma = find_vma(mm, nstart);
+		} else if (nstart >= vma->vm_end)
+			vma = vma->vm_next;
+		if (!vma || vma->vm_start >= end)
+			break;
+		/*
+		 * Set [nstart; nend) to intersection of desired address
+		 * range with the first VMA. Also, skip undesirable VMA types.
+		 */
+		nend = min(end, vma->vm_end);
+		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+			continue;
+		if (nstart < vma->vm_start)
+			nstart = vma->vm_start;
+		/*
+		 * Now fault in a range of pages. populate_vma_page_range()
+		 * double checks the vma flags, so that it won't mlock pages
+		 * if the vma was already munlocked.
+		 */
+		ret = populate_vma_page_range(vma, nstart, nend, &locked);
+		if (ret < 0) {
+			if (ignore_errors) {
+				ret = 0;
+				continue;	/* continue at next VMA */
+			}
+			break;
+		}
+		nend = nstart + ret * PAGE_SIZE;
+		ret = 0;
+	}
+	if (locked)
+		up_read(&mm->mmap_sem);
+	return ret;	/* 0 or negative error code */
+}
+
+/**
  * get_dump_page() - pin user page in memory while writing it to core dump
  * @addr: user address
  *
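
[Editor's note: to see this code path from userspace (illustrative only, not
part of the commit): both mmap(MAP_POPULATE) and mlock() end up in the
__mm_populate() loop above. A minimal, self-contained demo:

	/* Minimal userspace demo: exercises both entry points into
	 * __mm_populate(). Assumes Linux; mlock() may fail with EPERM or
	 * ENOMEM if RLIMIT_MEMLOCK is low, which perror() will report. */
	#define _DEFAULT_SOURCE		/* for MAP_ANONYMOUS */
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = 4 * (size_t)sysconf(_SC_PAGESIZE);

		/* MAP_POPULATE: prefault at mmap() time; the kernel ignores
		 * population errors here (ignore_errors == 1) */
		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
		if (buf == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/* mlock(): populate and pin; errors are reported to the
		 * caller (ignore_errors == 0) */
		if (mlock(buf, len))
			perror("mlock");

		memset(buf, 0xaa, len);	/* pages are already resident */

		munlock(buf, len);
		munmap(buf, len);
		return 0;
	}
]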