Diffstat (limited to 'include')
 include/linux/mm.h | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+), 0 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d87a5a5fe87d..f3fd70d6029f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -833,6 +833,39 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
 			struct vm_area_struct **pprev, unsigned long start,
 			unsigned long end, unsigned long newflags);
 
+#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST
+/*
+ * get_user_pages_fast provides equivalent functionality to get_user_pages,
+ * operating on current and current->mm (force=0 and doesn't return any vmas).
+ *
+ * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
+ * can be made about locking. get_user_pages_fast is to be implemented in a
+ * way that is advantageous (vs get_user_pages()) when the user memory area is
+ * already faulted in and present in ptes. However, if the pages have to be
+ * faulted in, it may turn out to be slightly slower.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			struct page **pages);
+
+#else
+/*
+ * Should probably be moved to asm-generic, and architectures can include it if
+ * they don't implement their own get_user_pages_fast.
+ */
+#define get_user_pages_fast(start, nr_pages, write, pages)	\
+({								\
+	struct mm_struct *mm = current->mm;			\
+	int ret;						\
+								\
+	down_read(&mm->mmap_sem);				\
+	ret = get_user_pages(current, mm, start, nr_pages,	\
+			write, 0, pages, NULL);			\
+	up_read(&mm->mmap_sem);				\
+								\
+	ret;							\
+})
+#endif
+
 /*
  * A callback you can register to apply pressure to ageable caches.
  *
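
For illustration, here is a minimal sketch of how a caller might use the new
interface once this patch is applied. The helper name pin_user_buffer, the
write=1 choice, and the error handling are assumptions for the example, not
part of the patch:

/*
 * Hypothetical caller: pin nr_pages of a user buffer for write access
 * with get_user_pages_fast(), dropping any partially pinned pages on
 * failure. pin_user_buffer is an illustrative name, not from this patch.
 */
#include <linux/errno.h>
#include <linux/mm.h>

static int pin_user_buffer(unsigned long uaddr, int nr_pages,
			   struct page **pages)
{
	int nr_pinned;

	/* write=1: we intend to write to the pinned pages */
	nr_pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
	if (nr_pinned < 0)
		return nr_pinned;	/* hard error from the fast path */
	if (nr_pinned < nr_pages) {
		/* partial pin: release what we got and report failure */
		while (nr_pinned > 0)
			put_page(pages[--nr_pinned]);
		return -EFAULT;
	}
	return 0;
}

get_user_pages_fast() takes a reference on every page it returns, so a
caller like the one above must drop each reference with put_page() once it
is done with the buffer.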