Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	37
1 file changed, 36 insertions(+), 1 deletion(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d87a5a5fe87d..866a3dbe5c75 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -810,7 +810,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
 		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
-void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned long offset);
@@ -833,6 +832,39 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
 		struct vm_area_struct **pprev, unsigned long start,
 		unsigned long end, unsigned long newflags);
 
+#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST
+/*
+ * get_user_pages_fast provides equivalent functionality to get_user_pages,
+ * operating on current and current->mm (force=0 and doesn't return any vmas).
+ *
+ * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
+ * can be made about locking. get_user_pages_fast is to be implemented in a
+ * way that is advantageous (vs get_user_pages()) when the user memory area is
+ * already faulted in and present in ptes. However if the pages have to be
+ * faulted in, it may turn out to be slightly slower).
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			struct page **pages);
+
+#else
+/*
+ * Should probably be moved to asm-generic, and architectures can include it if
+ * they don't implement their own get_user_pages_fast.
+ */
+#define get_user_pages_fast(start, nr_pages, write, pages)	\
+({								\
+	struct mm_struct *mm = current->mm;			\
+	int ret;						\
+								\
+	down_read(&mm->mmap_sem);				\
+	ret = get_user_pages(current, mm, start, nr_pages,	\
+					write, 0, pages, NULL);	\
+	up_read(&mm->mmap_sem);					\
+								\
+	ret;							\
+})
+#endif
+
 /*
  * A callback you can register to apply pressure to ageable caches.
  *
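
The comment in the hunk above describes the contract of get_user_pages_fast(): it works on current->mm, takes care of any locking itself, and is fastest when the pages are already present in the page tables. As a rough, illustrative sketch (not part of the patch), a caller that wants to pin a user buffer might look like the following; the function name pin_user_buffer and the variables buf, nr and pages are invented for the example, and only minimal error handling is shown.

#include <linux/mm.h>

/*
 * Illustrative sketch only, not part of the patch.  Pins up to @nr pages
 * of the user range starting at @buf for writing, uses them, then drops
 * the page references.  get_user_pages_fast() may pin fewer pages than
 * requested, so the return value must be honoured.
 */
static int pin_user_buffer(unsigned long buf, int nr, struct page **pages)
{
	int i, got;

	/* The caller takes no mmap_sem; the helper handles its own locking. */
	got = get_user_pages_fast(buf, nr, 1 /* write */, pages);
	if (got <= 0)
		return got;

	/* ... access the pinned pages here ... */

	for (i = 0; i < got; i++)
		put_page(pages[i]);

	return got;
}

Whichever side of the #ifdef is compiled in, the call site stays the same. The final hunk adds two further declarations:
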
@@ -1072,6 +1104,9 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
 			unsigned long addr, unsigned long len, pgoff_t pgoff);
 extern void exit_mmap(struct mm_struct *);
 
+extern int mm_take_all_locks(struct mm_struct *mm);
+extern void mm_drop_all_locks(struct mm_struct *mm);
+
 #ifdef CONFIG_PROC_FS
 /* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
 extern void added_exe_file_vma(struct mm_struct *mm);
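
The two declarations added in the last hunk are paired: mm_take_all_locks() takes the VMA-related locks of an mm so its mappings cannot change underneath the caller, and mm_drop_all_locks() releases them again. A minimal sketch of how they might be paired is shown below; the function name register_against_mm is invented for illustration, and the assumptions that the caller already holds mm->mmap_sem for write and that a non-zero return from mm_take_all_locks() means failure should be checked against the actual implementation in mm/mmap.c.

#include <linux/mm.h>

/*
 * Illustrative sketch only, not part of the patch.  Assumes the caller
 * already holds mm->mmap_sem for write and that mm_take_all_locks()
 * returns 0 on success and a negative errno on failure.
 */
static int register_against_mm(struct mm_struct *mm)
{
	int ret;

	ret = mm_take_all_locks(mm);
	if (ret)
		return ret;

	/*
	 * All VMA-related locks are held here, so the mm's mappings
	 * cannot change while we publish our state against them.
	 */

	mm_drop_all_locks(mm);
	return 0;
}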