Diffstat (limited to 'include/linux/mm.h')

 include/linux/mm.h | 34 +++++++++++++++++++++++++++++++++-
 1 file changed, 33 insertions(+), 1 deletion(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d87a5a5fe87d..6e695eaab4ce 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -810,7 +810,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
 		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
-void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned long offset);
@@ -833,6 +832,39 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
 		struct vm_area_struct **pprev, unsigned long start,
 		unsigned long end, unsigned long newflags);
 
+#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST
+/*
+ * get_user_pages_fast provides equivalent functionality to get_user_pages,
+ * operating on current and current->mm (force=0 and doesn't return any vmas).
+ *
+ * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
+ * can be made about locking. get_user_pages_fast is to be implemented in a
+ * way that is advantageous (vs get_user_pages()) when the user memory area is
+ * already faulted in and present in ptes. However if the pages have to be
+ * faulted in, it may turn out to be slightly slower.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			struct page **pages);
+
+#else
+/*
+ * Should probably be moved to asm-generic, and architectures can include it if
+ * they don't implement their own get_user_pages_fast.
+ */
+#define get_user_pages_fast(start, nr_pages, write, pages)	\
+({								\
+	struct mm_struct *mm = current->mm;			\
+	int ret;						\
+								\
+	down_read(&mm->mmap_sem);				\
+	ret = get_user_pages(current, mm, start, nr_pages,	\
+			write, 0, pages, NULL);			\
+	up_read(&mm->mmap_sem);					\
+								\
+	ret;							\
+})
+#endif
+
 /*
  * A callback you can register to apply pressure to ageable caches.
  *
