author		Ingo Molnar <mingo@elte.hu>	2009-06-17 07:06:17 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-06-17 07:06:17 -0400
commit		a3d06cc6aa3e765dc2bf98626f87272dcf641dca (patch)
tree		aa3e49b58f08d6c0ea55cdca4fb5e6c8ba6ae333 /include/linux/mm.h
parent		0990b1c65729012a63e0eeca93aaaafea4e9a064 (diff)
parent		65795efbd380a832ae508b04dba8f8e53f0b84d9 (diff)
Merge branch 'linus' into perfcounters/core
Conflicts:
	arch/x86/include/asm/kmap_types.h
	include/linux/mm.h
	include/asm-generic/kmap_types.h

Merge reason: We crossed changes with kmap_types.h cleanups in mainline.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/mm.h')
 include/linux/mm.h | 33 +++++++++++++--------------------
 1 file changed, 13 insertions(+), 20 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b457bc047ab1..cf260d848eb9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -7,7 +7,6 @@
 
 #include <linux/gfp.h>
 #include <linux/list.h>
-#include <linux/mmdebug.h>
 #include <linux/mmzone.h>
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
@@ -725,7 +724,7 @@ static inline int shmem_lock(struct file *file, int lock,
 	return 0;
 }
 #endif
-struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
+struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
 
 int shmem_zero_setup(struct vm_area_struct *);
 
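The change above is pure const-correctness: shmem_file_setup() does not modify the name, so callers can now pass string literals without casts. As a sketch of a typical caller, closely modeled on shmem_zero_setup() in mm/shmem.c of this era (the body here is illustrative, not a verbatim copy):

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/err.h>

int shmem_zero_setup(struct vm_area_struct *vma)
{
	loff_t size = vma->vm_end - vma->vm_start;
	struct file *file;

	/* The string literal is fine now that the name parameter is const. */
	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);	/* drop any previous backing file */
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;	/* defined in mm/shmem.c */
	return 0;
}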
@@ -793,6 +792,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 		struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+	unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
 		unsigned int flags, unsigned long *prot, resource_size_t *phys);
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
@@ -824,8 +825,11 @@ static inline int handle_mm_fault(struct mm_struct *mm,
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
-	int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+			unsigned long start, int len, int write, int force,
+			struct page **pages, struct vm_area_struct **vmas);
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			struct page **pages);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned long offset);
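This hunk moves the get_user_pages_fast() declaration up next to get_user_pages(), whose parameters it mirrors minus tsk/mm, force and vmas (it always acts on current->mm). A hedged sketch of the usual call pattern of this era, including the slow-path fallback; the helper name is an illustrative assumption:

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical helper: pin nr_pages user pages starting at the
 * page-aligned address 'start'; each pinned page must later be
 * released with put_page(). Returns the number pinned or -errno. */
static int pin_user_buffer(unsigned long start, int nr_pages, int write,
			   struct page **pages)
{
	int pinned;

	/* Fast path: walks page tables without taking mmap_sem; succeeds
	 * when the PTEs are already present. */
	pinned = get_user_pages_fast(start, nr_pages, write, pages);
	if (pinned == nr_pages)
		return pinned;

	/* Partial success: drop the pins before retrying the full range. */
	while (pinned > 0)
		put_page(pages[--pinned]);

	/* Slow path: fault the pages in under mmap_sem. */
	down_read(&current->mm->mmap_sem);
	pinned = get_user_pages(current, current->mm, start, nr_pages,
				write, 0, pages, NULL);
	up_read(&current->mm->mmap_sem);
	return pinned;
}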
@@ -850,19 +854,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
 		unsigned long end, unsigned long newflags);
 
 /*
- * get_user_pages_fast provides equivalent functionality to get_user_pages,
- * operating on current and current->mm (force=0 and doesn't return any vmas).
- *
- * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
- * can be made about locking. get_user_pages_fast is to be implemented in a
- * way that is advantageous (vs get_user_pages()) when the user memory area is
- * already faulted in and present in ptes. However if the pages have to be
- * faulted in, it may turn out to be slightly slower).
- */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages);
-
-/*
  * doesn't attempt to fault and will return short.
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
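What survives the removal is the comment for __get_user_pages_fast(), which never faults and "will return short" when pages are absent; that is exactly what lets atomic contexts, such as the perf counter callchain sampling carried by this branch, attempt a pin without sleeping. A sketch of that pattern (illustrative, not a verbatim copy of the perf code):

#include <linux/mm.h>
#include <linux/highmem.h>

/* Sketch: inspect one user page from atomic (even NMI) context.
 * Returns -EFAULT rather than faulting if the page is not present. */
static int peek_user_page(unsigned long addr)
{
	struct page *page;
	void *kaddr;

	if (__get_user_pages_fast(addr & PAGE_MASK, 1, 0, &page) != 1)
		return -EFAULT;

	kaddr = kmap_atomic(page, KM_USER0);
	/* ... read what is needed at kaddr + (addr & ~PAGE_MASK) ... */
	kunmap_atomic(kaddr, KM_USER0);
	put_page(page);
	return 0;
}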
@@ -1067,7 +1058,8 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn);
 extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long,
 				unsigned long, enum memmap_context);
-extern void setup_per_zone_pages_min(void);
+extern void setup_per_zone_wmarks(void);
+extern void calculate_zone_inactive_ratio(struct zone *zone);
 extern void mem_init(void);
 extern void __init mmap_init(void);
 extern void show_mem(void);
@@ -1184,8 +1176,6 @@ void task_dirty_inc(struct task_struct *tsk);
 #define VM_MAX_READAHEAD	128	/* kbytes */
 #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
 
-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			pgoff_t offset, unsigned long nr_to_read);
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			pgoff_t offset, unsigned long nr_to_read);
 
@@ -1203,6 +1193,9 @@ void page_cache_async_readahead(struct address_space *mapping,
 			unsigned long size);
 
 unsigned long max_sane_readahead(unsigned long nr);
+unsigned long ra_submit(struct file_ra_state *ra,
+		       struct address_space *mapping,
+		       struct file *filp);
 
 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
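ra_submit() was previously internal to mm/readahead.c; declaring it in mm.h lets other core-mm code submit the readahead I/O described by an already-filled file_ra_state. A sketch of the calling pattern, modeled on the mmap fault readahead code of this period (the helper name and window choice are illustrative):

#include <linux/mm.h>
#include <linux/fs.h>

/* Hypothetical helper: center a synchronous readahead window of
 * ra_pages pages on 'offset' (a page index) and submit the I/O. */
static void readahead_around(struct file *filp,
			     struct address_space *mapping,
			     struct file_ra_state *ra, pgoff_t offset)
{
	ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
	ra->size = ra->ra_pages;
	ra->async_size = 0;	/* fully synchronous: no async tail */
	ra_submit(ra, mapping, filp);
}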