Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	48
1 file changed, 22 insertions(+), 26 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bff1f0d475c7..d006e93d5c93 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -7,7 +7,6 @@
 
 #include <linux/gfp.h>
 #include <linux/list.h>
-#include <linux/mmdebug.h>
 #include <linux/mmzone.h>
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
@@ -19,6 +18,7 @@ struct anon_vma;
 struct file_ra_state;
 struct user_struct;
 struct writeback_control;
+struct rlimit;
 
 #ifndef CONFIG_DISCONTIGMEM	/* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
@@ -580,12 +580,10 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
  */
 static inline unsigned long round_hint_to_min(unsigned long hint)
 {
-#ifdef CONFIG_SECURITY
 	hint &= PAGE_MASK;
 	if (((void *)hint != NULL) &&
 	    (hint < mmap_min_addr))
 		return PAGE_ALIGN(mmap_min_addr);
-#endif
 	return hint;
 }
 
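Note: dropping the #ifdef CONFIG_SECURITY guard makes the mmap_min_addr clamp unconditional (this assumes mmap_min_addr is defined even when security is compiled out, which the rest of this series arranges). An illustration of the resulting behaviour, assuming PAGE_SIZE == 4096 and mmap_min_addr == 0x10000:

	round_hint_to_min(0x12345);	/* -> 0x12000: masked, already >= mmap_min_addr */
	round_hint_to_min(0x2345);	/* -> 0x10000: non-NULL but too low, clamped up */
	round_hint_to_min(0);		/* -> 0: a NULL hint passes through unchanged */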
@@ -726,7 +724,7 @@ static inline int shmem_lock(struct file *file, int lock,
 	return 0;
 }
 #endif
-struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
+struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
 
 int shmem_zero_setup(struct vm_area_struct *);
 
@@ -794,6 +792,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+	unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
 		unsigned int flags, unsigned long *prot, resource_size_t *phys);
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
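The new follow_pfn() helper resolves a user virtual address in a VM_IO/VM_PFNMAP mapping to its page frame number. A sketch of how a driver might call it (example_addr_to_pfn is a hypothetical helper, not part of this patch):

	static int example_addr_to_pfn(struct mm_struct *mm, unsigned long uaddr,
				       unsigned long *pfn)
	{
		struct vm_area_struct *vma;
		int ret = -EINVAL;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, uaddr);
		/* follow_pfn() itself rejects vmas without VM_IO/VM_PFNMAP */
		if (vma && vma->vm_start <= uaddr)
			ret = follow_pfn(vma, uaddr, pfn);
		up_read(&mm->mmap_sem);
		return ret;
	}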
@@ -810,11 +810,11 @@ extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
 
 #ifdef CONFIG_MMU
 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, int write_access);
+			unsigned long address, unsigned int flags);
 #else
 static inline int handle_mm_fault(struct mm_struct *mm,
 			struct vm_area_struct *vma, unsigned long address,
-			int write_access)
+			unsigned int flags)
 {
 	/* should never happen if there's no MMU */
 	BUG();
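The handle_mm_fault() signature change turns the old write_access boolean into a bit in a flags word. A sketch of the caller-side conversion, assuming FAULT_FLAG_WRITE is the flag that replaces the boolean:

	/* before: handle_mm_fault(mm, vma, address, write_access); */
	handle_mm_fault(mm, vma, address,
			write_access ? FAULT_FLAG_WRITE : 0);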
@@ -825,8 +825,11 @@ static inline int handle_mm_fault(struct mm_struct *mm,
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
-	int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+			unsigned long start, int len, int write, int force,
+			struct page **pages, struct vm_area_struct **vmas);
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			struct page **pages);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned long offset);
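get_user_pages_fast() is now declared here alongside get_user_pages(). A sketch of typical usage, pinning user pages of current->mm for I/O and dropping the references afterwards (example_pin_pages and do_io_on_page are hypothetical):

	static int example_pin_pages(unsigned long uaddr, int nr_pages)
	{
		struct page **pages;
		int i, got;

		pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return -ENOMEM;

		got = get_user_pages_fast(uaddr, nr_pages, 1, pages); /* 1 == write */
		for (i = 0; i < got; i++) {
			do_io_on_page(pages[i]);	/* hypothetical callback */
			put_page(pages[i]);		/* drop the pin */
		}
		kfree(pages);
		return got == nr_pages ? 0 : -EFAULT;
	}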
@@ -851,17 +854,10 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
 			unsigned long end, unsigned long newflags);
 
 /*
- * get_user_pages_fast provides equivalent functionality to get_user_pages,
- * operating on current and current->mm (force=0 and doesn't return any vmas).
- *
- * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
- * can be made about locking. get_user_pages_fast is to be implemented in a
- * way that is advantageous (vs get_user_pages()) when the user memory area is
- * already faulted in and present in ptes. However if the pages have to be
- * faulted in, it may turn out to be slightly slower.
+ * doesn't attempt to fault and will return short.
  */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages);
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			  struct page **pages);
 
 /*
  * A callback you can register to apply pressure to ageable caches.
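The double-underscore variant pins only pages that are already present: per the new comment it never faults and may return fewer pages than requested. A sketch of the fallback pattern this enables (assumption: both variants operate on current->mm, as get_user_pages_fast() does):

	got = __get_user_pages_fast(start, nr_pages, 1, pages);
	if (got < nr_pages)	/* fall back to the faulting path for the rest */
		got += get_user_pages_fast(start + ((unsigned long)got << PAGE_SHIFT),
					   nr_pages - got, 1, pages + got);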
@@ -1031,8 +1027,6 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn,
 					unsigned long end_pfn);
 extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
 					unsigned long end_pfn);
-extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
 extern void remove_all_active_ranges(void);
 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
 						unsigned long end_pfn);
@@ -1064,7 +1058,8 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn);
 extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long,
 				unsigned long, enum memmap_context);
-extern void setup_per_zone_pages_min(void);
+extern void setup_per_zone_wmarks(void);
+extern void calculate_zone_inactive_ratio(struct zone *zone);
 extern void mem_init(void);
 extern void __init mmap_init(void);
 extern void show_mem(void);
@@ -1181,8 +1176,6 @@ void task_dirty_inc(struct task_struct *tsk);
 #define VM_MAX_READAHEAD	128	/* kbytes */
 #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
 
-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			pgoff_t offset, unsigned long nr_to_read);
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			pgoff_t offset, unsigned long nr_to_read);
 
@@ -1200,6 +1193,9 @@ void page_cache_async_readahead(struct address_space *mapping,
 			unsigned long size);
 
 unsigned long max_sane_readahead(unsigned long nr);
+unsigned long ra_submit(struct file_ra_state *ra,
+			struct address_space *mapping,
+			struct file *filp);
 
 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
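ra_submit() exposes the readahead submission step so callers outside mm/readahead.c can issue the window described by a file_ra_state. A sketch of intended use (the field choices are illustrative, not mandated by this patch):

	/* describe a readahead window starting at 'offset', then submit it */
	ra->start = offset;
	ra->size = ra->ra_pages;
	ra->async_size = ra->ra_pages / 4;
	ra_submit(ra, mapping, filp);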
@@ -1319,8 +1315,8 @@ int vmemmap_populate_basepages(struct page *start_page,
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);
 
-extern void *alloc_locked_buffer(size_t size);
-extern void free_locked_buffer(void *buffer, size_t size);
-extern void release_locked_buffer(void *buffer, size_t size);
+extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
+				size_t size);
+extern void refund_locked_memory(struct mm_struct *mm, size_t size);
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
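The new helpers replace the alloc/free_locked_buffer() API with plain accounting against an rlimit (hence the struct rlimit forward declaration added above). A sketch of a caller charging and later refunding a locked allocation (example_charge is hypothetical; passing current->signal->rlim + RLIMIT_MEMLOCK assumes the per-process rlimit array of this era's kernels):

	static int example_charge(struct mm_struct *mm, size_t size)
	{
		int ret;

		ret = account_locked_memory(mm,
				current->signal->rlim + RLIMIT_MEMLOCK, size);
		if (ret)
			return ret;	/* over RLIMIT_MEMLOCK */
		/* ... allocate and use the locked buffer ... */
		refund_locked_memory(mm, size);
		return 0;
	}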