Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--   include/linux/mm.h | 75
1 file changed, 57 insertions(+), 18 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 586a943cab01..866a3dbe5c75 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -41,6 +41,9 @@ extern unsigned long mmap_min_addr;
 
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+
 /*
  * Linux kernel virtual memory manager primitives.
  * The idea being to have a "virtual" mm in the same way
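The new PAGE_ALIGN() helper centralizes the rounding pattern that callers
previously open-coded as (addr + PAGE_SIZE - 1) & PAGE_MASK. A minimal sketch
of its effect, with hypothetical values and assuming PAGE_SIZE == 4096:

unsigned long addr = 0x1234;
size_t len = 5000;

unsigned long aligned = PAGE_ALIGN(addr);               /* 0x2000: next page boundary */
unsigned long npages  = PAGE_ALIGN(len) >> PAGE_SHIFT;  /* 2: whole pages covering len bytes */
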
@@ -100,6 +103,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_DONTEXPAND   0x00040000      /* Cannot expand with mremap() */
 #define VM_RESERVED     0x00080000      /* Count as reserved_vm like IO */
 #define VM_ACCOUNT      0x00100000      /* Is a VM accounted object */
+#define VM_NORESERVE    0x00200000      /* should the VM suppress accounting */
 #define VM_HUGETLB      0x00400000      /* Huge TLB Page VM */
 #define VM_NONLINEAR    0x00800000      /* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY  0x01000000      /* T if mapped copy of data (nommu mmap) */
@@ -108,6 +112,7 @@ extern unsigned int kobjsize(const void *objp);
 
 #define VM_CAN_NONLINEAR 0x08000000     /* Has ->fault & does nonlinear pages */
 #define VM_MIXEDMAP     0x10000000      /* Can contain "struct page" and pure PFN pages */
+#define VM_SAO          0x20000000      /* Strong Access Ordering (powerpc) */
 
 #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -165,12 +170,16 @@ struct vm_operations_struct {
         void (*open)(struct vm_area_struct * area);
         void (*close)(struct vm_area_struct * area);
         int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
-        unsigned long (*nopfn)(struct vm_area_struct *area,
-                        unsigned long address);
 
         /* notification that a previously read-only page is about to become
          * writable, if an error is returned it will cause a SIGBUS */
         int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
+
+        /* called by access_process_vm when get_user_pages() fails, typically
+         * for use by special VMAs that can switch between memory and hardware
+         */
+        int (*access)(struct vm_area_struct *vma, unsigned long addr,
+                      void *buf, int len, int write);
 #ifdef CONFIG_NUMA
         /*
          * set_policy() op must add a reference to any non-NULL @new mempolicy
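The new ->access hook gives access_process_vm() (and so ptrace and
/proc/pid/mem) a way to reach VMAs that get_user_pages() cannot handle, such
as PFN-mapped device apertures. A sketch of a driver-side implementation,
assuming a hypothetical device with a private backing buffer:

struct mydev {                  /* hypothetical device state */
        void *buf;
        size_t buf_size;
};

/* Called only after get_user_pages() has failed on this VMA; returns
 * the number of bytes transferred, as access_process_vm() expects. */
static int mydev_vma_access(struct vm_area_struct *vma, unsigned long addr,
                            void *buf, int len, int write)
{
        struct mydev *dev = vma->vm_private_data;
        unsigned long offset = addr - vma->vm_start;

        if (offset >= dev->buf_size)
                return 0;
        if (len > dev->buf_size - offset)
                len = dev->buf_size - offset;

        if (write)
                memcpy(dev->buf + offset, buf, len);
        else
                memcpy(buf, dev->buf + offset, len);
        return len;
}

static struct vm_operations_struct mydev_vm_ops = {
        .access = mydev_vma_access,
};
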
@@ -674,13 +683,6 @@ static inline int page_mapped(struct page *page)
 }
 
 /*
- * Error return values for the *_nopfn functions
- */
-#define NOPFN_SIGBUS    ((unsigned long) -1)
-#define NOPFN_OOM       ((unsigned long) -2)
-#define NOPFN_REFAULT   ((unsigned long) -3)
-
-/*
  * Different kinds of faults, as returned by handle_mm_fault().
  * Used to decide whether a process gets delivered SIGBUS or
  * just gets major/minor fault counters bumped up.
@@ -771,14 +773,14 @@ struct mm_walk {
 
 int walk_page_range(unsigned long addr, unsigned long end,
                 struct mm_walk *walk);
-void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                 unsigned long end, unsigned long floor, unsigned long ceiling);
-void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
-                unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
                         struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
                 loff_t const holebegin, loff_t const holelen, int even_cows);
+int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+                        void *buf, int len, int write);
 
 static inline void unmap_shared_mapping_range(struct address_space *mapping,
                 loff_t const holebegin, loff_t const holelen)
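generic_access_phys(), declared above, is a stock implementation of the new
->access hook for physical/PFN mappings. A sketch of how a hypothetical MMIO
driver might wire it up (MYDEV_MMIO_PFN and the mmap handler are assumptions,
and the helper is only usable where the architecture can remap the underlying
physical page for it):

static struct vm_operations_struct mymmio_vm_ops = {
        /* defer to the generic helper, which maps the underlying
         * physical page and copies to or from it */
        .access = generic_access_phys,
};

static int mymmio_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &mymmio_vm_ops;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return remap_pfn_range(vma, vma->vm_start, MYDEV_MMIO_PFN,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
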
@@ -808,7 +810,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
                 int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
-void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned long offset);
@@ -831,6 +832,39 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
                 struct vm_area_struct **pprev, unsigned long start,
                 unsigned long end, unsigned long newflags);
 
+#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST
+/*
+ * get_user_pages_fast provides equivalent functionality to get_user_pages,
+ * operating on current and current->mm (force=0 and doesn't return any vmas).
+ *
+ * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
+ * can be made about locking. get_user_pages_fast is to be implemented in a
+ * way that is advantageous (vs get_user_pages()) when the user memory area is
+ * already faulted in and present in ptes. However if the pages have to be
+ * faulted in, it may turn out to be slightly slower.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+                        struct page **pages);
+
+#else
+/*
+ * Should probably be moved to asm-generic, and architectures can include it if
+ * they don't implement their own get_user_pages_fast.
+ */
+#define get_user_pages_fast(start, nr_pages, write, pages)      \
+({                                                              \
+        struct mm_struct *mm = current->mm;                     \
+        int ret;                                                \
+                                                                \
+        down_read(&mm->mmap_sem);                               \
+        ret = get_user_pages(current, mm, start, nr_pages,      \
+                                write, 0, pages, NULL);         \
+        up_read(&mm->mmap_sem);                                 \
+                                                                \
+        ret;                                                    \
+})
+#endif
+
 /*
  * A callback you can register to apply pressure to ageable caches.
  *
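The fallback macro above shows exactly what the fast path spares its callers:
taking mmap_sem and going through the full get_user_pages() machinery. A
sketch of typical usage, pinning a user buffer for device I/O (the caller and
its error handling are hypothetical; write=1 means the pages will be written):

/* Hypothetical caller: pin the pages backing [uaddr, uaddr+len) so a
 * device can DMA into them, then drop the references afterwards. */
static int pin_user_buffer(unsigned long uaddr, size_t len)
{
        unsigned long first = uaddr >> PAGE_SHIFT;
        unsigned long last = (uaddr + len - 1) >> PAGE_SHIFT;
        int nr_pages = last - first + 1;
        struct page **pages;
        int i, got;

        pages = kmalloc(nr_pages * sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        got = get_user_pages_fast(uaddr, nr_pages, 1, pages);

        for (i = 0; i < got; i++) {
                /* ... hand pages[i] to the device here ... */
                put_page(pages[i]);     /* release the pin when done */
        }
        kfree(pages);
        return got == nr_pages ? 0 : -EFAULT;
}
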
@@ -964,9 +998,8 @@ static inline void pgtable_page_dtor(struct page *page)
                 NULL: pte_offset_kernel(pmd, address))
 
 extern void free_area_init(unsigned long * zones_size);
-extern void free_area_init_node(int nid, pg_data_t *pgdat,
-        unsigned long * zones_size, unsigned long zone_start_pfn,
-        unsigned long *zholes_size);
+extern void free_area_init_node(int nid, unsigned long * zones_size,
+                unsigned long zone_start_pfn, unsigned long *zholes_size);
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 /*
  * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
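With the pgdat argument dropped, callers pass only the node id and the
function looks the node data up itself (presumably via NODE_DATA(nid)). A
hypothetical arch-init call before and after the change:

/* before: callers supplied the pgdat explicitly */
free_area_init_node(nid, NODE_DATA(nid), zones_size, start_pfn, zholes_size);

/* after: the pgdat is derived from nid inside the function */
free_area_init_node(nid, zones_size, start_pfn, zholes_size);
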
@@ -998,8 +1031,8 @@ extern void free_area_init_node(int nid, pg_data_t *pgdat,
 extern void free_area_init_nodes(unsigned long *max_zone_pfn);
 extern void add_active_range(unsigned int nid, unsigned long start_pfn,
                                         unsigned long end_pfn);
-extern void shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
-                                        unsigned long new_end_pfn);
+extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
+                                        unsigned long end_pfn);
 extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
                                         unsigned long end_pfn);
 extern void remove_all_active_ranges(void);
@@ -1011,6 +1044,8 @@ extern unsigned long find_min_pfn_with_active_regions(void);
 extern unsigned long find_max_pfn_with_active_regions(void);
 extern void free_bootmem_with_active_regions(int nid,
                                                 unsigned long max_low_pfn);
+typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
+extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
 extern int early_pfn_to_nid(unsigned long pfn);
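work_with_active_regions() walks a node's registered active PFN ranges and
invokes work_fn(start_pfn, end_pfn, data) on each. A sketch of a callback,
assuming a nonzero return stops the walk (the callback and its use here are
hypothetical):

/* Hypothetical callback: tally how many pfns the node's active
 * regions cover. Returning 0 lets the walk continue. */
static int count_region_pfns(unsigned long start_pfn, unsigned long end_pfn,
                             void *data)
{
        unsigned long *total = data;

        *total += end_pfn - start_pfn;
        return 0;
}

static unsigned long node_active_pfns(int nid)
{
        unsigned long total = 0;

        work_with_active_regions(nid, count_region_pfns, &total);
        return total;
}
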
@@ -1024,6 +1059,7 @@ extern void mem_init(void);
 extern void show_mem(void);
 extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
+extern int after_bootmem;
 
 #ifdef CONFIG_NUMA
 extern void setup_per_cpu_pageset(void);
@@ -1068,6 +1104,9 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
         unsigned long addr, unsigned long len, pgoff_t pgoff);
 extern void exit_mmap(struct mm_struct *);
 
+extern int mm_take_all_locks(struct mm_struct *mm);
+extern void mm_drop_all_locks(struct mm_struct *mm);
+
 #ifdef CONFIG_PROC_FS
 /* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
 extern void added_exe_file_vma(struct mm_struct *mm);
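mm_take_all_locks()/mm_drop_all_locks() take, under mmap_sem held for write,
the rmap locks of every VMA in the mm, so that no mapping can change behind
the caller's back for the duration. A sketch of the intended calling pattern
(the critical-section body is hypothetical, and the -EINTR behaviour is an
assumption about the helper's signal handling):

static int register_against_mm(struct mm_struct *mm)
{
        int ret;

        down_write(&mm->mmap_sem);
        ret = mm_take_all_locks(mm);    /* assumed: -EINTR if a signal arrives */
        if (ret)
                goto out;

        /* All per-VMA rmap locks are now held: nothing can fault in or
         * unmap pages in this mm until mm_drop_all_locks(). Do the
         * registration work here (hypothetical). */

        mm_drop_all_locks(mm);
out:
        up_write(&mm->mmap_sem);
        return ret;
}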