Diffstat (limited to 'include/linux/mm.h')
 -rw-r--r--  include/linux/mm.h  66
 1 file changed, 43 insertions(+), 23 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3414a8813e97..c61ba10768ea 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -42,6 +42,9 @@ extern unsigned long mmap_min_addr;
 
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+
 /*
  * Linux kernel virtual memory manager primitives.
  * The idea being to have a "virtual" mm in the same way
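
The PAGE_ALIGN addition consolidates the per-arch copies of the macro into this header; it rounds an address up to the next page boundary via the generic ALIGN helper. A minimal userspace sketch of the mask arithmetic ALIGN expands to, assuming 4 KiB pages (the simplified ALIGN here matches the kernel's for unsigned long operands):

#include <stdio.h>

/* Userspace sketch of the same arithmetic; PAGE_SIZE assumed 4096. */
#define PAGE_SIZE         4096UL
#define ALIGN(x, a)       (((x) + ((a) - 1)) & ~((a) - 1))
#define PAGE_ALIGN(addr)  ALIGN(addr, PAGE_SIZE)

int main(void)
{
        printf("%lu\n", PAGE_ALIGN(5000UL)); /* 8192: rounded up */
        printf("%lu\n", PAGE_ALIGN(4096UL)); /* 4096: already aligned */
        return 0;
}

The mask trick only works because PAGE_SIZE is a power of two: adding PAGE_SIZE-1 and clearing the low bits rounds up without a division.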
@@ -71,7 +74,7 @@ extern unsigned int kobjsize(const void *objp);
 #endif
 
 /*
- * vm_flags..
+ * vm_flags in vm_area_struct, see mm_types.h.
  */
 #define VM_READ         0x00000001      /* currently active flags */
 #define VM_WRITE        0x00000002
@@ -101,6 +104,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_DONTEXPAND   0x00040000      /* Cannot expand with mremap() */
 #define VM_RESERVED     0x00080000      /* Count as reserved_vm like IO */
 #define VM_ACCOUNT      0x00100000      /* Is a VM accounted object */
+#define VM_NORESERVE    0x00200000      /* should the VM suppress accounting */
 #define VM_HUGETLB      0x00400000      /* Huge TLB Page VM */
 #define VM_NONLINEAR    0x00800000      /* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY  0x01000000      /* T if mapped copy of data (nommu mmap) */
@@ -109,6 +113,7 @@ extern unsigned int kobjsize(const void *objp);
 
 #define VM_CAN_NONLINEAR 0x08000000     /* Has ->fault & does nonlinear pages */
 #define VM_MIXEDMAP     0x10000000      /* Can contain "struct page" and pure PFN pages */
+#define VM_SAO          0x20000000      /* Strong Access Ordering (powerpc) */
 
 #ifndef VM_STACK_DEFAULT_FLAGS          /* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -166,12 +171,16 @@ struct vm_operations_struct {
        void (*open)(struct vm_area_struct * area);
        void (*close)(struct vm_area_struct * area);
        int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
-       unsigned long (*nopfn)(struct vm_area_struct *area,
-                       unsigned long address);
 
        /* notification that a previously read-only page is about to become
         * writable, if an error is returned it will cause a SIGBUS */
        int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
+
+       /* called by access_process_vm when get_user_pages() fails, typically
+        * for use by special VMAs that can switch between memory and hardware
+        */
+       int (*access)(struct vm_area_struct *vma, unsigned long addr,
+                     void *buf, int len, int write);
 #ifdef CONFIG_NUMA
        /*
         * set_policy() op must add a reference to any non-NULL @new mempolicy
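
For illustration, a hedged sketch of how a driver exposing device memory might implement the new ->access hook so access_process_vm() (and hence ptrace) can still read and write its VMA; the my_dev structure, the ioremap'ed window, and all names are hypothetical, not part of this patch:

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/string.h>

/* Hypothetical device state; not part of this patch. */
struct my_dev {
        void __iomem *base;     /* ioremap()ed device window */
        size_t size;
};

static int my_dev_vma_access(struct vm_area_struct *vma, unsigned long addr,
                             void *buf, int len, int write)
{
        struct my_dev *dev = vma->vm_private_data;
        unsigned long offset = addr - vma->vm_start;

        if (offset + len > dev->size)
                return -EINVAL;

        if (write)
                memcpy_toio(dev->base + offset, buf, len);
        else
                memcpy_fromio(buf, dev->base + offset, len);

        return len;     /* number of bytes transferred */
}

static struct vm_operations_struct my_dev_vm_ops = {
        .access = my_dev_vma_access,
};

Returning the byte count (or a negative errno) matches what access_process_vm() expects when get_user_pages() cannot resolve the address.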
@@ -669,13 +678,6 @@ static inline int page_mapped(struct page *page)
 }
 
 /*
- * Error return values for the *_nopfn functions
- */
-#define NOPFN_SIGBUS    ((unsigned long) -1)
-#define NOPFN_OOM       ((unsigned long) -2)
-#define NOPFN_REFAULT   ((unsigned long) -3)
-
-/*
  * Different kinds of faults, as returned by handle_mm_fault().
  * Used to decide whether a process gets delivered SIGBUS or
  * just gets major/minor fault counters bumped up.
@@ -737,6 +739,8 @@ struct zap_details {
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                pte_t pte);
 
+int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+               unsigned long size);
 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
                unsigned long size, struct zap_details *);
 unsigned long unmap_vmas(struct mmu_gather **tlb,
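
zap_vma_ptes() gives drivers a wrapper around the zap machinery for tearing down the PTEs covering part of one of their own VMAs, without building a struct zap_details by hand. A speculative usage sketch (the implementation is understood to accept only VM_PFNMAP mappings, and revoke_user_mapping is a hypothetical caller):

#include <linux/mm.h>

/*
 * Sketch: drop all CPU mappings of a driver-owned VM_PFNMAP VMA,
 * e.g. before the underlying device memory is reassigned. Returns
 * 0 on success; the helper rejects non-PFNMAP VMAs.
 */
static int revoke_user_mapping(struct vm_area_struct *vma)
{
        return zap_vma_ptes(vma, vma->vm_start,
                            vma->vm_end - vma->vm_start);
}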
@@ -766,14 +770,14 @@ struct mm_walk {
 
 int walk_page_range(unsigned long addr, unsigned long end,
                struct mm_walk *walk);
-void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                unsigned long end, unsigned long floor, unsigned long ceiling);
-void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
-               unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
                loff_t const holebegin, loff_t const holelen, int even_cows);
+int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+                       void *buf, int len, int write);
 
 static inline void unmap_shared_mapping_range(struct address_space *mapping,
                loff_t const holebegin, loff_t const holelen)
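
generic_access_phys() pairs with the new ->access method above: a stock implementation for PFN mappings that resolves the physical page behind the address, maps it around each access, and copies the bytes (on architectures with CONFIG_HAVE_IOREMAP_PROT, /dev/mem is the expected user). A driver that already remaps physical memory could wire it up roughly like this; pfnmap_vm_ops and pfnmap_mmap are hypothetical names:

#include <linux/fs.h>
#include <linux/mm.h>

/* Sketch: delegate access_process_vm() on a PFN mapping to the
 * generic helper instead of writing a custom ->access method. */
static struct vm_operations_struct pfnmap_vm_ops = {
        .access = generic_access_phys,
};

static int pfnmap_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &pfnmap_vm_ops;
        /* ... remap_pfn_range() the device memory as before ... */
        return 0;
}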
@@ -803,7 +807,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
                int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
-void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned long offset);
@@ -827,6 +830,19 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
                        unsigned long end, unsigned long newflags);
 
 /*
+ * get_user_pages_fast provides equivalent functionality to get_user_pages,
+ * operating on current and current->mm (force=0 and doesn't return any vmas).
+ *
+ * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
+ * can be made about locking. get_user_pages_fast is to be implemented in a
+ * way that is advantageous (vs get_user_pages()) when the user memory area is
+ * already faulted in and present in ptes. However if the pages have to be
+ * faulted in, it may turn out to be slightly slower.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+                       struct page **pages);
+
+/*
  * A callback you can register to apply pressure to ageable caches.
  *
  * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'.  It should
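
A sketch of the intended calling pattern for the new get_user_pages_fast() declared in the hunk above (error handling trimmed; pin_user_buffer is a hypothetical caller). Unlike get_user_pages(), the caller passes no task or mm and takes no mmap_sem itself; current->mm is implied, and the caller still owns a reference on each returned page:

#include <linux/mm.h>

/* Sketch: pin the pages backing a user buffer, use them, release them. */
static int pin_user_buffer(unsigned long start, int nr_pages,
                           struct page **pages)
{
        int i, got;

        got = get_user_pages_fast(start, nr_pages, 1 /* write */, pages);
        if (got < 0)
                return got;

        /* ... e.g. program a DMA engine with the pinned pages ... */

        for (i = 0; i < got; i++)
                put_page(pages[i]);

        return got == nr_pages ? 0 : -EFAULT;
}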
@@ -898,7 +914,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 }
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 /*
  * We tuck a spinlock to guard each pagetable page into its struct page,
  * at page->private, with BUILD_BUG_ON to make sure that this will not
@@ -911,14 +927,14 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 } while (0)
 #define pte_lock_deinit(page)  ((page)->mapping = NULL)
 #define pte_lockptr(mm, pmd)   ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
-#else
+#else  /* !USE_SPLIT_PTLOCKS */
 /*
  * We use mm->page_table_lock to guard all pagetable pages of the mm.
  */
 #define pte_lock_init(page)    do {} while (0)
 #define pte_lock_deinit(page)  do {} while (0)
 #define pte_lockptr(mm, pmd)   ({(void)(pmd); &(mm)->page_table_lock;})
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* USE_SPLIT_PTLOCKS */
 
 static inline void pgtable_page_ctor(struct page *page)
 {
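
Replacing the raw NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS test with the USE_SPLIT_PTLOCKS predicate changes nothing for callers: they go through pte_offset_map_lock(), defined a few lines further down in this header, which hides whichever lock pte_lockptr() resolves to. The canonical pattern, sketched with a hypothetical helper:

#include <linux/mm.h>

/*
 * Sketch: examine one PTE under the correct lock. With split ptlocks
 * the spinlock lives in the page-table page's struct page; without
 * them, pte_lockptr() falls back to mm->page_table_lock. Either way
 * the caller is insulated from the choice.
 */
static int pte_is_present(struct mm_struct *mm, pmd_t *pmd,
                          unsigned long address)
{
        spinlock_t *ptl;
        pte_t *pte;
        int ret;

        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
        ret = pte_present(*pte);
        pte_unmap_unlock(pte, ptl);

        return ret;
}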
@@ -959,9 +975,8 @@ static inline void pgtable_page_dtor(struct page *page)
                NULL: pte_offset_kernel(pmd, address))
 
 extern void free_area_init(unsigned long * zones_size);
-extern void free_area_init_node(int nid, pg_data_t *pgdat,
-       unsigned long * zones_size, unsigned long zone_start_pfn,
-       unsigned long *zholes_size);
+extern void free_area_init_node(int nid, unsigned long * zones_size,
+               unsigned long zone_start_pfn, unsigned long *zholes_size);
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 /*
  * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
@@ -993,8 +1008,8 @@ extern void free_area_init_node(int nid, pg_data_t *pgdat,
 extern void free_area_init_nodes(unsigned long *max_zone_pfn);
 extern void add_active_range(unsigned int nid, unsigned long start_pfn,
                                        unsigned long end_pfn);
-extern void shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
-                                       unsigned long new_end_pfn);
+extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
+                                       unsigned long end_pfn);
 extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
                                        unsigned long end_pfn);
 extern void remove_all_active_ranges(void);
@@ -1003,9 +1018,10 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn,
 extern void get_pfn_range_for_nid(unsigned int nid,
                        unsigned long *start_pfn, unsigned long *end_pfn);
 extern unsigned long find_min_pfn_with_active_regions(void);
-extern unsigned long find_max_pfn_with_active_regions(void);
 extern void free_bootmem_with_active_regions(int nid,
                                                unsigned long max_low_pfn);
+typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
+extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
 extern int early_pfn_to_nid(unsigned long pfn);
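
The new work_with_active_regions() iterates over a node's registered active PFN ranges, invoking the work_fn_t callback with (start_pfn, end_pfn, data) for each and, as the implementation appears to do, stopping early on a nonzero return. A speculative example of a conforming callback (count_range and pages_on_node0 are hypothetical):

#include <linux/mm.h>

/* Sketch: total the PFNs covered by node 0's active regions. */
static int count_range(unsigned long start_pfn, unsigned long end_pfn,
                       void *data)
{
        unsigned long *total = data;

        *total += end_pfn - start_pfn;
        return 0;       /* nonzero would stop the walk early */
}

static unsigned long pages_on_node0(void)
{
        unsigned long total = 0;

        work_with_active_regions(0, count_range, &total);
        return total;
}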
@@ -1019,6 +1035,7 @@ extern void mem_init(void);
 extern void show_mem(void);
 extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
+extern int after_bootmem;
 
 #ifdef CONFIG_NUMA
 extern void setup_per_cpu_pageset(void);
@@ -1063,6 +1080,9 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
        unsigned long addr, unsigned long len, pgoff_t pgoff);
 extern void exit_mmap(struct mm_struct *);
 
+extern int mm_take_all_locks(struct mm_struct *mm);
+extern void mm_drop_all_locks(struct mm_struct *mm);
+
 #ifdef CONFIG_PROC_FS
 /* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
 extern void added_exe_file_vma(struct mm_struct *mm);
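
mm_take_all_locks()/mm_drop_all_locks() were added for callers such as mmu-notifier registration that must see a completely quiescent address space: with mmap_sem already held for write, they additionally lock every anon_vma and address_space attached to the mm, excluding page-fault and rmap activity. A hedged sketch of the calling convention (with_mm_quiesced is a hypothetical wrapper; mm_take_all_locks() is understood to fail with -EINTR on a pending signal):

#include <linux/mm.h>

/* Sketch: run op() while all faults and rmap walks are excluded. */
static int with_mm_quiesced(struct mm_struct *mm,
                            void (*op)(struct mm_struct *))
{
        int ret;

        down_write(&mm->mmap_sem);
        ret = mm_take_all_locks(mm);
        if (!ret) {
                op(mm);         /* address space fully quiesced here */
                mm_drop_all_locks(mm);
        }
        up_write(&mm->mmap_sem);
        return ret;
}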