Diffstat (limited to 'include/linux/mm.h')

 -rw-r--r--   include/linux/mm.h | 140
 1 file changed, 83 insertions, 57 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 311be906b57d..fa0680402738 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -10,7 +10,6 @@
 #include <linux/list.h>
 #include <linux/mmzone.h>
 #include <linux/rbtree.h>
-#include <linux/prio_tree.h>
 #include <linux/atomic.h>
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
@@ -21,6 +20,7 @@
 
 struct mempolicy;
 struct anon_vma;
+struct anon_vma_chain;
 struct file_ra_state;
 struct user_struct;
 struct writeback_control;
@@ -70,6 +70,8 @@ extern unsigned int kobjsize(const void *objp);
 /*
  * vm_flags in vm_area_struct, see mm_types.h.
  */
+#define VM_NONE		0x00000000
+
 #define VM_READ		0x00000001	/* currently active flags */
 #define VM_WRITE	0x00000002
 #define VM_EXEC		0x00000004
@@ -82,16 +84,9 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_MAYSHARE	0x00000080
 
 #define VM_GROWSDOWN	0x00000100	/* general info on the segment */
-#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
-#define VM_GROWSUP	0x00000200
-#else
-#define VM_GROWSUP	0x00000000
-#define VM_NOHUGEPAGE	0x00000200	/* MADV_NOHUGEPAGE marked this vma */
-#endif
 #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
 
-#define VM_EXECUTABLE	0x00001000
 #define VM_LOCKED	0x00002000
 #define VM_IO		0x00004000	/* Memory mapped I/O or similar */
 
@@ -101,25 +96,34 @@ extern unsigned int kobjsize(const void *objp);
 
 #define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
 #define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
-#define VM_RESERVED	0x00080000	/* Count as reserved_vm like IO */
 #define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
 #define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
-#ifndef CONFIG_TRANSPARENT_HUGEPAGE
-#define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
-#else
-#define VM_HUGEPAGE	0x01000000	/* MADV_HUGEPAGE marked this vma */
-#endif
-#define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
-#define VM_NODUMP	0x04000000	/* Do not include in the core dump */
+#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
+#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */
 
-#define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
 #define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
-#define VM_SAO		0x20000000	/* Strong Access Ordering (powerpc) */
-#define VM_PFN_AT_MMAP	0x40000000	/* PFNMAP vma that is fully mapped at mmap time */
+#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
+#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
 #define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
 
+#if defined(CONFIG_X86)
+# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
+#elif defined(CONFIG_PPC)
+# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
+#elif defined(CONFIG_PARISC)
+# define VM_GROWSUP	VM_ARCH_1
+#elif defined(CONFIG_IA64)
+# define VM_GROWSUP	VM_ARCH_1
+#elif !defined(CONFIG_MMU)
+# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
+#endif
+
+#ifndef VM_GROWSUP
+# define VM_GROWSUP	VM_NONE
+#endif
+
 /* Bits set in the VMA until the stack is in its final location */
 #define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)
 
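The VM_ARCH_1 block above lets each architecture give bit 0x01000000 its own meaning (VM_PAT, VM_SAO, VM_GROWSUP, VM_MAPPED_COPY) while generic code keeps a single definition, and the VM_NONE fallback means VM_GROWSUP can be tested unconditionally. A minimal sketch of the pattern this enables, not taken from the patch (the helper name is made up):

	/* On architectures where VM_GROWSUP is VM_NONE (0), the test below is
	 * constant-folded away, so callers need no #ifdefs of their own. */
	static inline int vma_grows_up(const struct vm_area_struct *vma)
	{
		return !!(vma->vm_flags & VM_GROWSUP);
	}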
@@ -143,7 +147,7 @@ extern unsigned int kobjsize(const void *objp);
  * Special vmas that are non-mergable, non-mlock()able.
  * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
+#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)
 
 /*
  * mapping from the currently active vm_flags protection bits (the
@@ -157,24 +161,7 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
 #define FAULT_FLAG_RETRY_NOWAIT	0x10	/* Don't drop mmap_sem and wait when retrying */
 #define FAULT_FLAG_KILLABLE	0x20	/* The fault task is in SIGKILL killable region */
-
-/*
- * This interface is used by x86 PAT code to identify a pfn mapping that is
- * linear over entire vma. This is to optimize PAT code that deals with
- * marking the physical region with a particular prot. This is not for generic
- * mm use. Note also that this check will not work if the pfn mapping is
- * linear for a vma starting at physical address 0. In which case PAT code
- * falls back to slow path of reserving physical range page by page.
- */
-static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
-{
-	return !!(vma->vm_flags & VM_PFN_AT_MMAP);
-}
-
-static inline int is_pfn_mapping(struct vm_area_struct *vma)
-{
-	return !!(vma->vm_flags & VM_PFNMAP);
-}
+#define FAULT_FLAG_TRIED	0x40	/* second try */
 
 /*
  * vm_fault is filled by the the pagefault handler and passed to the vma's
@@ -182,8 +169,7 @@ static inline int is_pfn_mapping(struct vm_area_struct *vma)
  * of VM_FAULT_xxx flags that give details about how the fault was handled.
  *
  * pgoff should be used in favour of virtual_address, if possible. If pgoff
- * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear
- * mapping support.
+ * is used, one may implement ->remap_pages to get nonlinear mapping support.
  */
 struct vm_fault {
 	unsigned int flags;		/* FAULT_FLAG_xxx flags */
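FAULT_FLAG_TRIED, added a few hunks up, lets a ->fault implementation tell the first attempt from the retry that follows VM_FAULT_RETRY. A hedged sketch of the retry loop as an architecture page-fault handler typically drives it (simplified, not code from this patch; mmap_sem re-acquisition is only noted in a comment):

	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int fault;
retry:
	fault = handle_mm_fault(mm, vma, address, flags);
	if ((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
		/* handle_mm_fault() dropped mmap_sem; re-take it, then mark
		 * the second pass so ->fault can tell the passes apart. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}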
@@ -241,6 +227,9 @@ struct vm_operations_struct {
 	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
 		const nodemask_t *to, unsigned long flags);
 #endif
+	/* called by sys_remap_file_pages() to populate non-linear mapping */
+	int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
+			   unsigned long size, pgoff_t pgoff);
 };
 
 struct mmu_gather;
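The new ->remap_pages hook is what sys_remap_file_pages() calls now that VM_CAN_NONLINEAR is gone: a file mapping advertises nonlinear support by providing the operation rather than setting a flag. For orientation, this is the userspace operation it ultimately serves (illustrative snippet, not from the patch; fd and page_sz are assumed to exist, and <sys/mman.h> is required):

	/* Map four pages of fd, then make the first page of the window show
	 * file page 3 instead of file page 0. */
	char *win = mmap(NULL, 4 * page_sz, PROT_READ, MAP_SHARED, fd, 0);
	if (win != MAP_FAILED)
		remap_file_pages(win, page_sz, 0, 3, 0);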
245 | 234 | ||
246 | struct mmu_gather; | 235 | struct mmu_gather; |
@@ -249,6 +238,18 @@ struct inode;
 #define page_private(page)		((page)->private)
 #define set_page_private(page, v)	((page)->private = (v))
 
+/* It's valid only if the page is free path or free_list */
+static inline void set_freepage_migratetype(struct page *page, int migratetype)
+{
+	page->index = migratetype;
+}
+
+/* It's valid only if the page is free path or free_list */
+static inline int get_freepage_migratetype(struct page *page)
+{
+	return page->index;
+}
+
 /*
  * FIXME: take this include out, include page-flags.h in
  * files which need it (119 of them)
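set_freepage_migratetype() and get_freepage_migratetype() reuse page->index to remember which migratetype a page had when it was placed on a buddy free list, so the allocator can read it back without consulting the pageblock bitmap again. An illustration of the intended pairing, not code from this hunk (the real callers and their locking live in mm/page_alloc.c):

	/* freeing: record the list the page goes onto ... */
	set_freepage_migratetype(page, migratetype);
	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);

	/* ... allocating: recover it without another pageblock lookup */
	int mt = get_freepage_migratetype(page);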
@@ -454,6 +455,7 @@ void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
 int split_free_page(struct page *page);
+int capture_free_page(struct page *page, int alloc_order, int migratetype);
 
 /*
  * Compound pages have a destructor function. Provide a
@@ -1071,7 +1073,8 @@ vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
 
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
-		unsigned long new_addr, unsigned long len);
+		unsigned long new_addr, unsigned long len,
+		bool need_rmap_locks);
 extern unsigned long do_mremap(unsigned long addr,
 		unsigned long old_len, unsigned long new_len,
 		unsigned long flags, unsigned long new_addr);
@@ -1366,24 +1369,45 @@ extern void zone_pcp_reset(struct zone *zone);
 extern atomic_long_t mmap_pages_allocated;
 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
 
-/* prio_tree.c */
-void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
-void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
-void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
-struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
-	struct prio_tree_iter *iter);
-
-#define vma_prio_tree_foreach(vma, iter, root, begin, end)	\
-	for (prio_tree_iter_init(iter, root, begin, end), vma = NULL;	\
-		(vma = vma_prio_tree_next(vma, iter)); )
+/* interval_tree.c */
+void vma_interval_tree_insert(struct vm_area_struct *node,
+			      struct rb_root *root);
+void vma_interval_tree_insert_after(struct vm_area_struct *node,
+				    struct vm_area_struct *prev,
+				    struct rb_root *root);
+void vma_interval_tree_remove(struct vm_area_struct *node,
+			      struct rb_root *root);
+struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
+				unsigned long start, unsigned long last);
+struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
+				unsigned long start, unsigned long last);
+
+#define vma_interval_tree_foreach(vma, root, start, last)		\
+	for (vma = vma_interval_tree_iter_first(root, start, last);	\
+	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
 
 static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
 					struct list_head *list)
 {
-	vma->shared.vm_set.parent = NULL;
-	list_add_tail(&vma->shared.vm_set.list, list);
+	list_add_tail(&vma->shared.nonlinear, list);
 }
 
+void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
+				   struct rb_root *root);
+void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
+				   struct rb_root *root);
+struct anon_vma_chain *anon_vma_interval_tree_iter_first(
+	struct rb_root *root, unsigned long start, unsigned long last);
+struct anon_vma_chain *anon_vma_interval_tree_iter_next(
+	struct anon_vma_chain *node, unsigned long start, unsigned long last);
+#ifdef CONFIG_DEBUG_VM_RB
+void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
+#endif
+
+#define anon_vma_interval_tree_foreach(avc, root, start, last)		\
+	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
+	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
+
 /* mmap.c */
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
 extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
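The vma_interval_tree_* and anon_vma_interval_tree_* declarations replace the prio_tree API: address_space->i_mmap becomes an ordinary rb_root interval tree, the anon_vma side gets a matching one, and callers walk every VMA (or anon_vma_chain) overlapping a page-offset range with the foreach macros. A minimal usage sketch, assuming the caller already holds the mapping's i_mmap_mutex (handle_one_vma is a made-up callback):

	struct vm_area_struct *vma;
	pgoff_t first = page->index;	/* single-page range */

	vma_interval_tree_foreach(vma, &mapping->i_mmap, first, first) {
		/* every file-backed VMA whose range covers this offset */
		handle_one_vma(vma);
	}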
@@ -1400,15 +1424,13 @@ extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
 	struct rb_node **, struct rb_node *);
 extern void unlink_file_vma(struct vm_area_struct *);
 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
-	unsigned long addr, unsigned long len, pgoff_t pgoff);
+	unsigned long addr, unsigned long len, pgoff_t pgoff,
+	bool *need_rmap_locks);
 extern void exit_mmap(struct mm_struct *);
 
 extern int mm_take_all_locks(struct mm_struct *mm);
 extern void mm_drop_all_locks(struct mm_struct *mm);
 
-/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
-extern void added_exe_file_vma(struct mm_struct *mm);
-extern void removed_exe_file_vma(struct mm_struct *mm);
 extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
 extern struct file *get_mm_exe_file(struct mm_struct *mm);
 
@@ -1662,5 +1684,9 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
 static inline bool page_is_guard(struct page *page) { return false; }
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
+extern void reset_zone_present_pages(void);
+extern void fixup_zone_present_pages(int nid, unsigned long start_pfn,
+				     unsigned long end_pfn);
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */