Diffstat (limited to 'mm/internal.h')
-rw-r--r--  mm/internal.h  36
1 file changed, 10 insertions(+), 26 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 07b67361a40a..7f22a11fcc66 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -134,7 +134,7 @@ struct compact_control {
 	unsigned long nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;		/* isolate_freepages search base */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
-	bool sync;			/* Synchronous migration */
+	enum migrate_mode mode;		/* Async or sync migration mode */
 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
 	bool finished_update_free;	/* True when the zone cached pfns are
 					 * no longer being updated
@@ -144,7 +144,10 @@ struct compact_control {
 	int order;			/* order a direct compactor needs */
 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
 	struct zone *zone;
-	bool contended;			/* True if a lock was contended */
+	bool contended;			/* True if a lock was contended, or
+					 * need_resched() true during async
+					 * compaction
+					 */
 };
 
 unsigned long
@@ -169,6 +172,11 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
+static inline bool is_cow_mapping(vm_flags_t flags)
+{
+	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+}
+
 /* mm/util.c */
 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev, struct rb_node *rb_parent);
@@ -184,26 +192,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 }
 
 /*
- * Called only in fault path, to determine if a new page is being
- * mapped into a LOCKED vma.  If it is, mark page as mlocked.
- */
-static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
-				      struct page *page)
-{
-	VM_BUG_ON_PAGE(PageLRU(page), page);
-
-	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
-		return 0;
-
-	if (!TestSetPageMlocked(page)) {
-		mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    hpage_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
-	}
-	return 1;
-}
-
-/*
  * must be called with vma's mmap_sem held for read or write, and page locked.
  */
 extern void mlock_vma_page(struct page *page);
@@ -245,10 +233,6 @@ extern unsigned long vma_address(struct page *page,
 				struct vm_area_struct *vma);
 #endif
 #else /* !CONFIG_MMU */
-static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
-{
-	return 0;
-}
static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
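
Note on the first two hunks: compact_control's "bool sync" flag becomes "enum migrate_mode mode", so compaction can distinguish more than two migration behaviors, and per the updated comment async compaction now also records need_resched() as contention. For reference, the tri-state enum as defined in include/linux/migrate_mode.h in this kernel era is reproduced below; the pick_mode() helper is only an illustrative sketch of how a caller that previously passed a bool might map onto it, not code from this patch.

/* Reproduced from include/linux/migrate_mode.h (same kernel era):
 *
 *   MIGRATE_ASYNC      - never blocks
 *   MIGRATE_SYNC_LIGHT - may block on most operations, but not on
 *                        dirty page writeback
 *   MIGRATE_SYNC       - may block on everything
 */
enum migrate_mode {
	MIGRATE_ASYNC,
	MIGRATE_SYNC_LIGHT,
	MIGRATE_SYNC,
};

/* Illustrative sketch only (not from this patch): a caller that used
 * to pass "bool sync" maps naturally onto the new tri-state mode. */
static enum migrate_mode pick_mode(bool sync)
{
	return sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC;
}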
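
Note on the new is_cow_mapping() helper: the test (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE is true exactly for private mappings that may become writable, i.e. the mappings whose pages get duplicated on write. Spelled out below; the truth table and the vma_pages_stable() caller are purely illustrative, not part of this patch.

/*
 * is_cow_mapping(flags) over the four flag combinations:
 *
 *   VM_SHARED  VM_MAYWRITE  result
 *       0          0        false  (private, can never be written)
 *       0          1        true   (private and writable => copy-on-write)
 *       1          0        false  (shared read-only)
 *       1          1        false  (shared writable: writes go to the file)
 */

/* Hypothetical caller, for illustration only (assumes <linux/mm.h> for
 * struct vm_area_struct): pages of a COW mapping may be replaced by
 * private copies on write, so they cannot be assumed stable. */
static bool vma_pages_stable(struct vm_area_struct *vma)
{
	return !is_cow_mapping(vma->vm_flags);
}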