Diffstat (limited to 'mm/internal.h')
-rw-r--r--	mm/internal.h | 52 ++++++++++++++++++++++++++++++++--------------------
1 file changed, 32 insertions(+), 20 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index b8c91b342e24..a4fa284f6bc2 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -118,26 +118,27 @@ struct compact_control {
 	unsigned long nr_freepages;	/* Number of isolated free pages */
 	unsigned long nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;		/* isolate_freepages search base */
-	unsigned long start_free_pfn;	/* where we started the search */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
 	bool sync;			/* Synchronous migration */
-	bool wrapped;			/* Order > 0 compactions are
-					   incremental, once free_pfn
-					   and migrate_pfn meet, we restart
-					   from the top of the zone;
-					   remember we wrapped around. */
+	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
+	bool finished_update_free;	/* True when the zone cached pfns are
+					 * no longer being updated
+					 */
+	bool finished_update_migrate;
 
 	int order;			/* order a direct compactor needs */
 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
 	struct zone *zone;
-	bool *contended;		/* True if a lock was contended */
+	bool contended;			/* True if a lock was contended */
+	struct page **page;		/* Page captured of requested size */
 };
 
 unsigned long
-isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn);
+isolate_freepages_range(struct compact_control *cc,
+			unsigned long start_pfn, unsigned long end_pfn);
 unsigned long
 isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
-			   unsigned long low_pfn, unsigned long end_pfn);
+			   unsigned long low_pfn, unsigned long end_pfn, bool unevictable);
 
 #endif
 
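The first hunk embeds contended directly in struct compact_control (previously a bool * filled in by the caller) and adds a page slot for capturing a free page of the requested order. A minimal caller sketch, loosely modelled on compact_zone_order() in mm/compaction.c; the variables captured, rc, order, gfp_mask, zone, sync and contended are illustrative, not part of this diff:

	/* Sketch: how a direct compactor might drive the revised struct.
	 * With 'contended' embedded, the result is read back from cc
	 * after the run rather than through a separate bool pointer. */
	struct page *captured = NULL;
	struct compact_control cc = {
		.order		= order,
		.migratetype	= allocflags_to_migratetype(gfp_mask),
		.zone		= zone,
		.sync		= sync,
		.page		= &captured,	/* receives a captured page, if any */
	};
	int rc = compact_zone(zone, &cc);	/* static helper in mm/compaction.c */
	*contended = cc.contended;		/* was a lock contended during the run? */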
@@ -167,9 +168,8 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 }
 
 /*
- * Called only in fault path via page_evictable() for a new page
- * to determine if it's being mapped into a LOCKED vma.
- * If so, mark page as mlocked.
+ * Called only in fault path, to determine if a new page is being
+ * mapped into a LOCKED vma.  If it is, mark page as mlocked.
  */
 static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
 				    struct page *page)
@@ -180,7 +180,8 @@ static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
 		return 0;
 
 	if (!TestSetPageMlocked(page)) {
-		inc_zone_page_state(page, NR_MLOCK);
+		mod_zone_page_state(page_zone(page), NR_MLOCK,
+				    hpage_nr_pages(page));
 		count_vm_event(UNEVICTABLE_PGMLOCKED);
 	}
 	return 1;
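The NR_MLOCK change switches from a fixed increment of one to the page's actual size in base pages, so mlocked transparent hugepages are no longer undercounted. For comparison (the figure 512 assumes a 2MB THP with 4KB base pages, i.e. HPAGE_PMD_NR on x86-64):

	/* For a base page the new call is equivalent to the old one: */
	mod_zone_page_state(page_zone(page), NR_MLOCK, 1);
	/* For a THP head page, hpage_nr_pages() returns the number of
	 * subpages (e.g. 512), keeping NR_MLOCK in units of base pages: */
	mod_zone_page_state(page_zone(page), NR_MLOCK, hpage_nr_pages(page));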
@@ -201,12 +202,7 @@ extern void munlock_vma_page(struct page *page);
  * If called for a page that is still mapped by mlocked vmas, all we do
  * is revert to lazy LRU behaviour -- semantics are not broken.
  */
-extern void __clear_page_mlock(struct page *page);
-static inline void clear_page_mlock(struct page *page)
-{
-	if (unlikely(TestClearPageMlocked(page)))
-		__clear_page_mlock(page);
-}
+extern void clear_page_mlock(struct page *page);
 
 /*
  * mlock_migrate_page - called only from migrate_page_copy() to
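clear_page_mlock() loses its inline wrapper and becomes a plain declaration; the TestClearPageMlocked() fast path moves into the out-of-line body. A sketch of what that body plausibly looks like in mm/mlock.c, inferred from the removed wrapper and the NR_MLOCK accounting above rather than quoted from this diff:

	void clear_page_mlock(struct page *page)
	{
		if (!TestClearPageMlocked(page))
			return;		/* not mlocked: nothing to undo */

		/* Mirror the mlocked_vma_newpage() accounting in reverse. */
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    -hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGCLEARED);
	}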
@@ -340,7 +336,6 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
 #define ZONE_RECLAIM_FULL	-1
 #define ZONE_RECLAIM_SOME	0
 #define ZONE_RECLAIM_SUCCESS	1
-#endif
 
 extern int hwpoison_filter(struct page *p);
 
@@ -356,3 +351,20 @@ extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
 			unsigned long, unsigned long);
 
 extern void set_pageblock_order(void);
+unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+					    struct list_head *page_list);
+/* The ALLOC_WMARK bits are used as an index to zone->watermark */
+#define ALLOC_WMARK_MIN		WMARK_MIN
+#define ALLOC_WMARK_LOW		WMARK_LOW
+#define ALLOC_WMARK_HIGH	WMARK_HIGH
+#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
+
+/* Mask to get the watermark bits */
+#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
+
+#define ALLOC_HARDER		0x10 /* try to alloc harder */
+#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
+#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
+#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
+
+#endif	/* __MM_INTERNAL_H */
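The final hunk moves the ALLOC_* flags into mm/internal.h so the page allocator, compaction and CMA code can share them, and the new trailing #endif closes the __MM_INTERNAL_H include guard at the true end of the file. The low bits of alloc_flags double as an index into zone->watermark[], which is why ALLOC_WMARK_MASK is derived from ALLOC_NO_WATERMARKS. A usage sketch in the style of get_page_from_freelist(); variable names are illustrative:

	/* Low bits select a watermark; high bits are boolean modifiers. */
	int alloc_flags = ALLOC_WMARK_LOW | ALLOC_CPUSET;
	unsigned long mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
	if (alloc_flags & ALLOC_HARDER)
		mark -= mark / 4;	/* sketch: harder attempts accept less free memory */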