diff options
Diffstat (limited to 'mm/page_isolation.c')
-rw-r--r-- | mm/page_isolation.c | 48 |
1 file changed, 30 insertions, 18 deletions
diff --git a/mm/page_isolation.c b/mm/page_isolation.c index ce323e56b34d..bf4159d771c7 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c | |||
@@ -160,27 +160,36 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages) | |||
160 | return NULL; | 160 | return NULL; |
161 | } | 161 | } |
162 | 162 | ||
163 | /* | 163 | /** |
164 | * start_isolate_page_range() -- make page-allocation-type of range of pages | 164 | * start_isolate_page_range() - make page-allocation-type of range of pages to |
165 | * to be MIGRATE_ISOLATE. | 165 | * be MIGRATE_ISOLATE. |
166 | * @start_pfn: The lower PFN of the range to be isolated. | 166 | * @start_pfn: The lower PFN of the range to be isolated. |
167 | * @end_pfn: The upper PFN of the range to be isolated. | 167 | * @end_pfn: The upper PFN of the range to be isolated. |
168 | * @migratetype: migrate type to set in error recovery. | 168 | * start_pfn/end_pfn must be aligned to pageblock_order. |
169 | * @migratetype: Migrate type to set in error recovery. | ||
170 | * @flags: The following flags are allowed (they can be combined in | ||
171 | * a bit mask) | ||
172 | * SKIP_HWPOISON - ignore hwpoison pages | ||
173 | * REPORT_FAILURE - report details about the failure to | ||
174 | * isolate the range | ||
169 | * | 175 | * |
170 | * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in | 176 | * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in |
171 | * the range will never be allocated. Any free pages and pages freed in the | 177 | * the range will never be allocated. Any free pages and pages freed in the |
172 | * future will not be allocated again. | 178 | * future will not be allocated again. If specified range includes migrate types |
173 | * | 179 | * other than MOVABLE or CMA, this will fail with -EBUSY. For isolating all |
174 | * start_pfn/end_pfn must be aligned to pageblock_order. | 180 | * pages in the range finally, the caller have to free all pages in the range. |
175 | * Return 0 on success and -EBUSY if any part of range cannot be isolated. | 181 | * test_page_isolated() can be used for test it. |
176 | * | 182 | * |
177 | * There is no high level synchronization mechanism that prevents two threads | 183 | * There is no high level synchronization mechanism that prevents two threads |
178 | * from trying to isolate overlapping ranges. If this happens, one thread | 184 | * from trying to isolate overlapping ranges. If this happens, one thread |
179 | * will notice pageblocks in the overlapping range already set to isolate. | 185 | * will notice pageblocks in the overlapping range already set to isolate. |
180 | * This happens in set_migratetype_isolate, and set_migratetype_isolate | 186 | * This happens in set_migratetype_isolate, and set_migratetype_isolate |
181 | * returns an error. We then clean up by restoring the migration type on | 187 | * returns an error. We then clean up by restoring the migration type on |
182 | * pageblocks we may have modified and return -EBUSY to caller. This | 188 | * pageblocks we may have modified and return -EBUSY to caller. This |
183 | * prevents two threads from simultaneously working on overlapping ranges. | 189 | * prevents two threads from simultaneously working on overlapping ranges. |
190 | * | ||
191 | * Return: the number of isolated pageblocks on success and -EBUSY if any part | ||
192 | * of range cannot be isolated. | ||
184 | */ | 193 | */ |
185 | int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, | 194 | int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, |
186 | unsigned migratetype, int flags) | 195 | unsigned migratetype, int flags) |
@@ -188,6 +197,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, | |||
188 | unsigned long pfn; | 197 | unsigned long pfn; |
189 | unsigned long undo_pfn; | 198 | unsigned long undo_pfn; |
190 | struct page *page; | 199 | struct page *page; |
200 | int nr_isolate_pageblock = 0; | ||
191 | 201 | ||
192 | BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages)); | 202 | BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages)); |
193 | BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages)); | 203 | BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages)); |
@@ -196,13 +206,15 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, | |||
196 | pfn < end_pfn; | 206 | pfn < end_pfn; |
197 | pfn += pageblock_nr_pages) { | 207 | pfn += pageblock_nr_pages) { |
198 | page = __first_valid_page(pfn, pageblock_nr_pages); | 208 | page = __first_valid_page(pfn, pageblock_nr_pages); |
199 | if (page && | 209 | if (page) { |
200 | set_migratetype_isolate(page, migratetype, flags)) { | 210 | if (set_migratetype_isolate(page, migratetype, flags)) { |
201 | undo_pfn = pfn; | 211 | undo_pfn = pfn; |
202 | goto undo; | 212 | goto undo; |
213 | } | ||
214 | nr_isolate_pageblock++; | ||
203 | } | 215 | } |
204 | } | 216 | } |
205 | return 0; | 217 | return nr_isolate_pageblock; |
206 | undo: | 218 | undo: |
207 | for (pfn = start_pfn; | 219 | for (pfn = start_pfn; |
208 | pfn < undo_pfn; | 220 | pfn < undo_pfn; |