summaryrefslogtreecommitdiffstats
path: root/mm/page_isolation.c
diff options
context:
space:
mode:
author: Joonsoo Kim <js1304@gmail.com>	2016-01-14 18:18:39 -0500
committer: Linus Torvalds <torvalds@linux-foundation.org>	2016-01-14 19:00:49 -0500
commit: fea85cff11de4377040deff0e85eb2793fb078aa (patch)
tree: a1daea7d7c44d4d11ff139be637c565621944432 /mm/page_isolation.c
parent: 4a8c7bb59ac85b038c29adf6d32ff56e11fbb267 (diff)
mm/page_isolation.c: return last tested pfn rather than failure indicator
This is a preparation step to report the failed test pfn in a new tracepoint, to analyze cma allocation failure problems. There is no functional change in this patch.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: Minchan Kim <minchan@kernel.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_isolation.c')
-rw-r--r--	mm/page_isolation.c	13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 4568fd58f70a..029a171d35dc 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -212,7 +212,7 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
212 * 212 *
213 * Returns 1 if all pages in the range are isolated. 213 * Returns 1 if all pages in the range are isolated.
214 */ 214 */
215static int 215static unsigned long
216__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn, 216__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
217 bool skip_hwpoisoned_pages) 217 bool skip_hwpoisoned_pages)
218{ 218{
@@ -237,9 +237,8 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
237 else 237 else
238 break; 238 break;
239 } 239 }
240 if (pfn < end_pfn) 240
241 return 0; 241 return pfn;
242 return 1;
243} 242}
244 243
245int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, 244int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
@@ -248,7 +247,6 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
248 unsigned long pfn, flags; 247 unsigned long pfn, flags;
249 struct page *page; 248 struct page *page;
250 struct zone *zone; 249 struct zone *zone;
251 int ret;
252 250
253 /* 251 /*
254 * Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages 252 * Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages
@@ -266,10 +264,11 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
266 /* Check all pages are free or marked as ISOLATED */ 264 /* Check all pages are free or marked as ISOLATED */
267 zone = page_zone(page); 265 zone = page_zone(page);
268 spin_lock_irqsave(&zone->lock, flags); 266 spin_lock_irqsave(&zone->lock, flags);
269 ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn, 267 pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
270 skip_hwpoisoned_pages); 268 skip_hwpoisoned_pages);
271 spin_unlock_irqrestore(&zone->lock, flags); 269 spin_unlock_irqrestore(&zone->lock, flags);
272 return ret ? 0 : -EBUSY; 270
271 return pfn < end_pfn ? -EBUSY : 0;
273} 272}
274 273
275struct page *alloc_migrate_target(struct page *page, unsigned long private, 274struct page *alloc_migrate_target(struct page *page, unsigned long private,