Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d4096f4a5c1f..73f5d4556b3d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2507,10 +2507,6 @@ void drain_all_pages(struct zone *zone)
 	if (WARN_ON_ONCE(!mm_percpu_wq))
 		return;
 
-	/* Workqueues cannot recurse */
-	if (current->flags & PF_WQ_WORKER)
-		return;
-
 	/*
 	 * Do not drain if one is already in progress unless it's specific to
 	 * a zone. Such callers are primarily CMA and memory hotplug and need
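The hunk above drops the PF_WQ_WORKER bail-out, presumably because drain_all_pages() queues its per-CPU drain work on the dedicated mm_percpu_wq (checked by the WARN_ON_ONCE above) and only flushes that queue, so a caller that is itself a worker on some other workqueue does not flush its own workqueue; the early return also meant such callers silently skipped the drain. Below is a minimal sketch of that queue-per-CPU-then-flush pattern; demo_wq, demo_work, demo_drain, demo_init and demo_drain_all are hypothetical names, not the actual mm/page_alloc.c code, and CPU-hotplug locking is omitted for brevity.

/*
 * Minimal sketch of queueing one work item per online CPU on a dedicated
 * workqueue and flushing them.  demo_* names are hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/errno.h>

static struct workqueue_struct *demo_wq;	/* stand-in for mm_percpu_wq */
static DEFINE_PER_CPU(struct work_struct, demo_work);

static void demo_drain(struct work_struct *work)
{
	/* per-CPU drain body would run here, pinned by queue_work_on() */
}

static int __init demo_init(void)
{
	/* call once at boot/module load before demo_drain_all() is used */
	demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0);
	return demo_wq ? 0 : -ENOMEM;
}

static void demo_drain_all(void)
{
	int cpu;

	/* queue one work item per online CPU on the dedicated workqueue */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(&demo_work, cpu);

		INIT_WORK(work, demo_drain);
		queue_work_on(cpu, demo_wq, work);
	}

	/*
	 * Wait for them.  Flushing a workqueue other than the one the
	 * caller may be running on is what avoids recursion.
	 */
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(&demo_work, cpu));
}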
@@ -7656,11 +7652,18 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
 	/*
 	 * In case of -EBUSY, we'd like to know which page causes problem.
-	 * So, just fall through. We will check it in test_pages_isolated().
+	 * So, just fall through. test_pages_isolated() has a tracepoint
+	 * which will report the busy page.
+	 *
+	 * It is possible that busy pages could become available before
+	 * the call to test_pages_isolated, and the range will actually be
+	 * allocated. So, if we fall through be sure to clear ret so that
+	 * -EBUSY is not accidentally used or returned to caller.
 	 */
 	ret = __alloc_contig_migrate_range(&cc, start, end);
 	if (ret && ret != -EBUSY)
 		goto done;
+	ret = 0;
 
 	/*
 	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
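The new comment in the second hunk captures the subtle part of the change: a transient -EBUSY from __alloc_contig_migrate_range() must not be carried forward, because test_pages_isolated() re-checks the range afterwards and decides the real outcome; without the added "ret = 0;", the caller could see -EBUSY for a range that was in fact allocated. The fragment below is a small, self-contained userspace C illustration of that clear-then-fall-through pattern; migrate_step(), isolation_check() and alloc_range() are made-up stand-ins, not kernel APIs.

/*
 * Userspace sketch of the "clear ret before falling through" pattern.
 * Hypothetical helpers, not kernel code.
 */
#include <errno.h>
#include <stdio.h>

static int migrate_step(int busy)		/* stand-in for __alloc_contig_migrate_range() */
{
	return busy ? -EBUSY : 0;
}

static int isolation_check(int still_busy)	/* stand-in for test_pages_isolated() */
{
	return still_busy ? -EBUSY : 0;
}

static int alloc_range(int busy_at_first, int busy_later)
{
	int ret;

	ret = migrate_step(busy_at_first);
	if (ret && ret != -EBUSY)
		return ret;	/* hard error: propagate */
	ret = 0;		/* clear -EBUSY so it cannot leak if the re-check passes */

	if (isolation_check(busy_later))
		ret = -EBUSY;	/* still busy: now -EBUSY is the real answer */

	return ret;
}

int main(void)
{
	/*
	 * A page that was busy during migration but became free before the
	 * re-check: with ret cleared, the caller sees success (0) instead of
	 * a stale -EBUSY.
	 */
	printf("transient busy -> %d\n", alloc_range(1, 0));	/* 0 */
	printf("still busy     -> %d\n", alloc_range(1, 1));	/* -EBUSY */
	return 0;
}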