Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d4096f4a5c1f..7e5e775e97f4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2507,10 +2507,6 @@ void drain_all_pages(struct zone *zone)
 	if (WARN_ON_ONCE(!mm_percpu_wq))
 		return;
 
-	/* Workqueues cannot recurse */
-	if (current->flags & PF_WQ_WORKER)
-		return;
-
 	/*
 	 * Do not drain if one is already in progress unless it's specific to
 	 * a zone. Such callers are primarily CMA and memory hotplug and need
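
The first hunk drops the PF_WQ_WORKER bail-out. That guard existed to keep a workqueue worker from recursing into the workqueue machinery, but drain work is queued on the dedicated mm_percpu_wq checked just above, so there is presumably nothing left to recurse into; worse, the early return meant any caller that happened to run as a worker silently skipped the drain. The standalone C sketch below illustrates that failure mode with hypothetical names (in_worker, drain_all, drains_done are made up for illustration, not kernel API): the work is not deferred, it simply never happens.

#include <stdbool.h>
#include <stdio.h>

static bool in_worker;   /* stands in for current->flags & PF_WQ_WORKER */
static int drains_done;  /* counts drains that actually ran */

static void drain_all(void)
{
	if (in_worker)   /* the check this hunk removes */
		return;  /* drain is skipped outright, not deferred */
	drains_done++;
}

int main(void)
{
	in_worker = true;  /* caller happens to be running from a worker */
	drain_all();
	printf("drains done: %d\n", drains_done); /* prints 0 */
	return 0;
}
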
@@ -2688,6 +2684,7 @@ void free_unref_page_list(struct list_head *list)
 {
 	struct page *page, *next;
 	unsigned long flags, pfn;
+	int batch_count = 0;
 
 	/* Prepare pages for freeing */
 	list_for_each_entry_safe(page, next, list, lru) {
@@ -2704,6 +2701,16 @@ void free_unref_page_list(struct list_head *list)
 		set_page_private(page, 0);
 		trace_mm_page_free_batched(page);
 		free_unref_page_commit(page, pfn);
+
+		/*
+		 * Guard against excessive IRQ disabled times when we get
+		 * a large list of pages to free.
+		 */
+		if (++batch_count == SWAP_CLUSTER_MAX) {
+			local_irq_restore(flags);
+			batch_count = 0;
+			local_irq_save(flags);
+		}
 	}
 	local_irq_restore(flags);
 }
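
The batch_count logic bounds how long free_unref_page_list() keeps interrupts disabled: every SWAP_CLUSTER_MAX pages it briefly re-enables IRQs so pending interrupts can run, then disables them again. Below is a minimal userspace sketch of the same pattern, with a mutex standing in for IRQ disabling; BATCH, free_item() and free_item_list() are made-up names for illustration, not kernel API.

#include <pthread.h>
#include <stdio.h>

#define BATCH 32  /* plays the role of SWAP_CLUSTER_MAX */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void free_item(int item)
{
	/* stand-in for the per-page work, free_unref_page_commit() */
	(void)item;
}

static void free_item_list(const int *items, int n)
{
	int batch_count = 0;

	pthread_mutex_lock(&lock);   /* analogue of local_irq_save() */
	for (int i = 0; i < n; i++) {
		free_item(items[i]);

		/*
		 * Guard against holding the lock across the whole list:
		 * briefly drop it so waiters (standing in for pending
		 * interrupts) get a chance to run.
		 */
		if (++batch_count == BATCH) {
			pthread_mutex_unlock(&lock);
			batch_count = 0;
			pthread_mutex_lock(&lock);
		}
	}
	pthread_mutex_unlock(&lock); /* analogue of local_irq_restore() */
}

int main(void)
{
	int items[100] = { 0 };

	free_item_list(items, 100);
	puts("done");
	return 0;
}
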
@@ -7656,11 +7663,18 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
 	/*
 	 * In case of -EBUSY, we'd like to know which page causes problem.
-	 * So, just fall through. We will check it in test_pages_isolated().
+	 * So, just fall through. test_pages_isolated() has a tracepoint
+	 * which will report the busy page.
+	 *
+	 * It is possible that busy pages could become available before
+	 * the call to test_pages_isolated, and the range will actually be
+	 * allocated. So, if we fall through, be sure to clear ret so that
+	 * -EBUSY is not accidentally used or returned to the caller.
 	 */
 	ret = __alloc_contig_migrate_range(&cc, start, end);
 	if (ret && ret != -EBUSY)
 		goto done;
+	ret = 0;
 
 	/*
 	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
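
The last hunk clears ret after a possible -EBUSY from __alloc_contig_migrate_range(): if the busy pages free up before test_pages_isolated() runs, the range is actually allocated, and a stale -EBUSY must not leak back to the caller. A compilable sketch of that control flow follows, using hypothetical helpers migrate_range() and check_range() in place of the kernel functions.

#include <errno.h>
#include <stdio.h>

static int migrate_range(void) { return -EBUSY; } /* transiently busy */
static int check_range(void)   { return 0; }      /* range became free */

static int alloc_range(void)
{
	int ret;

	ret = migrate_range();
	if (ret && ret != -EBUSY)
		return ret;    /* hard error: give up immediately */
	ret = 0;               /* forget the transient -EBUSY */

	if (check_range())
		return -EBUSY; /* still busy: report it now */

	return ret;            /* 0 on success, never a stale -EBUSY */
}

int main(void)
{
	printf("alloc_range() = %d\n", alloc_range()); /* prints 0 */
	return 0;
}
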