about summary refs log tree commit diff stats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  13
1 files changed, 13 insertions, 0 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 73f5d4556b3d..76c9688b6a0a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2684,6 +2684,7 @@ void free_unref_page_list(struct list_head *list)
2684{ 2684{
2685 struct page *page, *next; 2685 struct page *page, *next;
2686 unsigned long flags, pfn; 2686 unsigned long flags, pfn;
2687 int batch_count = 0;
2687 2688
2688 /* Prepare pages for freeing */ 2689 /* Prepare pages for freeing */
2689 list_for_each_entry_safe(page, next, list, lru) { 2690 list_for_each_entry_safe(page, next, list, lru) {
@@ -2700,6 +2701,16 @@ void free_unref_page_list(struct list_head *list)
2700 set_page_private(page, 0); 2701 set_page_private(page, 0);
2701 trace_mm_page_free_batched(page); 2702 trace_mm_page_free_batched(page);
2702 free_unref_page_commit(page, pfn); 2703 free_unref_page_commit(page, pfn);
2704
2705 /*
2706 * Guard against excessive IRQ disabled times when we get
2707 * a large list of pages to free.
2708 */
2709 if (++batch_count == SWAP_CLUSTER_MAX) {
2710 local_irq_restore(flags);
2711 batch_count = 0;
2712 local_irq_save(flags);
2713 }
2703 } 2714 }
2704 local_irq_restore(flags); 2715 local_irq_restore(flags);
2705} 2716}
@@ -6249,6 +6260,8 @@ void __paginginit zero_resv_unavail(void)
6249 pgcnt = 0; 6260 pgcnt = 0;
6250 for_each_resv_unavail_range(i, &start, &end) { 6261 for_each_resv_unavail_range(i, &start, &end) {
6251 for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) { 6262 for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
6263 if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
6264 continue;
6252 mm_zero_struct_page(pfn_to_page(pfn)); 6265 mm_zero_struct_page(pfn_to_page(pfn));
6253 pgcnt++; 6266 pgcnt++;
6254 } 6267 }