author     Ross Zwisler <ross.zwisler@linux.intel.com>   2018-02-03 02:26:10 -0500
committer  Ross Zwisler <ross.zwisler@linux.intel.com>   2018-02-03 02:26:10 -0500
commit     d121f07691415df824e6b60520f782f6d13b3c81 (patch)
tree       422ad3cc6fd631604fef4e469e49bacba8202e52 /mm/page_alloc.c
parent     59858d3d54cfad1f8db67c2c07e4dd33bb6ed955 (diff)
parent     569d0365f571fa6421a5c80bc30d1b2cdab857fe (diff)

Merge branch 'for-4.16/dax' into libnvdimm-for-next

Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 73f5d4556b3d..1748dd4a4b1b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2684,6 +2684,7 @@ void free_unref_page_list(struct list_head *list)
 {
        struct page *page, *next;
        unsigned long flags, pfn;
+       int batch_count = 0;

        /* Prepare pages for freeing */
        list_for_each_entry_safe(page, next, list, lru) {
@@ -2700,6 +2701,16 @@ void free_unref_page_list(struct list_head *list)
                set_page_private(page, 0);
                trace_mm_page_free_batched(page);
                free_unref_page_commit(page, pfn);
+
+               /*
+                * Guard against excessive IRQ disabled times when we get
+                * a large list of pages to free.
+                */
+               if (++batch_count == SWAP_CLUSTER_MAX) {
+                       local_irq_restore(flags);
+                       batch_count = 0;
+                       local_irq_save(flags);
+               }
        }
        local_irq_restore(flags);
 }
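
The batch_count guard above bounds how long free_unref_page_list() keeps interrupts disabled when handed a very large list: every SWAP_CLUSTER_MAX pages it briefly re-enables IRQs so pending interrupts can run, then disables them again and carries on. Below is a minimal userspace sketch of the same latency-bounding pattern, with a mutex standing in for the IRQ-off section; the node type, release callback, and BATCH constant are illustrative assumptions, not kernel code.

/*
 * Userspace analogue of the batch_count guard: walk a long list while
 * holding a lock, but drop and re-take the lock every BATCH items so that
 * other lock waiters (standing in for interrupts) are not starved.
 */
#include <pthread.h>

#define BATCH 32                        /* plays the role of SWAP_CLUSTER_MAX */

struct node {
        struct node *next;
        void (*release)(struct node *); /* per-item cleanup callback */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

void release_list(struct node *head)
{
        int batch_count = 0;

        pthread_mutex_lock(&list_lock);
        for (struct node *n = head, *next; n; n = next) {
                next = n->next;
                n->release(n);

                /* Bound how long the lock is held in one stretch. */
                if (++batch_count == BATCH) {
                        pthread_mutex_unlock(&list_lock);
                        batch_count = 0;
                        pthread_mutex_lock(&list_lock);
                }
        }
        pthread_mutex_unlock(&list_lock);
}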
@@ -5303,9 +5314,9 @@ void __ref build_all_zonelists(pg_data_t *pgdat)
  * done. Non-atomic initialization, single-pass.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-               unsigned long start_pfn, enum memmap_context context)
+               unsigned long start_pfn, enum memmap_context context,
+               struct vmem_altmap *altmap)
 {
-       struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
        unsigned long end_pfn = start_pfn + size;
        pg_data_t *pgdat = NODE_DATA(nid);
        unsigned long pfn;
@@ -5406,7 +5417,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)

 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
-       memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
+       memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY, NULL)
 #endif

 static int zone_batchsize(struct zone *zone)
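
In the second half of the diff, memmap_init_zone() stops deriving the struct vmem_altmap itself via to_vmem_altmap() and instead takes it as an explicit parameter; the early-boot memmap_init() macro simply passes NULL. A hedged sketch of the resulting call convention follows; the hotplug-style caller is hypothetical, and only the memmap_init_zone() signature and the MEMMAP_* context values are taken from this diff.

/* Early-boot path: no device-provided altmap, so pass NULL
 * (this is what the memmap_init() macro above now does). */
memmap_init_zone(size, nid, zone_idx, start_pfn, MEMMAP_EARLY, NULL);

/* Hypothetical hotplug-style caller: it already knows which
 * vmem_altmap (if any) backs the PFN range and hands it in directly,
 * instead of having memmap_init_zone() look it up from the PFN. */
memmap_init_zone(nr_pages, nid, zone_idx, start_pfn, MEMMAP_HOTPLUG, altmap);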