 include/linux/mmzone.h |  9 ++++
 mm/page_alloc.c        | 60 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 69 insertions(+), 0 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 8c1335f3c3a3..26f2040b8b04 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -63,8 +63,10 @@ enum {
 
 #ifdef CONFIG_CMA
 # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define cma_wmark_pages(zone) zone->min_cma_pages
 #else
 # define is_migrate_cma(migratetype) false
+# define cma_wmark_pages(zone) 0
 #endif
 
 #define for_each_migratetype_order(order, type) \
@@ -371,6 +373,13 @@ struct zone {
 	/* see spanned/present_pages for more description */
 	seqlock_t span_seqlock;
 #endif
+#ifdef CONFIG_CMA
+	/*
+	 * CMA needs to increase watermark levels during the allocation
+	 * process to make sure that the system is not starved.
+	 */
+	unsigned long min_cma_pages;
+#endif
 	struct free_area free_area[MAX_ORDER];
 
 #ifndef CONFIG_SPARSEMEM
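A side note on this header change: with CONFIG_CMA disabled, cma_wmark_pages() expands to the constant 0, so the watermark adjustments added further below compile to no-ops. A minimal userspace sketch of that fallback follows; zone_demo is an invented name, not the kernel's struct zone, and the program can be built with or without -DCONFIG_CMA to see both expansions.

#include <stdio.h>

/* Hypothetical stand-alone model; not kernel code. */
struct zone_demo { unsigned long min_cma_pages; };

#ifdef CONFIG_CMA
# define cma_wmark_pages(zone) ((zone)->min_cma_pages)
#else
# define cma_wmark_pages(zone) 0 /* the bias compiles away entirely */
#endif

int main(void)
{
	struct zone_demo z = { .min_cma_pages = 1024 };

	/* Same shape as the __setup_per_zone_wmarks() change below:
	 * without -DCONFIG_CMA the addition is a constant 0. */
	unsigned long wmark_min = 4096 + cma_wmark_pages(&z);

	printf("min_cma_pages=%lu wmark_min=%lu\n", z.min_cma_pages, wmark_min);
	return 0;
}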
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4615531dcf66..22348ae1005d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5079,6 +5079,11 @@ static void __setup_per_zone_wmarks(void)
 
 		zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+
+		zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
+		zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
+		zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
+
 		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
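For reference, the resulting watermark arithmetic can be modeled outside the kernel. The sketch below uses invented names (zone_model, compute_wmarks); only the derivation of LOW/HIGH from MIN and the min_cma_pages bias mirror the hunk above. With tmp = 4096 pages and min_cma_pages = 1024, the watermarks come out to 5120/6144/7168 pages.

#include <stdio.h>

/* Hypothetical model; not kernel symbols. */
struct zone_model {
	unsigned long wmark_min, wmark_low, wmark_high;
	unsigned long min_cma_pages;
};

/* Mirrors the shape of the hunk above: LOW and HIGH are derived from
 * MIN, then all three are biased upward by min_cma_pages. */
static void compute_wmarks(struct zone_model *z, unsigned long tmp)
{
	z->wmark_min = tmp;
	z->wmark_low = z->wmark_min + (tmp >> 2);
	z->wmark_high = z->wmark_min + (tmp >> 1);

	z->wmark_min += z->min_cma_pages;
	z->wmark_low += z->min_cma_pages;
	z->wmark_high += z->min_cma_pages;
}

int main(void)
{
	struct zone_model z = { .min_cma_pages = 1024 };

	compute_wmarks(&z, 4096);
	/* tmp = 4096 with a 1024-page bias gives
	 * min = 5120, low = 6144, high = 7168. */
	printf("min=%lu low=%lu high=%lu\n",
	       z.wmark_min, z.wmark_low, z.wmark_high);
	return 0;
}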
@@ -5684,6 +5689,54 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
 	return ret > 0 ? 0 : ret;
 }
 
+/*
+ * Update the zone's CMA pages counter used for watermark level calculation.
+ */
+static inline void __update_cma_watermarks(struct zone *zone, int count)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&zone->lock, flags);
+	zone->min_cma_pages += count;
+	spin_unlock_irqrestore(&zone->lock, flags);
+	setup_per_zone_wmarks();
+}
+
+/*
+ * Trigger a memory pressure bump to reclaim some pages in order to be able to
+ * allocate 'count' pages in single page units. Does similar work to the
+ * __alloc_pages_slowpath() function.
+ */
+static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
+{
+	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+	struct zonelist *zonelist = node_zonelist(0, gfp_mask);
+	int did_some_progress = 0;
+	int order = 1;
+
+	/*
+	 * Increase the watermark levels to force kswapd to do its job
+	 * and stabilise at the new watermark level.
+	 */
+	__update_cma_watermarks(zone, count);
+
+	/* Obey watermarks as if the page was being allocated */
+	while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
+		wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
+
+		did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+						      NULL);
+		if (!did_some_progress) {
+			/* Exhausted what can be done so it's blamo time */
+			out_of_memory(zonelist, gfp_mask, order, NULL, false);
+		}
+	}
+
+	/* Restore the original watermark levels. */
+	__update_cma_watermarks(zone, -count);
+
+	return count;
+}
+
 /**
  * alloc_contig_range() -- tries to allocate given range of pages
  * @start: start PFN to allocate
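The control flow of __reclaim_pages() is easy to model in userspace. Everything below is a hypothetical sketch, none of the names are kernel symbols: raise the watermarks by 'count', reclaim until the raised low watermark is satisfied again, then restore the original levels.

#include <stdio.h>

/* Hypothetical model state: free pages in a zone and its low watermark. */
static unsigned long free_pages = 1000;
static unsigned long wmark_low = 1500;

/* Stand-in for __update_cma_watermarks(): bumping min_cma_pages by
 * 'count' lifts every watermark by the same amount. */
static void update_watermarks(long count)
{
	wmark_low += count;
}

/* Stand-in for __perform_reclaim(): pretend each pass frees 256 pages. */
static unsigned long perform_reclaim(void)
{
	free_pages += 256;
	return 256;
}

/* Bump the watermarks by 'count', reclaim until the raised low
 * watermark is satisfied, then restore the original levels. */
static void reclaim_pages_model(long count)
{
	update_watermarks(count);
	while (free_pages < wmark_low) {
		if (!perform_reclaim())
			break; /* the real code invokes out_of_memory() here */
	}
	update_watermarks(-count);
}

int main(void)
{
	reclaim_pages_model(1024); /* 4 MiB request with 4 KiB pages */
	printf("free=%lu, low watermark restored to %lu\n",
	       free_pages, wmark_low);
	return 0;
}

The restore step is unconditional, so the bump only persists while the contiguous allocation is in flight; permanent pressure comes only from the base watermarks.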
@@ -5782,6 +5835,13 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 		goto done;
 	}
 
+	/*
+	 * Reclaim enough pages to make sure that contiguous allocation
+	 * will not starve the system.
+	 */
+	__reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end - start);
+
+	/* Grab isolated pages from freelists. */
 	outer_end = isolate_freepages_range(outer_start, end);
 	if (!outer_end) {
 		ret = -EBUSY;
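A quick sanity check on the size of that temporary bump: alloc_contig_range() passes end - start, i.e. the entire requested range in pages, to __reclaim_pages(). The numbers below are illustrative only.

#include <stdio.h>

int main(void)
{
	/* Illustrative PFNs only: a 16 MiB request with 4 KiB pages. */
	unsigned long start = 0x40000;
	unsigned long end = start + 4096;

	/* alloc_contig_range() passes end - start to __reclaim_pages(),
	 * so every watermark rises by the full request size while the
	 * allocation is in progress. */
	printf("bump = %lu pages (%lu KiB)\n",
	       end - start, (end - start) * 4);
	return 0;
}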
