 mm/compaction.c |  4 +---
 mm/internal.h   |  1 -
 mm/page_alloc.c | 28 +++-------------------------
 3 files changed, 4 insertions(+), 29 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 29bd1df18b98..028b7210a669 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1450,14 +1450,12 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
	 * if compaction succeeds.
	 * For costly orders, we require low watermark instead of min for
	 * compaction to proceed to increase its chances.
-	 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
-	 * suitable migration targets
	 */
	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
				low_wmark_pages(zone) : min_wmark_pages(zone);
	watermark += compact_gap(order);
	if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
-				 ALLOC_CMA, wmark_target))
+				 0, wmark_target))
		return COMPACT_SKIPPED;

	return COMPACT_CONTINUE;
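
Review note: the threshold checked above is the chosen watermark plus compact_gap(order). A minimal standalone sketch of that arithmetic, assuming compact_gap() keeps its usual mm/internal.h definition of 2UL << order; the watermark values are made up for illustration, this is not kernel code:

#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER 3

/* assumed definition: room for both migration sources and targets */
static unsigned long compact_gap(unsigned int order)
{
	return 2UL << order;
}

int main(void)
{
	unsigned long min_wmark = 1024, low_wmark = 1280;	/* example values */
	unsigned int order;

	for (order = 1; order <= 5; order++) {
		unsigned long watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
					  low_wmark : min_wmark;
		watermark += compact_gap(order);
		printf("order %u: compaction needs %lu free pages\n",
		       order, watermark);
	}
	return 0;
}
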
diff --git a/mm/internal.h b/mm/internal.h
index 228dd6642951..62d8c34e63d5 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -498,7 +498,6 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_HARDER		0x10 /* try to alloc harder */
 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
-#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */

 enum ttu_flags;
 struct tlbflush_unmap_batch;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index facc25ee6e2d..b4390db64da3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2893,7 +2893,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
	 * exists.
	 */
	watermark = min_wmark_pages(zone) + (1UL << order);
-	if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
+	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return 0;

	__mod_zone_freepage_state(zone, -(1UL << order), mt);
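
Review note: the raised watermark in this hunk means the zone must still clear its min watermark after the order-sized block is pulled out. A toy model of that arithmetic with made-up numbers, not kernel code:

#include <stdio.h>

int main(void)
{
	unsigned long min_wmark = 1024;		/* made-up zone watermark */
	unsigned long nr_free = 1100;		/* made-up free page count */
	unsigned int order = 3;
	unsigned long watermark = min_wmark + (1UL << order);

	if (nr_free > watermark)
		printf("isolate ok: %lu pages left, min watermark %lu kept\n",
		       nr_free - (1UL << order), min_wmark);
	else
		printf("refused: zone would fall below its min watermark\n");
	return 0;
}
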
@@ -3169,12 +3169,6 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
	}


-#ifdef CONFIG_CMA
-	/* If allocation can't use CMA areas don't use free CMA pages */
-	if (!(alloc_flags & ALLOC_CMA))
-		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
-
	/*
	 * Check watermarks for an order-0 allocation request. If these
	 * are not met, then a high-order request also cannot go ahead
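
Review note: this hunk is the heart of the change. A standalone model of the order-0 accounting before and after, assuming, as the series this patch appears to belong to intends, that CMA pageblocks now live in ZONE_MOVABLE so every free page may be counted. The comparison is simplified and this is not kernel code:

#include <stdbool.h>
#include <stdio.h>

#define ALLOC_CMA 0x80	/* the flag this patch deletes */

static bool order0_ok_before(long free_pages, long free_cma, long mark,
			     unsigned int alloc_flags)
{
	if (!(alloc_flags & ALLOC_CMA))
		free_pages -= free_cma;	/* CMA pages were off limits */
	return free_pages > mark;
}

static bool order0_ok_after(long free_pages, long mark)
{
	return free_pages > mark;	/* every free page counts */
}

int main(void)
{
	/* 900 of 1200 free pages sit in CMA pageblocks; mark is 500 */
	printf("before, without ALLOC_CMA: %d\n",
	       order0_ok_before(1200, 900, 500, 0));
	printf("after, no flag needed:     %d\n",
	       order0_ok_after(1200, 500));
	return 0;
}
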
@@ -3201,10 +3195,8 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
		}

 #ifdef CONFIG_CMA
-		if ((alloc_flags & ALLOC_CMA) &&
-		    !list_empty(&area->free_list[MIGRATE_CMA])) {
+		if (!list_empty(&area->free_list[MIGRATE_CMA]))
			return true;
-		}
 #endif
		if (alloc_harder &&
			!list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
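
Review note: with the ALLOC_CMA gate gone, a page on the MIGRATE_CMA freelist always satisfies a high-order check. A simplified model of that walk, using boolean stand-ins for the real freelists, not kernel code:

#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER 11
enum { MT_UNMOVABLE, MT_MOVABLE, MT_RECLAIMABLE, MT_CMA, MT_COUNT };

/* stand-in for each order's per-migratetype freelists */
static bool has_free[MAX_ORDER][MT_COUNT];

static bool high_order_ok(unsigned int order)
{
	unsigned int o;
	int mt;

	for (o = order; o < MAX_ORDER; o++) {
		for (mt = MT_UNMOVABLE; mt <= MT_RECLAIMABLE; mt++)
			if (has_free[o][mt])
				return true;
		if (has_free[o][MT_CMA])	/* no ALLOC_CMA gate anymore */
			return true;
	}
	return false;
}

int main(void)
{
	has_free[4][MT_CMA] = true;	/* only an order-4 CMA block left */
	printf("order-3 request satisfiable: %d\n", high_order_ok(3));
	return 0;
}
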
@@ -3224,13 +3216,6 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx, unsigned int alloc_flags)
 {
	long free_pages = zone_page_state(z, NR_FREE_PAGES);
-	long cma_pages = 0;
-
-#ifdef CONFIG_CMA
-	/* If allocation can't use CMA areas don't use free CMA pages */
-	if (!(alloc_flags & ALLOC_CMA))
-		cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif

	/*
	 * Fast check for order-0 only. If this fails then the reserves
@@ -3239,7 +3224,7 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
	 * the caller is !atomic then it'll uselessly search the free
	 * list. That corner case is then slower but it is harmless.
	 */
-	if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
+	if (!order && free_pages > mark + z->lowmem_reserve[classzone_idx])
		return true;

	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
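
Review note: after these two hunks, zone_watermark_fast() needs no cma_pages correction; the order-0 fast path is a single comparison, with everything else deferred to the full check. A sketch of that shape, with a stand-in for the full check, not kernel code:

#include <stdbool.h>
#include <stdio.h>

static bool full_check(unsigned int order)
{
	/* stand-in for __zone_watermark_ok()'s freelist walk */
	return order < 4;
}

static bool watermark_fast(long free_pages, long mark, long reserve,
			   unsigned int order)
{
	if (!order && free_pages > mark + reserve)
		return true;		/* one comparison, no cma_pages term */
	return full_check(order);	/* everything else takes the slow path */
}

int main(void)
{
	printf("order 0: %d\n", watermark_fast(2000, 1024, 128, 0));
	printf("order 5: %d\n", watermark_fast(2000, 1024, 128, 5));
	return 0;
}
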
@@ -3875,10 +3860,6 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
	} else if (unlikely(rt_task(current)) && !in_interrupt())
		alloc_flags |= ALLOC_HARDER;

-#ifdef CONFIG_CMA
-	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
-		alloc_flags |= ALLOC_CMA;
-#endif
	return alloc_flags;
 }

@@ -4345,9 +4326,6 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
	if (should_fail_alloc_page(gfp_mask, order))
		return false;

-	if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
-		*alloc_flags |= ALLOC_CMA;
-
	return true;
 }

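Review note: the last two hunks are twins: both translated "movable request" into ALLOC_CMA, once on the allocator slow path (gfp_to_alloc_flags()) and once at allocation setup (prepare_alloc_pages()). A toy model of the removed mapping, with an illustrative flag value and a stand-in for the real gfpflags_to_migratetype() helper, not kernel code:

#include <stdio.h>

enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE };

#define GFP_MOVABLE_BIT	0x08u	/* illustrative value, not the real mask */
#define ALLOC_CMA	0x80u	/* the flag both hunks stop setting */

/* stand-in for the real gfpflags_to_migratetype() helper */
static enum migratetype gfpflags_to_migratetype(unsigned int gfp_mask)
{
	return (gfp_mask & GFP_MOVABLE_BIT) ? MIGRATE_MOVABLE : MIGRATE_UNMOVABLE;
}

int main(void)
{
	unsigned int gfp_mask = GFP_MOVABLE_BIT;
	unsigned int alloc_flags = 0;

	/* the pattern both deleted hunks implemented */
	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;

	printf("alloc_flags = %#x\n", alloc_flags);
	return 0;
}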