Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	31
1 file changed, 15 insertions(+), 16 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6969a8abdba2..f2c7cc6a3039 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1541,19 +1541,6 @@ failed:
 	return NULL;
 }
 
-/* The ALLOC_WMARK bits are used as an index to zone->watermark */
-#define ALLOC_WMARK_MIN		WMARK_MIN
-#define ALLOC_WMARK_LOW		WMARK_LOW
-#define ALLOC_WMARK_HIGH	WMARK_HIGH
-#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
-
-/* Mask to get the watermark bits */
-#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
-
-#define ALLOC_HARDER		0x10 /* try to alloc harder */
-#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
-#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
-
 #ifdef CONFIG_FAIL_PAGE_ALLOC
 
 static struct {
@@ -1648,7 +1635,11 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		min -= min / 2;
 	if (alloc_flags & ALLOC_HARDER)
 		min -= min / 4;
-
+#ifdef CONFIG_CMA
+	/* If allocation can't use CMA areas don't use free CMA pages */
+	if (!(alloc_flags & ALLOC_CMA))
+		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
+#endif
 	if (free_pages <= min + lowmem_reserve)
 		return false;
 	for (o = 0; o < order; o++) {
@@ -2362,7 +2353,10 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 		     unlikely(test_thread_flag(TIF_MEMDIE))))
 			alloc_flags |= ALLOC_NO_WATERMARKS;
 	}
-
+#ifdef CONFIG_CMA
+	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+		alloc_flags |= ALLOC_CMA;
+#endif
 	return alloc_flags;
 }
 
@@ -2587,6 +2581,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	struct page *page = NULL;
 	int migratetype = allocflags_to_migratetype(gfp_mask);
 	unsigned int cpuset_mems_cookie;
+	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
 
 	gfp_mask &= gfp_allowed_mask;
 
@@ -2615,9 +2610,13 @@ retry_cpuset:
 	if (!preferred_zone)
 		goto out;
 
+#ifdef CONFIG_CMA
+	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+		alloc_flags |= ALLOC_CMA;
+#endif
 	/* First allocation attempt */
 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
-			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
+			zonelist, high_zoneidx, alloc_flags,
 			preferred_zone, migratetype);
 	if (unlikely(!page))
 		page = __alloc_pages_slowpath(gfp_mask, order,
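
Taken together, the patch makes the zone watermark check CMA-aware: a caller only gets credit for free CMA pages if it can actually allocate from CMA areas, which gfp_to_alloc_flags() grants via ALLOC_CMA for MIGRATE_MOVABLE allocations. The following minimal userspace sketch models that decision; watermark_ok(), struct toy_zone, and the flag values are illustrative stand-ins for the kernel's __zone_watermark_ok() and struct zone, not the real implementation:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's alloc_flags bits. */
#define ALLOC_HARDER	0x10
#define ALLOC_CMA	0x80	/* value chosen for the sketch only */

/* Toy zone: just the counters the check needs. */
struct toy_zone {
	long free_pages;	/* models NR_FREE_PAGES */
	long free_cma_pages;	/* models NR_FREE_CMA_PAGES */
};

/*
 * Models the patched __zone_watermark_ok() logic: a caller that
 * cannot allocate from CMA areas must not count free CMA pages
 * toward the watermark, or the check could pass even though no
 * page usable by that caller exists.
 */
static bool watermark_ok(const struct toy_zone *z, long mark,
			 long lowmem_reserve, int alloc_flags)
{
	long min = mark;
	long free_pages = z->free_pages;

	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	/* The core of the patch: ignore CMA pages unless ALLOC_CMA. */
	if (!(alloc_flags & ALLOC_CMA))
		free_pages -= z->free_cma_pages;

	return free_pages > min + lowmem_reserve;
}

int main(void)
{
	struct toy_zone z = { .free_pages = 1000, .free_cma_pages = 900 };

	/* Unmovable request: only 100 usable pages, fails a mark of 128. */
	printf("unmovable: %d\n", watermark_ok(&z, 128, 0, 0));

	/* Movable request may use CMA, so the same zone passes. */
	printf("movable:   %d\n", watermark_ok(&z, 128, 0, ALLOC_CMA));
	return 0;
}

With 1000 free pages of which 900 sit in CMA, the unmovable request fails the 128-page watermark while the movable one passes; counting those 900 pages for the unmovable caller is exactly the over-optimistic accounting the patch removes.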