about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--mm/page_alloc.c37
1 file changed, 16 insertions(+), 21 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a88cb0cbf352..30a3250c0a21 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -970,7 +970,8 @@ static inline int check_new_page(struct page *page)
970 return 0; 970 return 0;
971} 971}
972 972
973static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags) 973static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
974 int alloc_flags)
974{ 975{
975 int i; 976 int i;
976 977
@@ -994,6 +995,14 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
994 995
995 set_page_owner(page, order, gfp_flags); 996 set_page_owner(page, order, gfp_flags);
996 997
998 /*
999 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
1000 * allocate the page. The expectation is that the caller is taking
1001 * steps that will free more memory. The caller should avoid the page
1002 * being used for !PFMEMALLOC purposes.
1003 */
1004 page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
1005
997 return 0; 1006 return 0;
998} 1007}
999 1008
@@ -1642,9 +1651,7 @@ int split_free_page(struct page *page)
1642} 1651}
1643 1652
1644/* 1653/*
1645 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But 1654 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
1646 * we cheat by calling it from here, in the order > 0 path. Saves a branch
1647 * or two.
1648 */ 1655 */
1649static inline 1656static inline
1650struct page *buffered_rmqueue(struct zone *preferred_zone, 1657struct page *buffered_rmqueue(struct zone *preferred_zone,
@@ -1655,7 +1662,6 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
1655 struct page *page; 1662 struct page *page;
1656 bool cold = ((gfp_flags & __GFP_COLD) != 0); 1663 bool cold = ((gfp_flags & __GFP_COLD) != 0);
1657 1664
1658again:
1659 if (likely(order == 0)) { 1665 if (likely(order == 0)) {
1660 struct per_cpu_pages *pcp; 1666 struct per_cpu_pages *pcp;
1661 struct list_head *list; 1667 struct list_head *list;
@@ -1711,8 +1717,6 @@ again:
1711 local_irq_restore(flags); 1717 local_irq_restore(flags);
1712 1718
1713 VM_BUG_ON_PAGE(bad_range(zone, page), page); 1719 VM_BUG_ON_PAGE(bad_range(zone, page), page);
1714 if (prep_new_page(page, order, gfp_flags))
1715 goto again;
1716 return page; 1720 return page;
1717 1721
1718failed: 1722failed:
@@ -2177,25 +2181,16 @@ zonelist_scan:
2177try_this_zone: 2181try_this_zone:
2178 page = buffered_rmqueue(preferred_zone, zone, order, 2182 page = buffered_rmqueue(preferred_zone, zone, order,
2179 gfp_mask, migratetype); 2183 gfp_mask, migratetype);
2180 if (page) 2184 if (page) {
2181 break; 2185 if (prep_new_page(page, order, gfp_mask, alloc_flags))
2186 goto try_this_zone;
2187 return page;
2188 }
2182this_zone_full: 2189this_zone_full:
2183 if (IS_ENABLED(CONFIG_NUMA) && zlc_active) 2190 if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
2184 zlc_mark_zone_full(zonelist, z); 2191 zlc_mark_zone_full(zonelist, z);
2185 } 2192 }
2186 2193
2187 if (page) {
2188 /*
2189 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
2190 * necessary to allocate the page. The expectation is
2191 * that the caller is taking steps that will free more
2192 * memory. The caller should avoid the page being used
2193 * for !PFMEMALLOC purposes.
2194 */
2195 page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
2196 return page;
2197 }
2198
2199 /* 2194 /*
2200 * The first pass makes sure allocations are spread fairly within the 2195 * The first pass makes sure allocations are spread fairly within the
2201 * local node. However, the local node might have free pages left 2196 * local node. However, the local node might have free pages left