Diffstat (limited to 'mm/page_alloc.c'):
 mm/page_alloc.c | 45 +++++++++++++++++++++++++++++++++++----------
 1 file changed, 35 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9404b38bcdc5..cb5723c491f0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1981,14 +1981,20 @@ static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress,
-	bool sync_migration)
+	int migratetype, bool sync_migration,
+	bool *deferred_compaction,
+	unsigned long *did_some_progress)
 {
 	struct page *page;
 
-	if (!order || compaction_deferred(preferred_zone))
+	if (!order)
 		return NULL;
 
+	if (compaction_deferred(preferred_zone)) {
+		*deferred_compaction = true;
+		return NULL;
+	}
+
 	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
 						nodemask, sync_migration);
@@ -2016,7 +2022,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	 * but not enough to satisfy watermarks.
 	 */
 	count_vm_event(COMPACTFAIL);
-	defer_compaction(preferred_zone);
+
+	/*
+	 * As async compaction considers a subset of pageblocks, only
+	 * defer if the failure was a sync compaction failure.
+	 */
+	if (sync_migration)
+		defer_compaction(preferred_zone);
 
 	cond_resched();
 }
@@ -2028,8 +2040,9 @@ static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress,
-	bool sync_migration)
+	int migratetype, bool sync_migration,
+	bool *deferred_compaction,
+	unsigned long *did_some_progress)
 {
 	return NULL;
 }
@@ -2179,6 +2192,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned long pages_reclaimed = 0;
 	unsigned long did_some_progress;
 	bool sync_migration = false;
+	bool deferred_compaction = false;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -2259,12 +2273,22 @@ rebalance:
 					zonelist, high_zoneidx,
 					nodemask,
 					alloc_flags, preferred_zone,
-					migratetype, &did_some_progress,
-					sync_migration);
+					migratetype, sync_migration,
+					&deferred_compaction,
+					&did_some_progress);
 	if (page)
 		goto got_pg;
 	sync_migration = true;
 
+	/*
+	 * If compaction is deferred for high-order allocations, it is because
+	 * sync compaction recently failed. If this is the case and the caller
+	 * has requested the system not be heavily disrupted, fail the
+	 * allocation now instead of entering direct reclaim.
+	 */
+	if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
+		goto nopage;
+
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
 					zonelist, high_zoneidx,
@@ -2328,8 +2352,9 @@ rebalance:
 					zonelist, high_zoneidx,
 					nodemask,
 					alloc_flags, preferred_zone,
-					migratetype, &did_some_progress,
-					sync_migration);
+					migratetype, sync_migration,
+					&deferred_compaction,
+					&did_some_progress);
 	if (page)
 		goto got_pg;
 	}
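
A minimal stand-alone C sketch of the control flow this patch introduces, for illustration only: the stub below and the __GFP_NO_KSWAPD value are invented stand-ins, not the kernel implementation. It models how __alloc_pages_direct_compact() now reports deferral through an out-parameter so the slowpath can fail a __GFP_NO_KSWAPD allocation before entering direct reclaim.

#include <stdbool.h>
#include <stdio.h>

#define __GFP_NO_KSWAPD 0x1u	/* stand-in flag value, not the kernel's */

/* Stub: pretend sync compaction was recently deferred for this zone. */
static bool compaction_deferred_stub(void)
{
	return true;
}

/* Models the reworked __alloc_pages_direct_compact() contract. */
static void *direct_compact(unsigned int order, bool sync_migration,
			    bool *deferred_compaction)
{
	(void)sync_migration;

	if (!order)
		return NULL;

	/* Deferral is now reported to the caller instead of being silent. */
	if (compaction_deferred_stub()) {
		*deferred_compaction = true;
		return NULL;
	}

	/* ... the actual compaction attempt would go here ... */
	return NULL;
}

int main(void)
{
	unsigned int gfp_mask = __GFP_NO_KSWAPD;
	bool deferred_compaction = false;
	void *page = direct_compact(2, false, &deferred_compaction);

	/*
	 * The new early exit: if compaction was deferred and the caller
	 * asked not to disturb the system, skip direct reclaim entirely.
	 */
	if (!page && deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
		puts("fail allocation early; direct reclaim skipped");
	else
		puts("fall through to direct reclaim");

	return 0;
}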