Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 38 ++++++++++++++++++++++----------------
1 file changed, 22 insertions(+), 16 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 009ac285fea7..c66fb875104a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1928,6 +1928,17 @@ this_zone_full:
 		zlc_active = 0;
 		goto zonelist_scan;
 	}
+
+	if (page)
+		/*
+		 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
+		 * necessary to allocate the page. The expectation is
+		 * that the caller is taking steps that will free more
+		 * memory. The caller should avoid the page being used
+		 * for !PFMEMALLOC purposes.
+		 */
+		page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
+
 	return page;
 }
 
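The hunk above moves the point where page->pfmemalloc is set into get_page_from_freelist(), so the flag is written once for every successful allocation rather than only on the slowpath's ALLOC_NO_WATERMARKS branch (removed further down). The !! idiom normalizes the masked flag bits to exactly 0 or 1 before the assignment; a minimal standalone sketch, using an illustrative flag value rather than the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    #define ALLOC_NO_WATERMARKS 0x04    /* illustrative value, not the kernel's */

    int main(void)
    {
        int alloc_flags = 0x05;     /* flag set that includes ALLOC_NO_WATERMARKS */

        /* The raw mask yields 0x04; !! collapses it to exactly 0 or 1. */
        bool pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);

        printf("mask = %#x, pfmemalloc = %d\n",
               alloc_flags & ALLOC_NO_WATERMARKS, pfmemalloc);
        return 0;
    }
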
@@ -2091,7 +2102,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
 	int migratetype, bool sync_migration,
-	bool *deferred_compaction,
+	bool *contended_compaction, bool *deferred_compaction,
 	unsigned long *did_some_progress)
 {
 	struct page *page;
@@ -2106,7 +2117,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 
 	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
-						nodemask, sync_migration);
+						nodemask, sync_migration,
+						contended_compaction);
 	current->flags &= ~PF_MEMALLOC;
 	if (*did_some_progress != COMPACT_SKIPPED) {
 
@@ -2152,7 +2164,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
 	int migratetype, bool sync_migration,
-	bool *deferred_compaction,
+	bool *contended_compaction, bool *deferred_compaction,
 	unsigned long *did_some_progress)
 {
 	return NULL;
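This second copy of the signature is the stub built when compaction is configured out; it has to change in lockstep with the real implementation so every call site compiles either way. A sketch of the pattern under an assumed config macro:

    #include <stdbool.h>
    #include <stddef.h>

    /* #define CONFIG_COMPACTION */ /* uncomment to select the real version */

    #ifdef CONFIG_COMPACTION
    static void *direct_compact(bool *contended)
    {
        *contended = false;         /* real compaction work would run here */
        return NULL;
    }
    #else
    /* Feature compiled out: identical signature, trivially fails. */
    static void *direct_compact(bool *contended)
    {
        (void)contended;
        return NULL;
    }
    #endif

    int main(void)
    {
        bool contended = false;

        return direct_compact(&contended) == NULL ? 0 : 1;
    }
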
@@ -2325,6 +2337,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned long did_some_progress;
 	bool sync_migration = false;
 	bool deferred_compaction = false;
+	bool contended_compaction = false;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -2389,14 +2402,6 @@ rebalance:
 			zonelist, high_zoneidx, nodemask,
 			preferred_zone, migratetype);
 		if (page) {
-			/*
-			 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
-			 * necessary to allocate the page. The expectation is
-			 * that the caller is taking steps that will free more
-			 * memory. The caller should avoid the page being used
-			 * for !PFMEMALLOC purposes.
-			 */
-			page->pfmemalloc = true;
 			goto got_pg;
 		}
 	}
@@ -2422,6 +2427,7 @@ rebalance:
 					nodemask,
 					alloc_flags, preferred_zone,
 					migratetype, sync_migration,
+					&contended_compaction,
 					&deferred_compaction,
 					&did_some_progress);
 	if (page)
@@ -2431,10 +2437,11 @@ rebalance:
 	/*
 	 * If compaction is deferred for high-order allocations, it is because
 	 * sync compaction recently failed. In this is the case and the caller
-	 * has requested the system not be heavily disrupted, fail the
-	 * allocation now instead of entering direct reclaim
+	 * requested a movable allocation that does not heavily disrupt the
+	 * system then fail the allocation instead of entering direct reclaim.
 	 */
-	if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
+	if ((deferred_compaction || contended_compaction) &&
+						(gfp_mask & __GFP_NO_KSWAPD))
 		goto nopage;
 
 	/* Try direct reclaim and then allocating */
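The bail-out now fires on contention as well as deferral: an allocation flagged __GFP_NO_KSWAPD (e.g. a transparent-hugepage fault) fails fast rather than paying direct-reclaim latency, while ordinary allocations still fall through. A small runnable sketch of the condition, with an assumed flag value:

    #include <stdbool.h>
    #include <stdio.h>

    #define __GFP_NO_KSWAPD 0x400000u   /* illustrative value, not the kernel's */

    /* Mirrors the condition guarding "goto nopage" above. */
    static bool fail_fast(bool deferred, bool contended, unsigned int gfp_mask)
    {
        return (deferred || contended) && (gfp_mask & __GFP_NO_KSWAPD);
    }

    int main(void)
    {
        /* Contended THP-style request: bail out. */
        printf("%d\n", fail_fast(false, true, __GFP_NO_KSWAPD));   /* 1 */
        /* Same contention, ordinary request: proceed to reclaim. */
        printf("%d\n", fail_fast(false, true, 0));                 /* 0 */
        return 0;
    }
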
@@ -2505,6 +2512,7 @@ rebalance:
 					nodemask,
 					alloc_flags, preferred_zone,
 					migratetype, sync_migration,
+					&contended_compaction,
 					&deferred_compaction,
 					&did_some_progress);
 	if (page)
@@ -2569,8 +2577,6 @@ retry_cpuset:
 		page = __alloc_pages_slowpath(gfp_mask, order,
 				zonelist, high_zoneidx, nodemask,
 				preferred_zone, migratetype);
-	else
-		page->pfmemalloc = false;
 
 	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
 
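With get_page_from_freelist() writing page->pfmemalloc on every successful allocation, the fast path's defensive clear becomes dead code: the field now has a single writer and callers never observe a stale value. A sketch of that invariant, with illustrative types:

    #include <stdbool.h>
    #include <stddef.h>

    struct page_sketch {
        bool pfmemalloc;            /* stand-in for the real struct page field */
    };

    /*
     * Single assignment point: every page handed out passes through
     * here, so no later path needs to clear a possibly stale flag.
     */
    static struct page_sketch *freelist_alloc(bool used_no_watermarks)
    {
        static struct page_sketch page; /* stand-in for a real page frame */

        page.pfmemalloc = used_no_watermarks;
        return &page;
    }

    int main(void)
    {
        return freelist_alloc(false)->pfmemalloc ? 1 : 0;
    }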