author     Vlastimil Babka <vbabka@suse.cz>                2015-02-11 18:25:41 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-02-11 20:06:02 -0500
commit     a9263751e11a07af40a98dba88021821cd430cfd (patch)
tree       59ea04ed669897daf7c9a96a6949811fe0862d8f /mm
parent     753791910e23a95aade78f69e49713acddf8bb8c (diff)
mm, page_alloc: reduce number of alloc_pages* functions' parameters
Introduce struct alloc_context to accumulate the numerous parameters passed
between the alloc_pages* family of functions and get_page_from_freelist().
This excludes gfp_flags and alloc_flags, which mutate too much along the way,
and allocation order, which is conceptually different.

The result is shorter function signatures, as well as overall code size and
stack usage reductions.

bloat-o-meter:

add/remove: 0/0 grow/shrink: 1/2 up/down: 127/-310 (-183)
function                          old     new   delta
get_page_from_freelist           2525    2652    +127
__alloc_pages_direct_compact      329     283     -46
__alloc_pages_nodemask           2564    2300    -264

checkstack.pl:

function                          old     new
__alloc_pages_nodemask            248     200
get_page_from_freelist            168     184
__alloc_pages_direct_compact       40      24

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
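Editor's note: the pattern applied throughout the diff below is simply "bundle the
call-invariant parameters into one struct and hand inner helpers a const pointer
to it". A minimal, self-contained C sketch of that idea follows; it is illustrative
only, with simplified stand-in members and a hypothetical helper(), not kernel code.
The real struct alloc_context and its call sites are in the diff itself.

/* Illustrative sketch only -- simplified stand-ins, not kernel code. */
#include <stdio.h>

struct alloc_context {		/* the call-invariant parameters, bundled */
	int high_zoneidx;
	int classzone_idx;
	int migratetype;
};

/* Before: helper(order, high_zoneidx, classzone_idx, migratetype, ...)   */
/* After:  the bundled parameters travel together behind a const pointer. */
static int helper(unsigned int order, const struct alloc_context *ac)
{
	return (int)order + ac->high_zoneidx + ac->classzone_idx +
	       ac->migratetype;
}

int main(void)
{
	const struct alloc_context ac = {
		.high_zoneidx = 2,
		.classzone_idx = 2,
		.migratetype = 1,
	};

	printf("%d\n", helper(3, &ac));	/* prints 8 */
	return 0;
}

The const pointer documents that callees read but do not modify the bundled
parameters; __alloc_pages_slowpath(), which does update zonelist, preferred_zone
and classzone_idx, is the one function that takes the struct without const.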
Diffstat (limited to 'mm')
-rw-r--r--   mm/page_alloc.c   229
1 file changed, 108 insertions, 121 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 30a3250c0a21..4aead0bd8d44 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -232,6 +232,27 @@ EXPORT_SYMBOL(nr_node_ids);
 EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
+/*
+ * Structure for holding the mostly immutable allocation parameters passed
+ * between alloc_pages* family of functions.
+ *
+ * nodemask, migratetype and high_zoneidx are initialized only once in
+ * __alloc_pages_nodemask() and then never change.
+ *
+ * zonelist, preferred_zone and classzone_idx are set first in
+ * __alloc_pages_nodemask() for the fast path, and might be later changed
+ * in __alloc_pages_slowpath(). All other functions pass the whole structure
+ * by a const pointer.
+ */
+struct alloc_context {
+	struct zonelist *zonelist;
+	nodemask_t *nodemask;
+	struct zone *preferred_zone;
+	int classzone_idx;
+	int migratetype;
+	enum zone_type high_zoneidx;
+};
+
 int page_group_by_mobility_disabled __read_mostly;
 
 void set_pageblock_migratetype(struct page *page, int migratetype)
@@ -2037,10 +2058,10 @@ static void reset_alloc_batches(struct zone *preferred_zone)
  * a page.
  */
 static struct page *
-get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
-		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
-		struct zone *preferred_zone, int classzone_idx, int migratetype)
+get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
+						const struct alloc_context *ac)
 {
+	struct zonelist *zonelist = ac->zonelist;
 	struct zoneref *z;
 	struct page *page = NULL;
 	struct zone *zone;
@@ -2059,8 +2080,8 @@ zonelist_scan:
 	 * Scan zonelist, looking for a zone with enough free.
 	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
 	 */
-	for_each_zone_zonelist_nodemask(zone, z, zonelist,
-						high_zoneidx, nodemask) {
+	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
+								ac->nodemask) {
 		unsigned long mark;
 
 		if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
@@ -2077,7 +2098,7 @@ zonelist_scan:
 		 * time the page has in memory before being reclaimed.
 		 */
 		if (alloc_flags & ALLOC_FAIR) {
-			if (!zone_local(preferred_zone, zone))
+			if (!zone_local(ac->preferred_zone, zone))
 				break;
 			if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
 				nr_fair_skipped++;
@@ -2115,7 +2136,7 @@ zonelist_scan:
 
 		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
 		if (!zone_watermark_ok(zone, order, mark,
-				classzone_idx, alloc_flags)) {
+				ac->classzone_idx, alloc_flags)) {
 			int ret;
 
 			/* Checked here to keep the fast path fast */
@@ -2136,7 +2157,7 @@ zonelist_scan:
 			}
 
 			if (zone_reclaim_mode == 0 ||
-			    !zone_allows_reclaim(preferred_zone, zone))
+			    !zone_allows_reclaim(ac->preferred_zone, zone))
 				goto this_zone_full;
 
 			/*
@@ -2158,7 +2179,7 @@ zonelist_scan:
 			default:
 				/* did we reclaim enough */
 				if (zone_watermark_ok(zone, order, mark,
-						classzone_idx, alloc_flags))
+						ac->classzone_idx, alloc_flags))
 					goto try_this_zone;
 
 				/*
@@ -2179,8 +2200,8 @@ zonelist_scan:
 		}
 
 try_this_zone:
-		page = buffered_rmqueue(preferred_zone, zone, order,
-						gfp_mask, migratetype);
+		page = buffered_rmqueue(ac->preferred_zone, zone, order,
+				gfp_mask, ac->migratetype);
 		if (page) {
 			if (prep_new_page(page, order, gfp_mask, alloc_flags))
 				goto try_this_zone;
@@ -2203,7 +2224,7 @@ this_zone_full:
 		alloc_flags &= ~ALLOC_FAIR;
 		if (nr_fair_skipped) {
 			zonelist_rescan = true;
-			reset_alloc_batches(preferred_zone);
+			reset_alloc_batches(ac->preferred_zone);
 		}
 		if (nr_online_nodes > 1)
 			zonelist_rescan = true;
@@ -2325,9 +2346,7 @@ should_alloc_retry(gfp_t gfp_mask, unsigned int order,
 
 static inline struct page *
 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
-	struct zonelist *zonelist, enum zone_type high_zoneidx,
-	nodemask_t *nodemask, struct zone *preferred_zone,
-	int classzone_idx, int migratetype, unsigned long *did_some_progress)
+	const struct alloc_context *ac, unsigned long *did_some_progress)
 {
 	struct page *page;
 
@@ -2340,7 +2359,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 	 * Acquire the per-zone oom lock for each zone. If that
 	 * fails, somebody else is making progress for us.
 	 */
-	if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
+	if (!oom_zonelist_trylock(ac->zonelist, gfp_mask)) {
 		*did_some_progress = 1;
 		schedule_timeout_uninterruptible(1);
 		return NULL;
@@ -2359,10 +2378,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 	 * here, this is only to catch a parallel oom killing, we must fail if
 	 * we're still under heavy pressure.
 	 */
-	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
-					order, zonelist, high_zoneidx,
-					ALLOC_WMARK_HIGH|ALLOC_CPUSET,
-					preferred_zone, classzone_idx, migratetype);
+	page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
+					ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
 	if (page)
 		goto out;
 
@@ -2374,7 +2391,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 	if (order > PAGE_ALLOC_COSTLY_ORDER)
 		goto out;
 	/* The OOM killer does not needlessly kill tasks for lowmem */
-	if (high_zoneidx < ZONE_NORMAL)
+	if (ac->high_zoneidx < ZONE_NORMAL)
 		goto out;
 	/* The OOM killer does not compensate for light reclaim */
 	if (!(gfp_mask & __GFP_FS))
@@ -2390,10 +2407,10 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 			goto out;
 	}
 	/* Exhausted what can be done so it's blamo time */
-	out_of_memory(zonelist, gfp_mask, order, nodemask, false);
+	out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false);
 	*did_some_progress = 1;
 out:
-	oom_zonelist_unlock(zonelist, gfp_mask);
+	oom_zonelist_unlock(ac->zonelist, gfp_mask);
 	return page;
 }
 
@@ -2401,10 +2418,9 @@ out:
 /* Try memory compaction for high-order allocations before reclaim */
 static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
-		struct zonelist *zonelist, enum zone_type high_zoneidx,
-		nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-		int classzone_idx, int migratetype, enum migrate_mode mode,
-		int *contended_compaction, bool *deferred_compaction)
+		int alloc_flags, const struct alloc_context *ac,
+		enum migrate_mode mode, int *contended_compaction,
+		bool *deferred_compaction)
 {
 	unsigned long compact_result;
 	struct page *page;
@@ -2413,10 +2429,10 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 		return NULL;
 
 	current->flags |= PF_MEMALLOC;
-	compact_result = try_to_compact_pages(zonelist, order, gfp_mask,
-						nodemask, mode,
+	compact_result = try_to_compact_pages(ac->zonelist, order, gfp_mask,
+						ac->nodemask, mode,
 						contended_compaction,
-						alloc_flags, classzone_idx);
+						alloc_flags, ac->classzone_idx);
 	current->flags &= ~PF_MEMALLOC;
 
 	switch (compact_result) {
@@ -2435,10 +2451,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	 */
 	count_vm_event(COMPACTSTALL);
 
-	page = get_page_from_freelist(gfp_mask, nodemask,
-			order, zonelist, high_zoneidx,
-			alloc_flags & ~ALLOC_NO_WATERMARKS,
-			preferred_zone, classzone_idx, migratetype);
+	page = get_page_from_freelist(gfp_mask, order,
+				alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
 
 	if (page) {
 		struct zone *zone = page_zone(page);
@@ -2462,10 +2476,9 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 #else
 static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
-		struct zonelist *zonelist, enum zone_type high_zoneidx,
-		nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-		int classzone_idx, int migratetype, enum migrate_mode mode,
-		int *contended_compaction, bool *deferred_compaction)
+		int alloc_flags, const struct alloc_context *ac,
+		enum migrate_mode mode, int *contended_compaction,
+		bool *deferred_compaction)
 {
 	return NULL;
 }
@@ -2473,8 +2486,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 
 /* Perform direct synchronous page reclaim */
 static int
-__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
-		  nodemask_t *nodemask)
+__perform_reclaim(gfp_t gfp_mask, unsigned int order,
+					const struct alloc_context *ac)
 {
 	struct reclaim_state reclaim_state;
 	int progress;
@@ -2488,7 +2501,8 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
 	reclaim_state.reclaimed_slab = 0;
 	current->reclaim_state = &reclaim_state;
 
-	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
+	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
+								ac->nodemask);
 
 	current->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
@@ -2502,28 +2516,23 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
 /* The really slow allocator path where we enter direct reclaim */
 static inline struct page *
 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
-		struct zonelist *zonelist, enum zone_type high_zoneidx,
-		nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-		int classzone_idx, int migratetype, unsigned long *did_some_progress)
+		int alloc_flags, const struct alloc_context *ac,
+		unsigned long *did_some_progress)
 {
 	struct page *page = NULL;
 	bool drained = false;
 
-	*did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
-					       nodemask);
+	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
 	if (unlikely(!(*did_some_progress)))
 		return NULL;
 
 	/* After successful reclaim, reconsider all zones for allocation */
 	if (IS_ENABLED(CONFIG_NUMA))
-		zlc_clear_zones_full(zonelist);
+		zlc_clear_zones_full(ac->zonelist);
 
 retry:
-	page = get_page_from_freelist(gfp_mask, nodemask, order,
-					zonelist, high_zoneidx,
-					alloc_flags & ~ALLOC_NO_WATERMARKS,
-					preferred_zone, classzone_idx,
-					migratetype);
+	page = get_page_from_freelist(gfp_mask, order,
+					alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
 
 	/*
 	 * If an allocation failed after direct reclaim, it could be because
@@ -2544,36 +2553,30 @@ retry:
  */
 static inline struct page *
 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
-	struct zonelist *zonelist, enum zone_type high_zoneidx,
-	nodemask_t *nodemask, struct zone *preferred_zone,
-	int classzone_idx, int migratetype)
+				const struct alloc_context *ac)
 {
 	struct page *page;
 
 	do {
-		page = get_page_from_freelist(gfp_mask, nodemask, order,
-			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
-			preferred_zone, classzone_idx, migratetype);
+		page = get_page_from_freelist(gfp_mask, order,
+						ALLOC_NO_WATERMARKS, ac);
 
 		if (!page && gfp_mask & __GFP_NOFAIL)
-			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
+			wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC,
+									HZ/50);
 	} while (!page && (gfp_mask & __GFP_NOFAIL));
 
 	return page;
 }
 
-static void wake_all_kswapds(unsigned int order,
-			     struct zonelist *zonelist,
-			     enum zone_type high_zoneidx,
-			     struct zone *preferred_zone,
-			     nodemask_t *nodemask)
+static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
 {
 	struct zoneref *z;
 	struct zone *zone;
 
-	for_each_zone_zonelist_nodemask(zone, z, zonelist,
-						high_zoneidx, nodemask)
-		wakeup_kswapd(zone, order, zone_idx(preferred_zone));
+	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
+						ac->high_zoneidx, ac->nodemask)
+		wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
 }
 
 static inline int
@@ -2632,9 +2635,7 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
-	struct zonelist *zonelist, enum zone_type high_zoneidx,
-	nodemask_t *nodemask, struct zone *preferred_zone,
-	int classzone_idx, int migratetype)
+						struct alloc_context *ac)
 {
 	const gfp_t wait = gfp_mask & __GFP_WAIT;
 	struct page *page = NULL;
@@ -2670,8 +2671,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 retry:
 	if (!(gfp_mask & __GFP_NO_KSWAPD))
-		wake_all_kswapds(order, zonelist, high_zoneidx,
-				 preferred_zone, nodemask);
+		wake_all_kswapds(order, ac);
 
 	/*
 	 * OK, we're below the kswapd watermark and have kicked background
@@ -2684,17 +2684,16 @@ retry:
 	 * Find the true preferred zone if the allocation is unconstrained by
 	 * cpusets.
 	 */
-	if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) {
+	if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) {
 		struct zoneref *preferred_zoneref;
-		preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
-				NULL, &preferred_zone);
-		classzone_idx = zonelist_zone_idx(preferred_zoneref);
+		preferred_zoneref = first_zones_zonelist(ac->zonelist,
+				ac->high_zoneidx, NULL, &ac->preferred_zone);
+		ac->classzone_idx = zonelist_zone_idx(preferred_zoneref);
 	}
 
 	/* This is the last chance, in general, before the goto nopage. */
-	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
-			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
-			preferred_zone, classzone_idx, migratetype);
+	page = get_page_from_freelist(gfp_mask, order,
+				alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
 	if (page)
 		goto got_pg;
 
@@ -2705,11 +2704,10 @@ retry:
 		 * the allocation is high priority and these type of
 		 * allocations are system rather than user orientated
 		 */
-		zonelist = node_zonelist(numa_node_id(), gfp_mask);
+		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
+
+		page = __alloc_pages_high_priority(gfp_mask, order, ac);
 
-		page = __alloc_pages_high_priority(gfp_mask, order,
-				zonelist, high_zoneidx, nodemask,
-				preferred_zone, classzone_idx, migratetype);
 		if (page) {
 			goto got_pg;
 		}
@@ -2738,11 +2736,9 @@ retry:
 	 * Try direct compaction. The first pass is asynchronous. Subsequent
 	 * attempts after direct reclaim are synchronous
 	 */
-	page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
-					high_zoneidx, nodemask, alloc_flags,
-					preferred_zone,
-					classzone_idx, migratetype,
-					migration_mode, &contended_compaction,
+	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
+					migration_mode,
+					&contended_compaction,
 					&deferred_compaction);
 	if (page)
 		goto got_pg;
@@ -2788,12 +2784,8 @@ retry:
 		migration_mode = MIGRATE_SYNC_LIGHT;
 
 	/* Try direct reclaim and then allocating */
-	page = __alloc_pages_direct_reclaim(gfp_mask, order,
-					zonelist, high_zoneidx,
-					nodemask,
-					alloc_flags, preferred_zone,
-					classzone_idx, migratetype,
-					&did_some_progress);
+	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
+						&did_some_progress);
 	if (page)
 		goto got_pg;
 
@@ -2807,17 +2799,15 @@ retry:
 		 * start OOM killing tasks.
 		 */
 		if (!did_some_progress) {
-			page = __alloc_pages_may_oom(gfp_mask, order, zonelist,
-						high_zoneidx, nodemask,
-						preferred_zone, classzone_idx,
-						migratetype,&did_some_progress);
+			page = __alloc_pages_may_oom(gfp_mask, order, ac,
+							&did_some_progress);
 			if (page)
 				goto got_pg;
 			if (!did_some_progress)
 				goto nopage;
 		}
 		/* Wait for some write requests to complete then retry */
-		wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
+		wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50);
 		goto retry;
 	} else {
 		/*
@@ -2825,11 +2815,9 @@ retry:
 		 * direct reclaim and reclaim/compaction depends on compaction
 		 * being called after reclaim so call directly if necessary
 		 */
-		page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
-					high_zoneidx, nodemask, alloc_flags,
-					preferred_zone,
-					classzone_idx, migratetype,
-					migration_mode, &contended_compaction,
+		page = __alloc_pages_direct_compact(gfp_mask, order,
+					alloc_flags, ac, migration_mode,
+					&contended_compaction,
 					&deferred_compaction);
 		if (page)
 			goto got_pg;
@@ -2848,15 +2836,16 @@ struct page *
 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 			struct zonelist *zonelist, nodemask_t *nodemask)
 {
-	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
-	struct zone *preferred_zone;
 	struct zoneref *preferred_zoneref;
 	struct page *page = NULL;
-	int migratetype = gfpflags_to_migratetype(gfp_mask);
 	unsigned int cpuset_mems_cookie;
 	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
-	int classzone_idx;
 	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
+	struct alloc_context ac = {
+		.high_zoneidx = gfp_zone(gfp_mask),
+		.nodemask = nodemask,
+		.migratetype = gfpflags_to_migratetype(gfp_mask),
+	};
 
 	gfp_mask &= gfp_allowed_mask;
 
@@ -2875,25 +2864,25 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	if (unlikely(!zonelist->_zonerefs->zone))
 		return NULL;
 
-	if (IS_ENABLED(CONFIG_CMA) && migratetype == MIGRATE_MOVABLE)
+	if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
 		alloc_flags |= ALLOC_CMA;
 
 retry_cpuset:
 	cpuset_mems_cookie = read_mems_allowed_begin();
 
+	/* We set it here, as __alloc_pages_slowpath might have changed it */
+	ac.zonelist = zonelist;
 	/* The preferred zone is used for statistics later */
-	preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
-				nodemask ? : &cpuset_current_mems_allowed,
-				&preferred_zone);
-	if (!preferred_zone)
+	preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
+				ac.nodemask ? : &cpuset_current_mems_allowed,
+				&ac.preferred_zone);
+	if (!ac.preferred_zone)
 		goto out;
-	classzone_idx = zonelist_zone_idx(preferred_zoneref);
+	ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
 
 	/* First allocation attempt */
 	alloc_mask = gfp_mask|__GFP_HARDWALL;
-	page = get_page_from_freelist(alloc_mask, nodemask, order, zonelist,
-			high_zoneidx, alloc_flags, preferred_zone,
-			classzone_idx, migratetype);
+	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
 	if (unlikely(!page)) {
 		/*
 		 * Runtime PM, block IO and its error handling path
@@ -2902,15 +2891,13 @@ retry_cpuset:
 		 */
 		alloc_mask = memalloc_noio_flags(gfp_mask);
 
-		page = __alloc_pages_slowpath(alloc_mask, order,
-				zonelist, high_zoneidx, nodemask,
-				preferred_zone, classzone_idx, migratetype);
+		page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 	}
 
 	if (kmemcheck_enabled && page)
 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
 
-	trace_mm_page_alloc(page, order, alloc_mask, migratetype);
+	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
 
 out:
 	/*