author		Mel Gorman <mgorman@suse.de>	2014-06-04 19:10:33 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-04 19:54:10 -0400
commit		d8846374a85f4290a473a4e2a64c1ba046c4a0e1 (patch)
tree		6dcc9b3450599d0aa63644a1396b97816823194f /mm
parent		2457aec63745e235bcafb7ef312b182d8682f0fc (diff)
mm: page_alloc: calculate classzone_idx once from the zonelist ref
There is no need to calculate zone_idx(preferred_zone) multiple times
or use the pgdat to figure it out.
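
The win is mechanical: zone_idx(zone) is defined as pointer arithmetic against the zone's pgdat ((zone) - (zone)->zone_pgdat->node_zones), whereas a struct zoneref already carries the zone index next to the zone pointer, so zonelist_zone_idx() is a plain field read. As a minimal, self-contained sketch of the pattern, using simplified stand-in types rather than the kernel's real structures:

    #include <stdio.h>

    /* Simplified stand-ins for the kernel's zone and zoneref; the real
     * definitions live in include/linux/mmzone.h. */
    struct zone { int idx; };
    struct zoneref { struct zone *zone; int zone_idx; };

    /* Analogue of zonelist_zone_idx(): the index is cached on the
     * zoneref, so no pgdat arithmetic is needed to recover it. */
    static int zoneref_zone_idx(const struct zoneref *ref)
    {
        return ref->zone_idx;
    }

    /* Post-patch shape of a slowpath helper: classzone_idx arrives as a
     * parameter instead of being recomputed from preferred_zone. */
    static void try_alloc(struct zone *preferred_zone, int classzone_idx)
    {
        printf("watermark check against classzone_idx=%d (zone %d)\n",
               classzone_idx, preferred_zone->idx);
    }

    int main(void)
    {
        struct zone normal = { .idx = 2 };
        struct zoneref ref = { .zone = &normal, .zone_idx = 2 };

        /* Compute the index once from the zonelist reference... */
        int classzone_idx = zoneref_zone_idx(&ref);

        /* ...then thread it through the allocator call chain. */
        try_alloc(ref.zone, classzone_idx);
        try_alloc(ref.zone, classzone_idx);
        return 0;
    }

Accordingly, every helper in the allocation slowpath below gains an extra classzone_idx parameter so the value computed once in __alloc_pages_nodemask() reaches the watermark checks unchanged.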
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Jan Kara <jack@suse.cz>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	59
1 file changed, 34 insertions(+), 25 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 09345ab7fb63..8f785b1534a3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1916,11 +1916,10 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 static struct page *
 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
-		struct zone *preferred_zone, int migratetype)
+		struct zone *preferred_zone, int classzone_idx, int migratetype)
 {
 	struct zoneref *z;
 	struct page *page = NULL;
-	int classzone_idx;
 	struct zone *zone;
 	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
 	int zlc_active = 0;		/* set if using zonelist_cache */
@@ -1928,7 +1927,6 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 	bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
 				(gfp_mask & __GFP_WRITE);
 
-	classzone_idx = zone_idx(preferred_zone);
 zonelist_scan:
 	/*
 	 * Scan zonelist, looking for a zone with enough free.
@@ -2186,7 +2184,7 @@ static inline struct page *
 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, struct zone *preferred_zone,
-	int migratetype)
+	int classzone_idx, int migratetype)
 {
 	struct page *page;
 
@@ -2204,7 +2202,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
 		order, zonelist, high_zoneidx,
 		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
-		preferred_zone, migratetype);
+		preferred_zone, classzone_idx, migratetype);
 	if (page)
 		goto out;
 
@@ -2239,7 +2237,7 @@ static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, enum migrate_mode mode,
+	int classzone_idx, int migratetype, enum migrate_mode mode,
 	bool *contended_compaction, bool *deferred_compaction,
 	unsigned long *did_some_progress)
 {
@@ -2267,7 +2265,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 		page = get_page_from_freelist(gfp_mask, nodemask,
 				order, zonelist, high_zoneidx,
 				alloc_flags & ~ALLOC_NO_WATERMARKS,
-				preferred_zone, migratetype);
+				preferred_zone, classzone_idx, migratetype);
 		if (page) {
 			preferred_zone->compact_blockskip_flush = false;
 			compaction_defer_reset(preferred_zone, order, true);
@@ -2299,7 +2297,8 @@ static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, enum migrate_mode mode, bool *contended_compaction,
+	int classzone_idx, int migratetype,
+	enum migrate_mode mode, bool *contended_compaction,
 	bool *deferred_compaction, unsigned long *did_some_progress)
 {
 	return NULL;
@@ -2339,7 +2338,7 @@ static inline struct page *
 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress)
+	int classzone_idx, int migratetype, unsigned long *did_some_progress)
 {
 	struct page *page = NULL;
 	bool drained = false;
@@ -2357,7 +2356,8 @@ retry:
 	page = get_page_from_freelist(gfp_mask, nodemask, order,
 					zonelist, high_zoneidx,
 					alloc_flags & ~ALLOC_NO_WATERMARKS,
-					preferred_zone, migratetype);
+					preferred_zone, classzone_idx,
+					migratetype);
 
 	/*
 	 * If an allocation failed after direct reclaim, it could be because
@@ -2380,14 +2380,14 @@ static inline struct page *
 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, struct zone *preferred_zone,
-	int migratetype)
+	int classzone_idx, int migratetype)
 {
 	struct page *page;
 
 	do {
 		page = get_page_from_freelist(gfp_mask, nodemask, order,
 			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
-			preferred_zone, migratetype);
+			preferred_zone, classzone_idx, migratetype);
 
 		if (!page && gfp_mask & __GFP_NOFAIL)
 			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
@@ -2488,7 +2488,7 @@ static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, struct zone *preferred_zone,
-	int migratetype)
+	int classzone_idx, int migratetype)
 {
 	const gfp_t wait = gfp_mask & __GFP_WAIT;
 	struct page *page = NULL;
@@ -2537,15 +2537,18 @@ restart:
 	 * Find the true preferred zone if the allocation is unconstrained by
	 * cpusets.
	 */
-	if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
-		first_zones_zonelist(zonelist, high_zoneidx, NULL,
-					&preferred_zone);
+	if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) {
+		struct zoneref *preferred_zoneref;
+		preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
+				NULL, &preferred_zone);
+		classzone_idx = zonelist_zone_idx(preferred_zoneref);
+	}
 
 rebalance:
 	/* This is the last chance, in general, before the goto nopage. */
 	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
 			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
-			preferred_zone, migratetype);
+			preferred_zone, classzone_idx, migratetype);
 	if (page)
 		goto got_pg;
 
@@ -2560,7 +2563,7 @@ rebalance:
 
 	page = __alloc_pages_high_priority(gfp_mask, order,
 			zonelist, high_zoneidx, nodemask,
-			preferred_zone, migratetype);
+			preferred_zone, classzone_idx, migratetype);
 	if (page) {
 		goto got_pg;
 	}
@@ -2591,7 +2594,8 @@ rebalance:
 	 */
 	page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
 					high_zoneidx, nodemask, alloc_flags,
-					preferred_zone, migratetype,
+					preferred_zone,
+					classzone_idx, migratetype,
 					migration_mode, &contended_compaction,
 					&deferred_compaction,
 					&did_some_progress);
@@ -2621,7 +2625,8 @@ rebalance:
 					zonelist, high_zoneidx,
 					nodemask,
 					alloc_flags, preferred_zone,
-					migratetype, &did_some_progress);
+					classzone_idx, migratetype,
+					&did_some_progress);
 	if (page)
 		goto got_pg;
 
@@ -2640,7 +2645,7 @@ rebalance:
 			page = __alloc_pages_may_oom(gfp_mask, order,
 					zonelist, high_zoneidx,
 					nodemask, preferred_zone,
-					migratetype);
+					classzone_idx, migratetype);
 			if (page)
 				goto got_pg;
 
@@ -2681,7 +2686,8 @@ rebalance:
 	 */
 	page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
 					high_zoneidx, nodemask, alloc_flags,
-					preferred_zone, migratetype,
+					preferred_zone,
+					classzone_idx, migratetype,
 					migration_mode, &contended_compaction,
 					&deferred_compaction,
 					&did_some_progress);
@@ -2708,10 +2714,12 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 {
 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 	struct zone *preferred_zone;
+	struct zoneref *preferred_zoneref;
 	struct page *page = NULL;
 	int migratetype = allocflags_to_migratetype(gfp_mask);
 	unsigned int cpuset_mems_cookie;
 	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
+	int classzone_idx;
 
 	gfp_mask &= gfp_allowed_mask;
 
@@ -2734,11 +2742,12 @@ retry_cpuset:
 	cpuset_mems_cookie = read_mems_allowed_begin();
 
 	/* The preferred zone is used for statistics later */
-	first_zones_zonelist(zonelist, high_zoneidx,
+	preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
 				nodemask ? : &cpuset_current_mems_allowed,
 				&preferred_zone);
 	if (!preferred_zone)
 		goto out;
+	classzone_idx = zonelist_zone_idx(preferred_zoneref);
 
 #ifdef CONFIG_CMA
 	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
@@ -2748,7 +2757,7 @@ retry:
 	/* First allocation attempt */
 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
 			zonelist, high_zoneidx, alloc_flags,
-			preferred_zone, migratetype);
+			preferred_zone, classzone_idx, migratetype);
 	if (unlikely(!page)) {
 		/*
 		 * The first pass makes sure allocations are spread
@@ -2774,7 +2783,7 @@ retry:
 		gfp_mask = memalloc_noio_flags(gfp_mask);
 		page = __alloc_pages_slowpath(gfp_mask, order,
 				zonelist, high_zoneidx, nodemask,
-				preferred_zone, migratetype);
+				preferred_zone, classzone_idx, migratetype);
 	}
 
 	trace_mm_page_alloc(page, order, gfp_mask, migratetype);