author		Mel Gorman <mgorman@techsingularity.net>	2016-05-19 20:14:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-05-19 22:12:14 -0400
commit		c33d6c06f60f710f0305ae792773e1c2560e1e51
tree		97fc4be2e392b0b5c80f6d7458ab8d3a0e940c48 /mm/page_alloc.c
parent		48ee5f3696f62496481a8b6d852bcad9b3ebbe37
mm, page_alloc: avoid looking up the first zone in a zonelist twice
The allocator fast path looks up the first usable zone in a zonelist and
then get_page_from_freelist does the same job in the zonelist iterator.
This patch preserves the zoneref from the first lookup so that
get_page_from_freelist can resume the iteration from it instead of
repeating the search.

                                           4.6.0-rc2             4.6.0-rc2
                                      fastmark-v1r20        initonce-v1r20
Min      alloc-odr0-1               364.00 (  0.00%)       359.00 (  1.37%)
Min      alloc-odr0-2               262.00 (  0.00%)       260.00 (  0.76%)
Min      alloc-odr0-4               214.00 (  0.00%)       214.00 (  0.00%)
Min      alloc-odr0-8               186.00 (  0.00%)       186.00 (  0.00%)
Min      alloc-odr0-16              173.00 (  0.00%)       173.00 (  0.00%)
Min      alloc-odr0-32              165.00 (  0.00%)       165.00 (  0.00%)
Min      alloc-odr0-64              161.00 (  0.00%)       162.00 ( -0.62%)
Min      alloc-odr0-128             159.00 (  0.00%)       161.00 ( -1.26%)
Min      alloc-odr0-256             168.00 (  0.00%)       170.00 ( -1.19%)
Min      alloc-odr0-512             180.00 (  0.00%)       181.00 ( -0.56%)
Min      alloc-odr0-1024            190.00 (  0.00%)       190.00 (  0.00%)
Min      alloc-odr0-2048            196.00 (  0.00%)       196.00 (  0.00%)
Min      alloc-odr0-4096            202.00 (  0.00%)       202.00 (  0.00%)
Min      alloc-odr0-8192            206.00 (  0.00%)       205.00 (  0.49%)
Min      alloc-odr0-16384           206.00 (  0.00%)       205.00 (  0.49%)

The benefit is negligible and the results are within the noise, but each
cycle counts.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
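The pattern being applied is general: when the first element of an iteration
is needed ahead of the loop, cache the iterator position rather than
recomputing it. A minimal user-space sketch of the idea, assuming stand-in
types and a hypothetical first_usable() helper (none of this is kernel API):

	#include <stdio.h>
	#include <stddef.h>

	/* Stand-ins for struct zone / struct zoneref: a table of zones
	 * and a cursor pointing into it. */
	struct zone { int usable; const char *name; };
	struct zoneref { struct zone *zone; };

	/* One-time lookup, analogous to first_zones_zonelist(): returns a
	 * cursor so the caller can both read the zone and resume iteration
	 * from the same spot later. */
	static struct zoneref *first_usable(struct zoneref *refs, size_t n)
	{
		for (size_t i = 0; i < n; i++)
			if (refs[i].zone->usable)
				return &refs[i];
		return NULL;
	}

	int main(void)
	{
		struct zone zones[] = {
			{ 0, "DMA" }, { 1, "NORMAL" }, { 1, "MOVABLE" },
		};
		struct zoneref refs[] = {
			{ &zones[0] }, { &zones[1] }, { &zones[2] },
		};
		struct zoneref *end = refs + 3;

		/* Fast path: look the first usable zone up exactly once... */
		struct zoneref *preferred = first_usable(refs, 3);
		if (!preferred)
			return 1;

		/* ...then resume from the cached cursor instead of scanning
		 * from the head again, which is what the patch achieves with
		 * ac->preferred_zoneref in get_page_from_freelist(). */
		for (struct zoneref *z = preferred; z != end; z++)
			printf("considering zone %s\n", z->zone->name);
		return 0;
	}

Returning the cursor (the zoneref) rather than the zone itself is what makes
the reuse possible: the zone alone does not say where the scan should resume.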
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c | 34 ++++++++++++++++------------------
1 file changed, 16 insertions(+), 18 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 732875b1bdfb..dba8cfd0b2d6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2704,7 +2704,7 @@ static struct page *
 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 						const struct alloc_context *ac)
 {
-	struct zoneref *z;
+	struct zoneref *z = ac->preferred_zoneref;
 	struct zone *zone;
 	bool fair_skipped = false;
 	bool apply_fair = (alloc_flags & ALLOC_FAIR);
@@ -2714,7 +2714,7 @@ zonelist_scan:
 	 * Scan zonelist, looking for a zone with enough free.
 	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
 	 */
-	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
+	for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
 								ac->nodemask) {
 		struct page *page;
 		unsigned long mark;
@@ -2734,7 +2734,7 @@ zonelist_scan:
 				fair_skipped = true;
 				continue;
 			}
-			if (!zone_local(ac->preferred_zone, zone)) {
+			if (!zone_local(ac->preferred_zoneref->zone, zone)) {
 				if (fair_skipped)
 					goto reset_fair;
 				apply_fair = false;
@@ -2780,7 +2780,7 @@ zonelist_scan:
 			goto try_this_zone;
 
 		if (zone_reclaim_mode == 0 ||
-		    !zone_allows_reclaim(ac->preferred_zone, zone))
+		    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
 			continue;
 
 		ret = zone_reclaim(zone, gfp_mask, order);
@@ -2802,7 +2802,7 @@ zonelist_scan:
 		}
 
 try_this_zone:
-		page = buffered_rmqueue(ac->preferred_zone, zone, order,
+		page = buffered_rmqueue(ac->preferred_zoneref->zone, zone, order,
 				gfp_mask, alloc_flags, ac->migratetype);
 		if (page) {
 			if (prep_new_page(page, order, gfp_mask, alloc_flags))
@@ -2831,7 +2831,7 @@ try_this_zone:
 reset_fair:
 		apply_fair = false;
 		fair_skipped = false;
-		reset_alloc_batches(ac->preferred_zone);
+		reset_alloc_batches(ac->preferred_zoneref->zone);
 		goto zonelist_scan;
 	}
 
@@ -3114,7 +3114,7 @@ static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
 
 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
 						ac->high_zoneidx, ac->nodemask)
-		wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
+		wakeup_kswapd(zone, order, zonelist_zone_idx(ac->preferred_zoneref));
 }
 
 static inline unsigned int
@@ -3332,7 +3332,7 @@ retry:
 	if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) ||
 	    ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) {
 		/* Wait for some write requests to complete then retry */
-		wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50);
+		wait_iff_congested(ac->preferred_zoneref->zone, BLK_RW_ASYNC, HZ/50);
 		goto retry;
 	}
 
@@ -3370,7 +3370,6 @@ struct page *
 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 			struct zonelist *zonelist, nodemask_t *nodemask)
 {
-	struct zoneref *preferred_zoneref;
 	struct page *page;
 	unsigned int cpuset_mems_cookie;
 	unsigned int alloc_flags = ALLOC_WMARK_LOW|ALLOC_FAIR;
@@ -3416,14 +3415,14 @@ retry_cpuset:
 	ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
 	/* The preferred zone is used for statistics later */
-	preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
-				ac.nodemask, &ac.preferred_zone);
-	if (!ac.preferred_zone) {
+	ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
+					ac.high_zoneidx, ac.nodemask);
+	if (!ac.preferred_zoneref) {
 		page = NULL;
 		goto no_zone;
 	}
 
-	ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
+	ac.classzone_idx = zonelist_zone_idx(ac.preferred_zoneref);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
@@ -4462,13 +4461,12 @@ static void build_zonelists(pg_data_t *pgdat)
  */
 int local_memory_node(int node)
 {
-	struct zone *zone;
+	struct zoneref *z;
 
-	(void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
-				gfp_zone(GFP_KERNEL),
-				NULL,
-				&zone);
-	return zone->node;
+	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
+				gfp_zone(GFP_KERNEL),
+				NULL);
+	return z->zone->node;
 }
 #endif
 
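For reference, the relationship between the two iterators used above. This is
a paraphrased sketch of the 4.6-era include/linux/mmzone.h macros, not the
verbatim kernel text:

	/* Walks from the head of the zonelist: performs the first
	 * lookup itself. */
	#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
		for (z = first_zones_zonelist(zlist, highidx, nodemask),	\
				zone = zonelist_zone(z);			\
		     zone;							\
		     z = next_zones_zonelist(++z, highidx, nodemask),		\
				zone = zonelist_zone(z))

	/* Resumes from a zoneref the caller already holds: no repeat of
	 * the initial lookup. */
	#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
		for (zone = z->zone;						\
		     zone;							\
		     z = next_zones_zonelist(++z, highidx, nodemask),		\
				zone = zonelist_zone(z))

Because for_next_zone_zonelist_nodemask() starts from the zoneref already in
hand, initialising z to ac->preferred_zoneref in get_page_from_freelist()
lets the single lookup in the fast path do double duty.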