path: root/mm/page_alloc.c
author	Mel Gorman <mgorman@techsingularity.net>	2017-02-24 17:56:29 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-24 20:46:54 -0500
commit	9cd7555875bb09dad875e89a76f41f576e11c638 (patch)
tree	cd57343fa7266348c69070a21ccb0ad28f50c375 /mm/page_alloc.c
parent	066b23935578d3913c2df9bed7addbcdf4711f1a (diff)
mm, page_alloc: split alloc_pages_nodemask()
alloc_pages_nodemask() does a number of preparation steps that determine which zones can be used for the allocation, depending on a variety of factors. This is fine, but a hypothetical caller that wanted multiple order-0 pages would have to repeat those preparation steps for every call. This patch structures __alloc_pages_nodemask() so that it is relatively easy to build a bulk order-0 page allocator on top of it. There is no functional change.

Link: http://lkml.kernel.org/r/20170123153906.3122-3-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
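As a rough illustration of the changelog's point, here is a minimal sketch of the kind of bulk order-0 allocator this split makes possible, assuming it lives in mm/page_alloc.c next to the two helpers introduced below. prepare_alloc_pages(), finalise_ac(), get_page_from_freelist() and ALLOC_WMARK_LOW are taken from that file; the function name __alloc_pages_bulk_sketch and the bare allocation loop are illustrative only, not part of this patch.

/*
 * Hypothetical bulk order-0 allocator built on prepare_alloc_pages()
 * and finalise_ac(). Illustrative sketch only: a real implementation
 * would also need the !preferred_zoneref->zone check, a slowpath
 * fallback and batching of the zone lock.
 */
static unsigned long __alloc_pages_bulk_sketch(gfp_t gfp_mask,
		struct zonelist *zonelist, nodemask_t *nodemask,
		unsigned long nr_pages, struct list_head *list)
{
	struct page *page;
	unsigned long allocated = 0;
	unsigned int alloc_flags = ALLOC_WMARK_LOW;
	gfp_t alloc_mask = gfp_mask;
	struct alloc_context ac = { };

	/* The preparation work is done once, not once per page. */
	gfp_mask &= gfp_allowed_mask;
	if (!prepare_alloc_pages(gfp_mask, 0, zonelist, nodemask,
				 &ac, &alloc_mask, &alloc_flags))
		return 0;
	finalise_ac(gfp_mask, 0, &ac);

	/* Pull order-0 pages from the freelists using the shared context. */
	while (allocated < nr_pages) {
		page = get_page_from_freelist(alloc_mask, 0, alloc_flags, &ac);
		if (!page)
			break;
		list_add(&page->lru, list);
		allocated++;
	}
	return allocated;
}

The point of the sketch is that the zonelist, cpuset and nodemask preparation happens once for the whole batch instead of once per allocated page.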
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	75
1 file changed, 46 insertions(+), 29 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 284153d3e0fc..678b2882faaa 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3855,60 +3855,77 @@ got_pg:
 	return page;
 }
 
-/*
- * This is the 'heart' of the zoned buddy allocator.
- */
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
-			struct zonelist *zonelist, nodemask_t *nodemask)
+static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
+		struct zonelist *zonelist, nodemask_t *nodemask,
+		struct alloc_context *ac, gfp_t *alloc_mask,
+		unsigned int *alloc_flags)
 {
-	struct page *page;
-	unsigned int alloc_flags = ALLOC_WMARK_LOW;
-	gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
-	struct alloc_context ac = {
-		.high_zoneidx = gfp_zone(gfp_mask),
-		.zonelist = zonelist,
-		.nodemask = nodemask,
-		.migratetype = gfpflags_to_migratetype(gfp_mask),
-	};
+	ac->high_zoneidx = gfp_zone(gfp_mask);
+	ac->zonelist = zonelist;
+	ac->nodemask = nodemask;
+	ac->migratetype = gfpflags_to_migratetype(gfp_mask);
 
 	if (cpusets_enabled()) {
-		alloc_mask |= __GFP_HARDWALL;
-		alloc_flags |= ALLOC_CPUSET;
-		if (!ac.nodemask)
-			ac.nodemask = &cpuset_current_mems_allowed;
+		*alloc_mask |= __GFP_HARDWALL;
+		*alloc_flags |= ALLOC_CPUSET;
+		if (!ac->nodemask)
+			ac->nodemask = &cpuset_current_mems_allowed;
 	}
 
-	gfp_mask &= gfp_allowed_mask;
-
 	lockdep_trace_alloc(gfp_mask);
 
 	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
 
 	if (should_fail_alloc_page(gfp_mask, order))
-		return NULL;
+		return false;
 
 	/*
 	 * Check the zones suitable for the gfp_mask contain at least one
 	 * valid zone. It's possible to have an empty zonelist as a result
 	 * of __GFP_THISNODE and a memoryless node
 	 */
-	if (unlikely(!zonelist->_zonerefs->zone))
-		return NULL;
+	if (unlikely(!ac->zonelist->_zonerefs->zone))
+		return false;
 
-	if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
-		alloc_flags |= ALLOC_CMA;
+	if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
+		*alloc_flags |= ALLOC_CMA;
+
+	return true;
+}
 
+/* Determine whether to spread dirty pages and what the first usable zone */
+static inline void finalise_ac(gfp_t gfp_mask,
+		unsigned int order, struct alloc_context *ac)
+{
 	/* Dirty zone balancing only done in the fast path */
-	ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
+	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
 	/*
 	 * The preferred zone is used for statistics but crucially it is
 	 * also used as the starting point for the zonelist iterator. It
 	 * may get reset for allocations that ignore memory policies.
 	 */
-	ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
-					ac.high_zoneidx, ac.nodemask);
+	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+					ac->high_zoneidx, ac->nodemask);
+}
+
+/*
+ * This is the 'heart' of the zoned buddy allocator.
+ */
+struct page *
+__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+			struct zonelist *zonelist, nodemask_t *nodemask)
+{
+	struct page *page;
+	unsigned int alloc_flags = ALLOC_WMARK_LOW;
+	gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
+	struct alloc_context ac = { };
+
+	gfp_mask &= gfp_allowed_mask;
+	if (!prepare_alloc_pages(gfp_mask, order, zonelist, nodemask, &ac, &alloc_mask, &alloc_flags))
+		return NULL;
+
+	finalise_ac(gfp_mask, order, &ac);
 	if (!ac.preferred_zoneref->zone) {
 		page = NULL;
 		/*