aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAndrew Morton <akpm@linux-foundation.org>2015-02-11 18:25:04 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-02-11 20:06:01 -0500
commit91fbdc0f89807bb97792ea6893717a8d3154b871 (patch)
tree189e5604fa8be6662b9d691465d81037cbc4136f
parent6de226191d12fce30331ebf024ca3ed24834f0ee (diff)
mm/page_alloc.c:__alloc_pages_nodemask(): don't alter arg gfp_mask
__alloc_pages_nodemask() strips __GFP_IO when retrying the page allocation.
But it does this by altering the function-wide variable gfp_mask.  This
will cause subsequent allocation attempts to inadvertently use the modified
gfp_mask.

Also, pass the correct mask (the mask we actually used) into
trace_mm_page_alloc().

Cc: Ming Lei <ming.lei@canonical.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/page_alloc.c	| 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f121050e8530..1c7d90f7a84a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2865,6 +2865,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	unsigned int cpuset_mems_cookie;
 	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
 	int classzone_idx;
+	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
 
 	gfp_mask &= gfp_allowed_mask;
 
@@ -2898,22 +2899,24 @@ retry_cpuset:
 	classzone_idx = zonelist_zone_idx(preferred_zoneref);
 
 	/* First allocation attempt */
-	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
-			zonelist, high_zoneidx, alloc_flags,
-			preferred_zone, classzone_idx, migratetype);
+	alloc_mask = gfp_mask|__GFP_HARDWALL;
+	page = get_page_from_freelist(alloc_mask, nodemask, order, zonelist,
+			high_zoneidx, alloc_flags, preferred_zone,
+			classzone_idx, migratetype);
 	if (unlikely(!page)) {
 		/*
 		 * Runtime PM, block IO and its error handling path
 		 * can deadlock because I/O on the device might not
 		 * complete.
 		 */
-		gfp_mask = memalloc_noio_flags(gfp_mask);
-		page = __alloc_pages_slowpath(gfp_mask, order,
+		alloc_mask = memalloc_noio_flags(gfp_mask);
+
+		page = __alloc_pages_slowpath(alloc_mask, order,
 				zonelist, high_zoneidx, nodemask,
 				preferred_zone, classzone_idx, migratetype);
 	}
 
-	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
+	trace_mm_page_alloc(page, order, alloc_mask, migratetype);
 
 out:
 	/*