summary | refs | log | tree | commit | diff | stats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
author    Mel Gorman <mgorman@techsingularity.net>  2016-05-19 20:13:38 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-05-19 22:12:14 -0400
commit  c603844bdcb5238980de8d58b393f52d7729d651 (patch)
tree    c6aefae3a21374dc65f21b89d47965c65fba9f4a /mm/page_alloc.c
parent  f75fb889d18d362e336f8d3fba158a8636d0a063 (diff)
mm, page_alloc: convert alloc_flags to unsigned
alloc_flags is a bitmask of flags but it is signed which does not
necessarily generate the best code depending on the compiler. Even
without an impact, it makes more sense that this be unsigned.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 26
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7f328cfb137d..094587a4ed81 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1525,7 +1525,7 @@ static inline bool free_pages_prezeroed(bool poisoned)
1525} 1525}
1526 1526
1527static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, 1527static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1528 int alloc_flags) 1528 unsigned int alloc_flags)
1529{ 1529{
1530 int i; 1530 int i;
1531 bool poisoned = true; 1531 bool poisoned = true;
@@ -2391,7 +2391,8 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
2391static inline 2391static inline
2392struct page *buffered_rmqueue(struct zone *preferred_zone, 2392struct page *buffered_rmqueue(struct zone *preferred_zone,
2393 struct zone *zone, unsigned int order, 2393 struct zone *zone, unsigned int order,
2394 gfp_t gfp_flags, int alloc_flags, int migratetype) 2394 gfp_t gfp_flags, unsigned int alloc_flags,
2395 int migratetype)
2395{ 2396{
2396 unsigned long flags; 2397 unsigned long flags;
2397 struct page *page; 2398 struct page *page;
@@ -2545,12 +2546,13 @@ static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2545 * to check in the allocation paths if no pages are free. 2546 * to check in the allocation paths if no pages are free.
2546 */ 2547 */
2547static bool __zone_watermark_ok(struct zone *z, unsigned int order, 2548static bool __zone_watermark_ok(struct zone *z, unsigned int order,
2548 unsigned long mark, int classzone_idx, int alloc_flags, 2549 unsigned long mark, int classzone_idx,
2550 unsigned int alloc_flags,
2549 long free_pages) 2551 long free_pages)
2550{ 2552{
2551 long min = mark; 2553 long min = mark;
2552 int o; 2554 int o;
2553 const int alloc_harder = (alloc_flags & ALLOC_HARDER); 2555 const bool alloc_harder = (alloc_flags & ALLOC_HARDER);
2554 2556
2555 /* free_pages may go negative - that's OK */ 2557 /* free_pages may go negative - that's OK */
2556 free_pages -= (1 << order) - 1; 2558 free_pages -= (1 << order) - 1;
@@ -2613,7 +2615,7 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
2613} 2615}
2614 2616
2615bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 2617bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2616 int classzone_idx, int alloc_flags) 2618 int classzone_idx, unsigned int alloc_flags)
2617{ 2619{
2618 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, 2620 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2619 zone_page_state(z, NR_FREE_PAGES)); 2621 zone_page_state(z, NR_FREE_PAGES));
@@ -2957,7 +2959,7 @@ out:
2957/* Try memory compaction for high-order allocations before reclaim */ 2959/* Try memory compaction for high-order allocations before reclaim */
2958static struct page * 2960static struct page *
2959__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 2961__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2960 int alloc_flags, const struct alloc_context *ac, 2962 unsigned int alloc_flags, const struct alloc_context *ac,
2961 enum migrate_mode mode, int *contended_compaction, 2963 enum migrate_mode mode, int *contended_compaction,
2962 bool *deferred_compaction) 2964 bool *deferred_compaction)
2963{ 2965{
@@ -3013,7 +3015,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3013#else 3015#else
3014static inline struct page * 3016static inline struct page *
3015__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3017__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3016 int alloc_flags, const struct alloc_context *ac, 3018 unsigned int alloc_flags, const struct alloc_context *ac,
3017 enum migrate_mode mode, int *contended_compaction, 3019 enum migrate_mode mode, int *contended_compaction,
3018 bool *deferred_compaction) 3020 bool *deferred_compaction)
3019{ 3021{
@@ -3053,7 +3055,7 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
3053/* The really slow allocator path where we enter direct reclaim */ 3055/* The really slow allocator path where we enter direct reclaim */
3054static inline struct page * 3056static inline struct page *
3055__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 3057__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
3056 int alloc_flags, const struct alloc_context *ac, 3058 unsigned int alloc_flags, const struct alloc_context *ac,
3057 unsigned long *did_some_progress) 3059 unsigned long *did_some_progress)
3058{ 3060{
3059 struct page *page = NULL; 3061 struct page *page = NULL;
@@ -3092,10 +3094,10 @@ static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
3092 wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone)); 3094 wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
3093} 3095}
3094 3096
3095static inline int 3097static inline unsigned int
3096gfp_to_alloc_flags(gfp_t gfp_mask) 3098gfp_to_alloc_flags(gfp_t gfp_mask)
3097{ 3099{
3098 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 3100 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
3099 3101
3100 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */ 3102 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
3101 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); 3103 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
@@ -3156,7 +3158,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3156{ 3158{
3157 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 3159 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
3158 struct page *page = NULL; 3160 struct page *page = NULL;
3159 int alloc_flags; 3161 unsigned int alloc_flags;
3160 unsigned long pages_reclaimed = 0; 3162 unsigned long pages_reclaimed = 0;
3161 unsigned long did_some_progress; 3163 unsigned long did_some_progress;
3162 enum migrate_mode migration_mode = MIGRATE_ASYNC; 3164 enum migrate_mode migration_mode = MIGRATE_ASYNC;
@@ -3348,7 +3350,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
3348 struct zoneref *preferred_zoneref; 3350 struct zoneref *preferred_zoneref;
3349 struct page *page = NULL; 3351 struct page *page = NULL;
3350 unsigned int cpuset_mems_cookie; 3352 unsigned int cpuset_mems_cookie;
3351 int alloc_flags = ALLOC_WMARK_LOW|ALLOC_FAIR; 3353 unsigned int alloc_flags = ALLOC_WMARK_LOW|ALLOC_FAIR;
3352 gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */ 3354 gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
3353 struct alloc_context ac = { 3355 struct alloc_context ac = {
3354 .high_zoneidx = gfp_zone(gfp_mask), 3356 .high_zoneidx = gfp_zone(gfp_mask),