summaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorRasmus Villemoes <linux@rasmusvillemoes.dk>2017-07-10 18:49:26 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-07-10 19:32:32 -0400
commitb002529d256307602c669d1886c0b953b52b8700 (patch)
tree9219954404a5b6124356dd8a1785d6f5c0653126 /mm/page_alloc.c
parent8c03cc85a035ae7a208c28c4382ecfeb6adf79a6 (diff)
mm/page_alloc.c: eliminate unsigned confusion in __rmqueue_fallback
Since current_order starts as MAX_ORDER-1 and is then only decremented, the second half of the loop condition seems superfluous. However, if order is 0, we may decrement current_order past 0, making it UINT_MAX. This is obviously too subtle ([1], [2]). Since we need to add some comment anyway, change the two variables to signed, making the counting-down for loop look more familiar, and apparently also making gcc generate slightly smaller code. [1] https://lkml.org/lkml/2016/6/20/493 [2] https://lkml.org/lkml/2017/6/19/345 [akpm@linux-foundation.org: fix up reject fixupping] Link: http://lkml.kernel.org/r/20170621185529.2265-1-linux@rasmusvillemoes.dk Signed-off-by: Rasmus Villemoes <linux@rasmusvillemoes.dk> Reported-by: Hao Lee <haolee.swjtu@gmail.com> Acked-by: Wei Yang <weiyang@gmail.com> Acked-by: Michal Hocko <mhocko@suse.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c11
1 file changed, 7 insertions, 4 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 869035717048..d90c31951b90 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2206,12 +2206,16 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
  * list of requested migratetype, possibly along with other pages from the same
  * block, depending on fragmentation avoidance heuristics. Returns true if
  * fallback was found so that __rmqueue_smallest() can grab it.
+ *
+ * The use of signed ints for order and current_order is a deliberate
+ * deviation from the rest of this file, to make the for loop
+ * condition simpler.
  */
 static inline bool
-__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
+__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 {
 	struct free_area *area;
-	unsigned int current_order;
+	int current_order;
 	struct page *page;
 	int fallback_mt;
 	bool can_steal;
@@ -2221,8 +2225,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 	 * approximates finding the pageblock with the most free pages, which
 	 * would be too costly to do exactly.
 	 */
-	for (current_order = MAX_ORDER-1;
-	     current_order >= order && current_order <= MAX_ORDER-1;
-	     --current_order) {
+	for (current_order = MAX_ORDER - 1; current_order >= order;
+	     --current_order) {
 		area = &(zone->free_area[current_order]);
 		fallback_mt = find_suitable_fallback(area, current_order,