author     Mel Gorman <mgorman@suse.de>                     2014-06-04 19:10:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-06-04 19:54:09 -0400
commit     7aeb09f9104b760fc53c98cb7d20d06640baf9e6 (patch)
tree       c16390f09077fc3f5e8e3424a916522a9abe88da /mm
parent     cfc47a2803db42140167b92d991ef04018e162c7 (diff)
mm: page_alloc: use unsigned int for order in more places
X86 prefers the use of unsigned types for iterators and there is a
tendency to mix whether a signed or unsigned type is used for page order.
This converts a number of sites in mm/page_alloc.c to use unsigned int
for order where possible.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Jan Kara <jack@suse.cz>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  43
1 file changed, 23 insertions(+), 20 deletions(-)
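The mixing the changelog describes is risky because of C's usual arithmetic conversions: when a signed and an unsigned operand meet in a comparison, the signed value is converted to unsigned. A minimal user-space sketch of that hazard (not part of this patch; the names are illustrative):

#include <stdio.h>

int main(void)
{
        int order = -1;                 /* illustrative "invalid order" sentinel */
        unsigned int max_order = 11;    /* stand-in for MAX_ORDER */

        /*
         * In the comparison below, order is converted to unsigned int,
         * so -1 becomes UINT_MAX and the test is false: the "invalid"
         * order looks enormous rather than negative.
         */
        if (order < max_order)
                printf("order %d is in range\n", order);
        else
                printf("order %d compares as %u\n", order, (unsigned int)order);

        return 0;
}

Compilers flag such comparisons with -Wsign-compare, which is one reason to settle on a single type for page order throughout.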
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ce4d3716214c..37ef1b87f1f3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -409,7 +409,8 @@ static int destroy_compound_page(struct page *page, unsigned long order)
 	return bad;
 }
 
-static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
+static inline void prep_zero_page(struct page *page, unsigned int order,
+						gfp_t gfp_flags)
 {
 	int i;
 
@@ -453,7 +454,7 @@ static inline void set_page_guard_flag(struct page *page) { }
 static inline void clear_page_guard_flag(struct page *page) { }
 #endif
 
-static inline void set_page_order(struct page *page, int order)
+static inline void set_page_order(struct page *page, unsigned int order)
 {
 	set_page_private(page, order);
 	__SetPageBuddy(page);
@@ -504,7 +505,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
  * For recording page's order, we use page_private(page).
  */
 static inline int page_is_buddy(struct page *page, struct page *buddy,
-								int order)
+							unsigned int order)
 {
 	if (!pfn_valid_within(page_to_pfn(buddy)))
 		return 0;
@@ -726,7 +727,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 
 static void free_one_page(struct zone *zone,
 				struct page *page, unsigned long pfn,
-				int order,
+				unsigned int order,
 				int migratetype)
 {
 	spin_lock(&zone->lock);
@@ -897,7 +898,7 @@ static inline int check_new_page(struct page *page)
 	return 0;
 }
 
-static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
+static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
 {
 	int i;
 
@@ -1108,16 +1109,17 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
 
 /* Remove an element from the buddy allocator from the fallback list */
 static inline struct page *
-__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
+__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 {
 	struct free_area *area;
-	int current_order;
+	unsigned int current_order;
 	struct page *page;
 	int migratetype, new_type, i;
 
 	/* Find the largest possible block of pages in the other list */
-	for (current_order = MAX_ORDER-1; current_order >= order;
-						--current_order) {
+	for (current_order = MAX_ORDER-1;
+				current_order >= order && current_order <= MAX_ORDER-1;
+				--current_order) {
 		for (i = 0;; i++) {
 			migratetype = fallbacks[start_migratetype][i];
 
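The extra current_order <= MAX_ORDER-1 test above is what makes this descending loop safe once current_order is unsigned: when it reaches 0, --current_order wraps to UINT_MAX instead of going negative. A minimal sketch of the failure mode (not kernel code; assumes MAX_ORDER is 11, a common configuration):

#include <stdio.h>

#define MAX_ORDER 11

int main(void)
{
        unsigned int current_order;
        unsigned int order = 0;        /* lowest order the caller may request */

        /*
         * Without the upper-bound test this loop would never end:
         * after the order-0 pass, --current_order wraps to UINT_MAX
         * and "current_order >= order" stays true.  The added
         * "current_order <= MAX_ORDER-1" test catches the wraparound.
         */
        for (current_order = MAX_ORDER-1;
             current_order >= order && current_order <= MAX_ORDER-1;
             --current_order)
                printf("trying order %u\n", current_order);

        return 0;
}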
@@ -1345,7 +1347,7 @@ void mark_free_pages(struct zone *zone)
 {
 	unsigned long pfn, max_zone_pfn;
 	unsigned long flags;
-	int order, t;
+	unsigned int order, t;
 	struct list_head *curr;
 
 	if (zone_is_empty(zone))
@@ -1541,8 +1543,8 @@ int split_free_page(struct page *page)
  */
 static inline
 struct page *buffered_rmqueue(struct zone *preferred_zone,
-			struct zone *zone, int order, gfp_t gfp_flags,
-			int migratetype)
+			struct zone *zone, unsigned int order,
+			gfp_t gfp_flags, int migratetype)
 {
 	unsigned long flags;
 	struct page *page;
@@ -1691,8 +1693,9 @@ static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  * Return true if free pages are above 'mark'. This takes into account the order
  * of the allocation.
  */
-static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
-		int classzone_idx, int alloc_flags, long free_pages)
+static bool __zone_watermark_ok(struct zone *z, unsigned int order,
+			unsigned long mark, int classzone_idx, int alloc_flags,
+			long free_pages)
 {
 	/* free_pages my go negative - that's OK */
 	long min = mark;
@@ -1726,15 +1729,15 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 	return true;
 }
 
-bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 		      int classzone_idx, int alloc_flags)
 {
 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
 					zone_page_state(z, NR_FREE_PAGES));
 }
 
-bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
-		      int classzone_idx, int alloc_flags)
+bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
+			unsigned long mark, int classzone_idx, int alloc_flags)
 {
 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
 
@@ -4121,7 +4124,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 
 static void __meminit zone_init_free_lists(struct zone *zone)
 {
-	int order, t;
+	unsigned int order, t;
 	for_each_migratetype_order(order, t) {
 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
 		zone->free_area[order].nr_free = 0;
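The conversions in zone_init_free_lists() here and in mark_free_pages() above need no such guard, because for_each_migratetype_order() only counts upward from zero with strict upper bounds, roughly as defined in include/linux/mmzone.h at the time:

/* Ascending loops with a strict upper bound cannot wrap an unsigned counter. */
#define for_each_migratetype_order(order, type) \
        for (order = 0; order < MAX_ORDER; order++) \
                for (type = 0; type < MIGRATE_TYPES; type++)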
@@ -6444,7 +6447,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
 	struct page *page;
 	struct zone *zone;
-	int order, i;
+	unsigned int order, i;
 	unsigned long pfn;
 	unsigned long flags;
 	/* find the first valid pfn */
@@ -6496,7 +6499,7 @@ bool is_free_buddy_page(struct page *page)
 	struct zone *zone = page_zone(page);
 	unsigned long pfn = page_to_pfn(page);
 	unsigned long flags;
-	int order;
+	unsigned int order;
 
 	spin_lock_irqsave(&zone->lock, flags);
 	for (order = 0; order < MAX_ORDER; order++) {