author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>    2015-11-06 19:29:57 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>          2015-11-06 20:50:42 -0500
commit     d00181b96eb86c914cb327d1de974a1b71366e1b (patch)
tree       95d11627900f5b8284677d455db4daea3e1a82da /mm/page_alloc.c
parent     1d798ca3f16437c71ff63e36597ff07f9c12e4d6 (diff)
mm: use 'unsigned int' for page order
Let's try to be consistent about data type of page order.

[sfr@canb.auug.org.au: fix build (type of pageblock_order)]
[hughd@google.com: some configs end up with MAX_ORDER and pageblock_order having different types]
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
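A note on the min() to min_t() change in __free_one_page() below: once pageblock_order is 'unsigned int' while MAX_ORDER still expands to a plain int constant, the kernel's type-checking min() macro would warn about the mixed signedness, so the patch casts both operands with min_t(unsigned int, ...). The sketch below is only a userspace C illustration of that distinction; the macros mirror the spirit of the include/linux/kernel.h helpers rather than their exact definitions, and the MAX_ORDER and pageblock_order values are assumed for the example.

/* Illustrative sketch only -- simplified stand-ins for the kernel's
 * min()/min_t() macros, compiled as ordinary userspace C (gcc). */
#include <stdio.h>

/* min(): evaluate each argument once; the pointer comparison draws a
 * -Wcompare-distinct-pointer-types warning if the operand types differ. */
#define min(x, y) ({					\
	__typeof__(x) _min1 = (x);			\
	__typeof__(y) _min2 = (y);			\
	(void)(&_min1 == &_min2);			\
	_min1 < _min2 ? _min1 : _min2; })

/* min_t(): cast both operands to the requested type before comparing. */
#define min_t(type, x, y) min((type)(x), (type)(y))

#define MAX_ORDER 11				/* plain int constant, typical default */
static unsigned int pageblock_order = 9;	/* assumed value, now 'unsigned int' */

int main(void)
{
	/* min(MAX_ORDER, pageblock_order + 1) mixes int and unsigned int and
	 * would trigger the type-mismatch warning above; min_t() does not. */
	unsigned int max_order = min_t(unsigned int, MAX_ORDER,
				       pageblock_order + 1);

	printf("max_order = %u\n", max_order);
	return 0;
}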
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--    mm/page_alloc.c    29
1 file changed, 16 insertions, 13 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e361001519d3..208e4c7e771b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -181,7 +181,7 @@ bool pm_suspended_storage(void)
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
-int pageblock_order __read_mostly;
+unsigned int pageblock_order __read_mostly;
 #endif
 
 static void __free_pages_ok(struct page *page, unsigned int order);
@@ -462,7 +462,7 @@ static void free_compound_page(struct page *page)
 	__free_pages_ok(page, compound_order(page));
 }
 
-void prep_compound_page(struct page *page, unsigned long order)
+void prep_compound_page(struct page *page, unsigned int order)
 {
 	int i;
 	int nr_pages = 1 << order;
@@ -662,7 +662,7 @@ static inline void __free_one_page(struct page *page,
 	unsigned long combined_idx;
 	unsigned long uninitialized_var(buddy_idx);
 	struct page *buddy;
-	int max_order = MAX_ORDER;
+	unsigned int max_order = MAX_ORDER;
 
 	VM_BUG_ON(!zone_is_initialized(zone));
 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
@@ -675,7 +675,7 @@ static inline void __free_one_page(struct page *page,
 		 * pageblock. Without this, pageblock isolation
 		 * could cause incorrect freepage accounting.
 		 */
-		max_order = min(MAX_ORDER, pageblock_order + 1);
+		max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
 	} else {
 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
 	}
@@ -1471,7 +1471,7 @@ int move_freepages(struct zone *zone,
 			  int migratetype)
 {
 	struct page *page;
-	unsigned long order;
+	unsigned int order;
 	int pages_moved = 0;
 
 #ifndef CONFIG_HOLES_IN_ZONE
@@ -1584,7 +1584,7 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
 static void steal_suitable_fallback(struct zone *zone, struct page *page,
 							  int start_type)
 {
-	int current_order = page_order(page);
+	unsigned int current_order = page_order(page);
 	int pages;
 
 	/* Take ownership for orders >= pageblock_order */
@@ -2637,7 +2637,7 @@ static DEFINE_RATELIMIT_STATE(nopage_rs,
 		DEFAULT_RATELIMIT_INTERVAL,
 		DEFAULT_RATELIMIT_BURST);
 
-void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
+void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...)
 {
 	unsigned int filter = SHOW_MEM_FILTER_NODES;
 
@@ -2671,7 +2671,7 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
 		va_end(args);
 	}
 
-	pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
+	pr_warn("%s: page allocation failure: order:%u, mode:0x%x\n",
 		current->comm, order, gfp_mask);
 
 	dump_stack();
@@ -3449,7 +3449,8 @@ void free_kmem_pages(unsigned long addr, unsigned int order)
 	}
 }
 
-static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
+static void *make_alloc_exact(unsigned long addr, unsigned int order,
+		size_t size)
 {
 	if (addr) {
 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
@@ -3499,7 +3500,7 @@ EXPORT_SYMBOL(alloc_pages_exact);
  */
 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
 {
-	unsigned order = get_order(size);
+	unsigned int order = get_order(size);
 	struct page *p = alloc_pages_node(nid, gfp_mask, order);
 	if (!p)
 		return NULL;
@@ -3800,7 +3801,8 @@ void show_free_areas(unsigned int filter)
 	}
 
 	for_each_populated_zone(zone) {
-		unsigned long nr[MAX_ORDER], flags, order, total = 0;
+		unsigned int order;
+		unsigned long nr[MAX_ORDER], flags, total = 0;
 		unsigned char types[MAX_ORDER];
 
 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
@@ -4149,7 +4151,7 @@ static void build_zonelists(pg_data_t *pgdat)
 	nodemask_t used_mask;
 	int local_node, prev_node;
 	struct zonelist *zonelist;
-	int order = current_zonelist_order;
+	unsigned int order = current_zonelist_order;
 
 	/* initialize zonelists */
 	for (i = 0; i < MAX_ZONELISTS; i++) {
@@ -6678,7 +6680,8 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 		       unsigned migratetype)
 {
 	unsigned long outer_start, outer_end;
-	int ret = 0, order;
+	unsigned int order;
+	int ret = 0;
 
 	struct compact_control cc = {
 		.nr_migratepages = 0,