 include/linux/mm.h              |  5 +++--
 include/linux/pageblock-flags.h |  2 +-
 mm/hugetlb.c                    | 19 +++++++++---------
 mm/internal.h                   |  4 ++--
 mm/page_alloc.c                 | 29 ++++++++++++++-------------
 5 files changed, 32 insertions(+), 27 deletions(-)
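
Every hunk below makes the same change: a page allocation order, which is a small non-negative shift count, moves from int, unsigned long, or bare unsigned to unsigned int. As a standalone illustration of why that type fits (userspace C, not kernel code; the names and values here are made up), an order is consumed as a shift count and printed with %u; the one spot where the conversion is more than a rename, the min() to min_t() switch in __free_one_page(), is sketched after the diff.

#include <stdio.h>

/* Illustrative only: a page "order" is a small non-negative shift count. */
static unsigned long pages_in_block(unsigned int order)
{
	return 1UL << order;		/* never negative, so unsigned fits */
}

int main(void)
{
	unsigned int order = 9;		/* e.g. a 2MB block of 4KB pages */

	printf("order:%u spans %lu pages\n", order, pages_in_block(order));
	return 0;
}
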
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9671b6f23eda..00bad7793788 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -550,7 +550,7 @@ static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
 	return compound_page_dtors[page[1].compound_dtor];
 }
 
-static inline int compound_order(struct page *page)
+static inline unsigned int compound_order(struct page *page)
 {
 	if (!PageHead(page))
 		return 0;
@@ -1810,7 +1810,8 @@ extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
 
 extern __printf(3, 4)
-void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
+void warn_alloc_failed(gfp_t gfp_mask, unsigned int order,
+		const char *fmt, ...);
 
 extern void setup_per_cpu_pageset(void);
 
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index 2baeee12f48e..e942558b3585 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -44,7 +44,7 @@ enum pageblock_bits {
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 
 /* Huge page sizes are variable */
-extern int pageblock_order;
+extern unsigned int pageblock_order;
 
 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4eb0f0964883..7ce07d681265 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -994,7 +994,7 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 
 #if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
 static void destroy_compound_gigantic_page(struct page *page,
-					unsigned long order)
+					unsigned int order)
 {
 	int i;
 	int nr_pages = 1 << order;
@@ -1009,7 +1009,7 @@ static void destroy_compound_gigantic_page(struct page *page,
 	__ClearPageHead(page);
 }
 
-static void free_gigantic_page(struct page *page, unsigned order)
+static void free_gigantic_page(struct page *page, unsigned int order)
 {
 	free_contig_range(page_to_pfn(page), 1 << order);
 }
@@ -1053,7 +1053,7 @@ static bool zone_spans_last_pfn(const struct zone *zone,
 	return zone_spans_pfn(zone, last_pfn);
 }
 
-static struct page *alloc_gigantic_page(int nid, unsigned order)
+static struct page *alloc_gigantic_page(int nid, unsigned int order)
 {
 	unsigned long nr_pages = 1 << order;
 	unsigned long ret, pfn, flags;
@@ -1089,7 +1089,7 @@ static struct page *alloc_gigantic_page(int nid, unsigned order)
 }
 
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
-static void prep_compound_gigantic_page(struct page *page, unsigned long order);
+static void prep_compound_gigantic_page(struct page *page, unsigned int order);
 
 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
 {
@@ -1122,9 +1122,9 @@ static int alloc_fresh_gigantic_page(struct hstate *h,
 static inline bool gigantic_page_supported(void) { return true; }
 #else
 static inline bool gigantic_page_supported(void) { return false; }
-static inline void free_gigantic_page(struct page *page, unsigned order) { }
+static inline void free_gigantic_page(struct page *page, unsigned int order) { }
 static inline void destroy_compound_gigantic_page(struct page *page,
-						unsigned long order) { }
+						unsigned int order) { }
 static inline int alloc_fresh_gigantic_page(struct hstate *h,
 					nodemask_t *nodes_allowed) { return 0; }
 #endif
@@ -1250,7 +1250,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 	put_page(page); /* free it into the hugepage allocator */
 }
 
-static void prep_compound_gigantic_page(struct page *page, unsigned long order)
+static void prep_compound_gigantic_page(struct page *page, unsigned int order)
 {
 	int i;
 	int nr_pages = 1 << order;
@@ -1968,7 +1968,8 @@ found:
 	return 1;
 }
 
-static void __init prep_compound_huge_page(struct page *page, int order)
+static void __init prep_compound_huge_page(struct page *page,
+		unsigned int order)
 {
 	if (unlikely(order > (MAX_ORDER - 1)))
 		prep_compound_gigantic_page(page, order);
@@ -2679,7 +2680,7 @@ static int __init hugetlb_init(void)
 module_init(hugetlb_init);
 
 /* Should be called on processing a hugepagesz=... option */
-void __init hugetlb_add_hstate(unsigned order)
+void __init hugetlb_add_hstate(unsigned int order)
 {
 	struct hstate *h;
 	unsigned long i;
diff --git a/mm/internal.h b/mm/internal.h
index a7f5670fea23..38e24b89e4c4 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -177,7 +177,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
 extern int __isolate_free_page(struct page *page, unsigned int order);
 extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
 					unsigned int order);
-extern void prep_compound_page(struct page *page, unsigned long order);
+extern void prep_compound_page(struct page *page, unsigned int order);
 #ifdef CONFIG_MEMORY_FAILURE
 extern bool is_free_buddy_page(struct page *page);
 #endif
@@ -235,7 +235,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
  * page cannot be allocated or merged in parallel. Alternatively, it must
  * handle invalid values gracefully, and use page_order_unsafe() below.
  */
-static inline unsigned long page_order(struct page *page)
+static inline unsigned int page_order(struct page *page)
 {
 	/* PageBuddy() must be checked by the caller */
 	return page_private(page);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e361001519d3..208e4c7e771b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -181,7 +181,7 @@ bool pm_suspended_storage(void)
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
-int pageblock_order __read_mostly;
+unsigned int pageblock_order __read_mostly;
 #endif
 
 static void __free_pages_ok(struct page *page, unsigned int order);
@@ -462,7 +462,7 @@ static void free_compound_page(struct page *page)
 	__free_pages_ok(page, compound_order(page));
 }
 
-void prep_compound_page(struct page *page, unsigned long order)
+void prep_compound_page(struct page *page, unsigned int order)
 {
 	int i;
 	int nr_pages = 1 << order;
@@ -662,7 +662,7 @@ static inline void __free_one_page(struct page *page,
 	unsigned long combined_idx;
 	unsigned long uninitialized_var(buddy_idx);
 	struct page *buddy;
-	int max_order = MAX_ORDER;
+	unsigned int max_order = MAX_ORDER;
 
 	VM_BUG_ON(!zone_is_initialized(zone));
 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
@@ -675,7 +675,7 @@ static inline void __free_one_page(struct page *page,
 		 * pageblock. Without this, pageblock isolation
 		 * could cause incorrect freepage accounting.
 		 */
-		max_order = min(MAX_ORDER, pageblock_order + 1);
+		max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
 	} else {
 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
 	}
@@ -1471,7 +1471,7 @@ int move_freepages(struct zone *zone,
 			  int migratetype)
 {
 	struct page *page;
-	unsigned long order;
+	unsigned int order;
 	int pages_moved = 0;
 
 #ifndef CONFIG_HOLES_IN_ZONE
@@ -1584,7 +1584,7 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
 static void steal_suitable_fallback(struct zone *zone, struct page *page,
 							  int start_type)
 {
-	int current_order = page_order(page);
+	unsigned int current_order = page_order(page);
 	int pages;
 
 	/* Take ownership for orders >= pageblock_order */
@@ -2637,7 +2637,7 @@ static DEFINE_RATELIMIT_STATE(nopage_rs,
 		DEFAULT_RATELIMIT_INTERVAL,
 		DEFAULT_RATELIMIT_BURST);
 
-void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
+void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...)
 {
 	unsigned int filter = SHOW_MEM_FILTER_NODES;
 
@@ -2671,7 +2671,7 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
 		va_end(args);
 	}
 
-	pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
+	pr_warn("%s: page allocation failure: order:%u, mode:0x%x\n",
 		current->comm, order, gfp_mask);
 
 	dump_stack();
@@ -3449,7 +3449,8 @@ void free_kmem_pages(unsigned long addr, unsigned int order)
 	}
 }
 
-static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
+static void *make_alloc_exact(unsigned long addr, unsigned int order,
+		size_t size)
 {
 	if (addr) {
 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
@@ -3499,7 +3500,7 @@ EXPORT_SYMBOL(alloc_pages_exact);
  */
 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
 {
-	unsigned order = get_order(size);
+	unsigned int order = get_order(size);
 	struct page *p = alloc_pages_node(nid, gfp_mask, order);
 	if (!p)
 		return NULL;
@@ -3800,7 +3801,8 @@ void show_free_areas(unsigned int filter)
 	}
 
 	for_each_populated_zone(zone) {
-		unsigned long nr[MAX_ORDER], flags, order, total = 0;
+		unsigned int order;
+		unsigned long nr[MAX_ORDER], flags, total = 0;
 		unsigned char types[MAX_ORDER];
 
 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
@@ -4149,7 +4151,7 @@ static void build_zonelists(pg_data_t *pgdat)
 	nodemask_t used_mask;
 	int local_node, prev_node;
 	struct zonelist *zonelist;
-	int order = current_zonelist_order;
+	unsigned int order = current_zonelist_order;
 
 	/* initialize zonelists */
 	for (i = 0; i < MAX_ZONELISTS; i++) {
@@ -6678,7 +6680,8 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 			       unsigned migratetype)
 {
 	unsigned long outer_start, outer_end;
-	int ret = 0, order;
+	unsigned int order;
+	int ret = 0;
 
 	struct compact_control cc = {
 		.nr_migratepages = 0,
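
One detail worth calling out: in __free_one_page() the patch replaces min() with min_t(). The kernel's min() macro refuses operands of different types, and once pageblock_order is unsigned int while MAX_ORDER remains a plain int constant, the comparison has to go through min_t(), which casts both sides to a named type first. A rough userspace approximation (simplified stand-ins for the kernel macros, illustrative values):

#include <stdio.h>

/*
 * Simplified userspace stand-ins for the kernel's min()/min_t() macros
 * (GCC statement expressions; not the exact kernel definitions).
 */
#define min(x, y) ({						\
	typeof(x) _min1 = (x);					\
	typeof(y) _min2 = (y);					\
	(void) (&_min1 == &_min2); /* warns if types differ */	\
	_min1 < _min2 ? _min1 : _min2; })

#define min_t(type, x, y) ({					\
	type __min1 = (x);					\
	type __min2 = (y);					\
	__min1 < __min2 ? __min1 : __min2; })

#define MAX_ORDER 11			/* plain int constant, as in the kernel */
static unsigned int pageblock_order = 9;	/* illustrative value */

int main(void)
{
	/*
	 * min(MAX_ORDER, pageblock_order + 1) would mix int with unsigned
	 * int and trip the pointer-type check above; min_t() casts both
	 * operands to the requested type before comparing.
	 */
	unsigned int max_order = min_t(unsigned int, MAX_ORDER,
				       pageblock_order + 1);

	printf("max_order:%u\n", max_order);
	return 0;
}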