Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 78
 1 file changed, 41 insertions(+), 37 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bab8e3bc4202..8cbfc38e68ac 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -219,7 +219,7 @@ EXPORT_SYMBOL(nr_online_nodes);
 
 int page_group_by_mobility_disabled __read_mostly;
 
-static void set_pageblock_migratetype(struct page *page, int migratetype)
+void set_pageblock_migratetype(struct page *page, int migratetype)
 {
 
 	if (unlikely(page_group_by_mobility_disabled))
@@ -954,8 +954,8 @@ static int move_freepages(struct zone *zone,
 	return pages_moved;
 }
 
-static int move_freepages_block(struct zone *zone, struct page *page,
-				int migratetype)
+int move_freepages_block(struct zone *zone, struct page *page,
+			 int migratetype)
 {
 	unsigned long start_pfn, end_pfn;
 	struct page *start_page, *end_page;
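
Dropping the static qualifier from set_pageblock_migratetype() and move_freepages_block() only helps if other translation units can see matching prototypes. A minimal sketch of what such declarations look like; the header they would live in is an assumption here, not something this diff shows:

/*
 * Hypothetical placement: a shared mm header (for example
 * include/linux/page-isolation.h). The prototypes simply mirror the
 * now-external definitions above.
 */
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
			 int migratetype);
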
@@ -4300,25 +4300,24 @@ static inline void setup_usemap(struct pglist_data *pgdat,
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 
-/* Return a sensible default order for the pageblock size. */
-static inline int pageblock_default_order(void)
-{
-	if (HPAGE_SHIFT > PAGE_SHIFT)
-		return HUGETLB_PAGE_ORDER;
-
-	return MAX_ORDER-1;
-}
-
 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
-static inline void __init set_pageblock_order(unsigned int order)
+static inline void __init set_pageblock_order(void)
 {
+	unsigned int order;
+
 	/* Check that pageblock_nr_pages has not already been setup */
 	if (pageblock_order)
 		return;
 
+	if (HPAGE_SHIFT > PAGE_SHIFT)
+		order = HUGETLB_PAGE_ORDER;
+	else
+		order = MAX_ORDER - 1;
+
 	/*
 	 * Assume the largest contiguous order of interest is a huge page.
-	 * This value may be variable depending on boot parameters on IA64
+	 * This value may be variable depending on boot parameters on IA64 and
+	 * powerpc.
 	 */
 	pageblock_order = order;
 }
@@ -4326,15 +4325,13 @@ static inline void __init set_pageblock_order(unsigned int order)
 
 /*
  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
- * and pageblock_default_order() are unused as pageblock_order is set
- * at compile-time. See include/linux/pageblock-flags.h for the values of
- * pageblock_order based on the kernel config
+ * is unused as pageblock_order is set at compile-time. See
+ * include/linux/pageblock-flags.h for the values of pageblock_order based on
+ * the kernel config
  */
-static inline int pageblock_default_order(unsigned int order)
+static inline void set_pageblock_order(void)
 {
-	return MAX_ORDER-1;
 }
-#define set_pageblock_order(x)	do {} while (0)
 
 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
 
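For context on why the chosen order matters: pageblock_order feeds the pageblock granularity used by the mobility-grouping code. A rough sketch of that relationship; the macro mirrors include/linux/pageblock-flags.h and the 4 KiB page / 2 MiB huge page numbers are an illustrative assumption, not taken from this diff:

/* Sketch: pageblock_nr_pages is derived from pageblock_order. */
#define pageblock_nr_pages	(1UL << pageblock_order)

/*
 * Example (illustrative): with PAGE_SHIFT == 12 and HPAGE_SHIFT == 21,
 * HUGETLB_PAGE_ORDER == 9, so the variable path above yields
 * pageblock_nr_pages == 512, i.e. 512 * 4 KiB == 2 MiB per pageblock.
 */
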
@@ -4413,16 +4410,16 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		zone_pcp_init(zone);
 		for_each_lru(lru)
 			INIT_LIST_HEAD(&zone->lruvec.lists[lru]);
-		zone->reclaim_stat.recent_rotated[0] = 0;
-		zone->reclaim_stat.recent_rotated[1] = 0;
-		zone->reclaim_stat.recent_scanned[0] = 0;
-		zone->reclaim_stat.recent_scanned[1] = 0;
+		zone->lruvec.reclaim_stat.recent_rotated[0] = 0;
+		zone->lruvec.reclaim_stat.recent_rotated[1] = 0;
+		zone->lruvec.reclaim_stat.recent_scanned[0] = 0;
+		zone->lruvec.reclaim_stat.recent_scanned[1] = 0;
 		zap_zone_vm_stats(zone);
 		zone->flags = 0;
 		if (!size)
 			continue;
 
-		set_pageblock_order(pageblock_default_order());
+		set_pageblock_order();
 		setup_usemap(pgdat, zone, size);
 		ret = init_currently_empty_zone(zone, zone_start_pfn,
 						size, MEMMAP_EARLY);
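
The recent_rotated/recent_scanned counters are now reached through the zone's lruvec rather than the zone itself. A simplified sketch of the data-structure shape this implies; the real definitions live in include/linux/mmzone.h and may carry additional fields:

struct zone_reclaim_stat {
	/* recently rotated/scanned page counts, [0] anon, [1] file */
	unsigned long recent_rotated[2];
	unsigned long recent_scanned[2];
};

struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
	struct zone_reclaim_stat reclaim_stat;	/* moved under the lruvec */
};
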
@@ -4815,7 +4812,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 	find_zone_movable_pfns_for_nodes();
 
 	/* Print out the zone ranges */
-	printk("Zone PFN ranges:\n");
+	printk("Zone ranges:\n");
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		if (i == ZONE_MOVABLE)
 			continue;
@@ -4824,22 +4821,25 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 				arch_zone_highest_possible_pfn[i])
 			printk(KERN_CONT "empty\n");
 		else
-			printk(KERN_CONT "%0#10lx -> %0#10lx\n",
-				arch_zone_lowest_possible_pfn[i],
-				arch_zone_highest_possible_pfn[i]);
+			printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n",
+				arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
+				(arch_zone_highest_possible_pfn[i]
+					<< PAGE_SHIFT) - 1);
 	}
 
 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
-	printk("Movable zone start PFN for each node\n");
+	printk("Movable zone start for each node\n");
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		if (zone_movable_pfn[i])
-			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
+			printk("  Node %d: %#010lx\n", i,
+			       zone_movable_pfn[i] << PAGE_SHIFT);
 	}
 
 	/* Print out the early_node_map[] */
-	printk("Early memory PFN ranges\n");
+	printk("Early memory node ranges\n");
 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
-		printk("  %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn);
+		printk("  node %3d: [mem %#010lx-%#010lx]\n", nid,
+		       start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
 
 	/* Initialise every node */
 	mminit_verify_pageflags_layout();
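
The new messages print byte addresses rather than raw PFNs, which is what the added << PAGE_SHIFT shifts and the trailing - 1 accomplish. A small worked example of that conversion, assuming a 4 KiB page size (PAGE_SHIFT == 12) purely for illustration:

/* Illustrative arithmetic only; not part of the kernel source. */
unsigned long start_pfn = 0x10;                    /* first page frame      */
unsigned long end_pfn   = 0x100;                   /* exclusive end frame   */
unsigned long start     = start_pfn << 12;         /* 0x00010000            */
unsigned long end       = (end_pfn << 12) - 1;     /* 0x000fffff, inclusive */
/* printed as: [mem 0x00010000-0x000fffff] */
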
@@ -5657,7 +5657,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
 		.nr_migratepages = 0,
 		.order = -1,
 		.zone = page_zone(pfn_to_page(start)),
-		.sync = true,
+		.mode = COMPACT_SYNC,
 	};
 	INIT_LIST_HEAD(&cc.migratepages);
 
@@ -5938,7 +5938,7 @@ bool is_free_buddy_page(struct page *page)
 }
 #endif
 
-static struct trace_print_flags pageflag_names[] = {
+static const struct trace_print_flags pageflag_names[] = {
 	{1UL << PG_locked,		"locked"	},
 	{1UL << PG_error,		"error"		},
 	{1UL << PG_referenced,		"referenced"	},
@@ -5973,7 +5973,9 @@ static struct trace_print_flags pageflag_names[] = {
 #ifdef CONFIG_MEMORY_FAILURE
 	{1UL << PG_hwpoison,		"hwpoison"	},
 #endif
-	{-1UL,				NULL		},
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	{1UL << PG_compound_lock,	"compound_lock"	},
+#endif
 };
 
 static void dump_page_flags(unsigned long flags)
@@ -5982,12 +5984,14 @@ static void dump_page_flags(unsigned long flags)
 	unsigned long mask;
 	int i;
 
+	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
+
 	printk(KERN_ALERT "page flags: %#lx(", flags);
 
 	/* remove zone id */
 	flags &= (1UL << NR_PAGEFLAGS) - 1;
 
-	for (i = 0; pageflag_names[i].name && flags; i++) {
+	for (i = 0; i < ARRAY_SIZE(pageflag_names) && flags; i++) {
 
 		mask = pageflag_names[i].mask;
 		if ((flags & mask) != mask)
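
Replacing the {-1UL, NULL} sentinel with a size-checked table is the pattern worth noting in the last two hunks: the loop is bounded by ARRAY_SIZE() and BUILD_BUG_ON() ties the table length to the flag enum at compile time. A minimal userspace analogue of that pattern, with all names below invented for illustration:

#include <stdio.h>

/* One enumerator per flag, plus a count that the table must match. */
enum demo_flags { DF_LOCKED, DF_ERROR, DF_REFERENCED, __NR_DEMO_FLAGS };

static const char * const demo_flag_names[] = {
	[DF_LOCKED]     = "locked",
	[DF_ERROR]      = "error",
	[DF_REFERENCED] = "referenced",
};

/* Compile-time check standing in for BUILD_BUG_ON(). */
_Static_assert(sizeof(demo_flag_names) / sizeof(demo_flag_names[0]) ==
	       __NR_DEMO_FLAGS, "flag-name table out of sync with enum");

int main(void)
{
	unsigned long flags = (1UL << DF_LOCKED) | (1UL << DF_REFERENCED);

	/* Bounded walk over the table, mirroring the dump_page_flags() loop. */
	for (unsigned int i = 0; i < __NR_DEMO_FLAGS && flags; i++) {
		if (flags & (1UL << i)) {
			printf("%s ", demo_flag_names[i]);
			flags &= ~(1UL << i);
		}
	}
	printf("\n");
	return 0;
}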