Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--   mm/page_alloc.c   160
1 file changed, 116 insertions(+), 44 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0b9f577b1a2a..3eb01dedfb50 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -289,8 +289,8 @@ EXPORT_SYMBOL(movable_zone);
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 #if MAX_NUMNODES > 1
-int nr_node_ids __read_mostly = MAX_NUMNODES;
-int nr_online_nodes __read_mostly = 1;
+unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
+unsigned int nr_online_nodes __read_mostly = 1;
 EXPORT_SYMBOL(nr_node_ids);
 EXPORT_SYMBOL(nr_online_nodes);
 #endif
@@ -789,6 +789,57 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
         return 0;
 }
 
+#ifdef CONFIG_COMPACTION
+static inline struct capture_control *task_capc(struct zone *zone)
+{
+        struct capture_control *capc = current->capture_control;
+
+        return capc &&
+                !(current->flags & PF_KTHREAD) &&
+                !capc->page &&
+                capc->cc->zone == zone &&
+                capc->cc->direct_compaction ? capc : NULL;
+}
+
+static inline bool
+compaction_capture(struct capture_control *capc, struct page *page,
+                   int order, int migratetype)
+{
+        if (!capc || order != capc->cc->order)
+                return false;
+
+        /* Do not accidentally pollute CMA or isolated regions */
+        if (is_migrate_cma(migratetype) ||
+            is_migrate_isolate(migratetype))
+                return false;
+
+        /*
+         * Do not let lower order allocations pollute a movable pageblock.
+         * This might let an unmovable request use a reclaimable pageblock
+         * and vice-versa but no more than normal fallback logic which can
+         * have trouble finding a high-order free page.
+         */
+        if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
+                return false;
+
+        capc->page = page;
+        return true;
+}
+
+#else
+static inline struct capture_control *task_capc(struct zone *zone)
+{
+        return NULL;
+}
+
+static inline bool
+compaction_capture(struct capture_control *capc, struct page *page,
+                   int order, int migratetype)
+{
+        return false;
+}
+#endif /* CONFIG_COMPACTION */
+
 /*
  * Freeing function for a buddy system allocator.
  *
@@ -822,6 +873,7 @@ static inline void __free_one_page(struct page *page,
         unsigned long uninitialized_var(buddy_pfn);
         struct page *buddy;
         unsigned int max_order;
+        struct capture_control *capc = task_capc(zone);
 
         max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
 
@@ -837,6 +889,11 @@ static inline void __free_one_page(struct page *page,
 
 continue_merging:
         while (order < max_order - 1) {
+                if (compaction_capture(capc, page, order, migratetype)) {
+                        __mod_zone_freepage_state(zone, -(1 << order),
+                                                  migratetype);
+                        return;
+                }
                 buddy_pfn = __find_buddy_pfn(pfn, order);
                 buddy = page + (buddy_pfn - pfn);
 
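The two hunks above add the allocation-capture fast path: a task doing direct compaction publishes a struct capture_control through current->capture_control, and __free_one_page() hands a freed block of exactly the requested order straight to that task instead of merging it back into the buddy lists. Below is a minimal user-space model of that handshake, not kernel code; the simplified structs and the try_capture()/free_one_page_model() helpers are illustrative stand-ins for struct capture_control, compaction_capture(), and __free_one_page().

/* build: cc -Wall -o capture_model capture_model.c && ./capture_model */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct page { int order; };

struct capture_control {
        int wanted_order;        /* models capc->cc->order */
        struct page *page;       /* filled in by the freeing side */
};

/* Models compaction_capture(): claim the block for the waiting compactor. */
static bool try_capture(struct capture_control *capc, struct page *page, int order)
{
        if (!capc || order != capc->wanted_order || capc->page)
                return false;
        capc->page = page;
        return true;
}

/* Models __free_one_page(): capture wins, otherwise "merge" as usual. */
static void free_one_page_model(struct page *page, int order, struct capture_control *capc)
{
        if (try_capture(capc, page, order)) {
                printf("order-%d block captured by the compacting task\n", order);
                return;
        }
        printf("order-%d block merged back into the free lists\n", order);
}

int main(void)
{
        struct capture_control capc = { .wanted_order = 3, .page = NULL };
        struct page a = { .order = 2 }, b = { .order = 3 };

        free_one_page_model(&a, a.order, &capc);   /* wrong order: free lists */
        free_one_page_model(&b, b.order, &capc);   /* wanted order: captured  */
        return 0;
}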
@@ -1056,7 +1113,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
         if (PageMappingFlags(page))
                 page->mapping = NULL;
         if (memcg_kmem_enabled() && PageKmemcg(page))
-                memcg_kmem_uncharge(page, order);
+                __memcg_kmem_uncharge(page, order);
         if (check_free)
                 bad += free_pages_check(page);
         if (bad)
@@ -1303,7 +1360,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
         local_irq_restore(flags);
 }
 
-static void __init __free_pages_boot_core(struct page *page, unsigned int order)
+void __free_pages_core(struct page *page, unsigned int order)
 {
         unsigned int nr_pages = 1 << order;
         struct page *p = page;
@@ -1382,7 +1439,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
 {
         if (early_page_uninitialised(pfn))
                 return;
-        return __free_pages_boot_core(page, order);
+        __free_pages_core(page, order);
 }
 
 /*
@@ -1472,14 +1529,14 @@ static void __init deferred_free_range(unsigned long pfn,
         if (nr_pages == pageblock_nr_pages &&
             (pfn & (pageblock_nr_pages - 1)) == 0) {
                 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-                __free_pages_boot_core(page, pageblock_order);
+                __free_pages_core(page, pageblock_order);
                 return;
         }
 
         for (i = 0; i < nr_pages; i++, page++, pfn++) {
                 if ((pfn & (pageblock_nr_pages - 1)) == 0)
                         set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-                __free_pages_boot_core(page, 0);
+                __free_pages_core(page, 0);
         }
 }
 
@@ -1945,8 +2002,8 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 
         arch_alloc_page(page, order);
         kernel_map_pages(page, 1 << order, 1);
-        kernel_poison_pages(page, 1 << order, 1);
         kasan_alloc_pages(page, order);
+        kernel_poison_pages(page, 1 << order, 1);
         set_page_owner(page, order, gfp_flags);
 }
 
@@ -2962,7 +3019,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
                  * watermark, because we already know our high-order page
                  * exists.
                  */
-                watermark = min_wmark_pages(zone) + (1UL << order);
+                watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
                 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
                         return 0;
 
@@ -3173,24 +3230,14 @@ static int __init fail_page_alloc_debugfs(void)
 
         dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
                                         &fail_page_alloc.attr);
-        if (IS_ERR(dir))
-                return PTR_ERR(dir);
-
-        if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
-                                &fail_page_alloc.ignore_gfp_reclaim))
-                goto fail;
-        if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
-                                &fail_page_alloc.ignore_gfp_highmem))
-                goto fail;
-        if (!debugfs_create_u32("min-order", mode, dir,
-                                &fail_page_alloc.min_order))
-                goto fail;
 
-        return 0;
-fail:
-        debugfs_remove_recursive(dir);
+        debugfs_create_bool("ignore-gfp-wait", mode, dir,
+                            &fail_page_alloc.ignore_gfp_reclaim);
+        debugfs_create_bool("ignore-gfp-highmem", mode, dir,
+                            &fail_page_alloc.ignore_gfp_highmem);
+        debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
 
-        return -ENOMEM;
+        return 0;
 }
 
 late_initcall(fail_page_alloc_debugfs);
@@ -3710,7 +3757,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
                 unsigned int alloc_flags, const struct alloc_context *ac,
                 enum compact_priority prio, enum compact_result *compact_result)
 {
-        struct page *page;
+        struct page *page = NULL;
         unsigned long pflags;
         unsigned int noreclaim_flag;
 
@@ -3721,13 +3768,15 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
         noreclaim_flag = memalloc_noreclaim_save();
 
         *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
-                                                                prio);
+                                                        prio, &page);
 
         memalloc_noreclaim_restore(noreclaim_flag);
         psi_memstall_leave(&pflags);
 
-        if (*compact_result <= COMPACT_INACTIVE)
+        if (*compact_result <= COMPACT_INACTIVE) {
+                WARN_ON_ONCE(page);
                 return NULL;
+        }
 
         /*
          * At least in one zone compaction wasn't deferred or skipped, so let's
@@ -3735,7 +3784,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
          */
         count_vm_event(COMPACTSTALL);
 
-        page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
+        /* Prep a captured page if available */
+        if (page)
+                prep_new_page(page, order, gfp_mask, alloc_flags);
+
+        /* Try to get a page from the freelist if available */
+        if (!page)
+                page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
 
         if (page) {
                 struct zone *zone = page_zone(page);
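The __alloc_pages_direct_compact() hunks above are the consumer side of the same capture mechanism: try_to_compact_pages() can now hand back a captured page through the new &page argument, the caller preps it with prep_new_page() and only falls back to get_page_from_freelist() when nothing was captured, and a page leaking out of an inactive compaction attempt trips WARN_ON_ONCE(). A small user-space model of that control flow follows; compact_model(), freelist_model(), and direct_compact_model() are illustrative stand-ins, not the kernel API.

/* build: cc -Wall -o direct_compact_model direct_compact_model.c && ./direct_compact_model */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

enum compact_result { COMPACT_INACTIVE, COMPACT_SUCCESS };

struct page { int order; };

/* Stand-in for try_to_compact_pages(..., &page): on success it may return
 * a page that was captured while another task was freeing. */
static enum compact_result compact_model(int order, struct page **capture)
{
        static struct page captured;

        captured.order = order;
        *capture = &captured;
        return COMPACT_SUCCESS;
}

/* Stand-in for get_page_from_freelist(). */
static struct page *freelist_model(int order)
{
        static struct page fallback;

        fallback.order = order;
        return &fallback;
}

static struct page *direct_compact_model(int order)
{
        struct page *page = NULL;
        enum compact_result result = compact_model(order, &page);

        if (result <= COMPACT_INACTIVE) {
                assert(page == NULL);    /* mirrors WARN_ON_ONCE(page) */
                return NULL;
        }

        /* Prefer the captured page; otherwise search the free lists. */
        if (!page)
                page = freelist_model(order);
        return page;
}

int main(void)
{
        struct page *page = direct_compact_model(3);

        printf("allocated order-%d page\n", page ? page->order : -1);
        return 0;
}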
@@ -4568,7 +4623,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 
 out:
         if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
-            unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
+            unlikely(__memcg_kmem_charge(page, gfp_mask, order) != 0)) {
                 __free_pages(page, order);
                 page = NULL;
         }
@@ -4761,6 +4816,8 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
  * This function is also limited by MAX_ORDER.
  *
  * Memory allocated by this function must be released by free_pages_exact().
+ *
+ * Return: pointer to the allocated area or %NULL in case of error.
  */
 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
 {
@@ -4781,6 +4838,8 @@ EXPORT_SYMBOL(alloc_pages_exact);
  *
  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
  * back.
+ *
+ * Return: pointer to the allocated area or %NULL in case of error.
  */
 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
 {
@@ -4814,11 +4873,13 @@ EXPORT_SYMBOL(free_pages_exact);
  * nr_free_zone_pages - count number of pages beyond high watermark
  * @offset: The zone index of the highest zone
  *
- * nr_free_zone_pages() counts the number of counts pages which are beyond the
+ * nr_free_zone_pages() counts the number of pages which are beyond the
  * high watermark within all zones at or below a given zone index. For each
  * zone, the number of pages is calculated as:
  *
  *     nr_free_zone_pages = managed_pages - high_pages
+ *
+ * Return: number of pages beyond high watermark.
  */
 static unsigned long nr_free_zone_pages(int offset)
 {
@@ -4845,6 +4906,9 @@ static unsigned long nr_free_zone_pages(int offset)
  *
  * nr_free_buffer_pages() counts the number of pages which are beyond the high
  * watermark within ZONE_DMA and ZONE_NORMAL.
+ *
+ * Return: number of pages beyond high watermark within ZONE_DMA and
+ * ZONE_NORMAL.
  */
 unsigned long nr_free_buffer_pages(void)
 {
@@ -4857,6 +4921,8 @@ EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
  *
  * nr_free_pagecache_pages() counts the number of pages which are beyond the
  * high watermark within all zones.
+ *
+ * Return: number of pages beyond high watermark within all zones.
  */
 unsigned long nr_free_pagecache_pages(void)
 {
@@ -5303,7 +5369,8 @@ static int node_load[MAX_NUMNODES];
  * from each node to each node in the system), and should also prefer nodes
  * with no CPUs, since presumably they'll have very little allocation pressure
  * on them otherwise.
- * It returns -1 if no node is found.
+ *
+ * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
  */
 static int find_next_best_node(int node, nodemask_t *used_node_mask)
 {
@@ -5609,7 +5676,7 @@ void __ref build_all_zonelists(pg_data_t *pgdat)
         else
                 page_group_by_mobility_disabled = 0;
 
-        pr_info("Built %i zonelists, mobility grouping %s. Total pages: %ld\n",
+        pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
                 nr_online_nodes,
                 page_group_by_mobility_disabled ? "off" : "on",
                 vm_total_pages);
@@ -6016,7 +6083,7 @@ int __meminit __early_pfn_to_nid(unsigned long pfn,
                 return state->last_nid;
 
         nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
-        if (nid != -1) {
+        if (nid != NUMA_NO_NODE) {
                 state->last_start = start_pfn;
                 state->last_end = end_pfn;
                 state->last_nid = nid;
@@ -6214,7 +6281,7 @@ unsigned long __init __absent_pages_in_range(int nid,
  * @start_pfn: The start PFN to start searching for holes
  * @end_pfn: The end PFN to stop searching for holes
  *
- * It returns the number of pages frames in memory holes within a range.
+ * Return: the number of page frames in memory holes within a range.
  */
 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
                                            unsigned long end_pfn)
@@ -6376,10 +6443,14 @@ static void __ref setup_usemap(struct pglist_data *pgdat,
 {
         unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
         zone->pageblock_flags = NULL;
-        if (usemapsize)
+        if (usemapsize) {
                 zone->pageblock_flags =
                         memblock_alloc_node_nopanic(usemapsize,
                                                     pgdat->node_id);
+                if (!zone->pageblock_flags)
+                        panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
+                              usemapsize, zone->name, pgdat->node_id);
+        }
 }
 #else
 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
@@ -6609,6 +6680,9 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
                 end = ALIGN(end, MAX_ORDER_NR_PAGES);
                 size = (end - start) * sizeof(struct page);
                 map = memblock_alloc_node_nopanic(size, pgdat->node_id);
+                if (!map)
+                        panic("Failed to allocate %ld bytes for node %d memory map\n",
+                              size, pgdat->node_id);
                 pgdat->node_mem_map = map + offset;
         }
         pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
@@ -6764,14 +6838,14 @@ void __init setup_nr_node_ids(void)
  * model has fine enough granularity to avoid incorrect mapping for the
  * populated node map.
  *
- * Returns the determined alignment in pfn's. 0 if there is no alignment
+ * Return: the determined alignment in pfn's. 0 if there is no alignment
  * requirement (single node).
  */
 unsigned long __init node_map_pfn_alignment(void)
 {
         unsigned long accl_mask = 0, last_end = 0;
         unsigned long start, end, mask;
-        int last_nid = -1;
+        int last_nid = NUMA_NO_NODE;
         int i, nid;
 
         for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
@@ -6819,7 +6893,7 @@ static unsigned long __init find_min_pfn_for_node(int nid)
 /**
  * find_min_pfn_with_active_regions - Find the minimum PFN registered
  *
- * It returns the minimum PFN based on information provided via
+ * Return: the minimum PFN based on information provided via
  * memblock_set_node().
  */
 unsigned long __init find_min_pfn_with_active_regions(void)
@@ -7267,7 +7341,6 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char
 
         return pages;
 }
-EXPORT_SYMBOL(free_reserved_area);
 
 #ifdef CONFIG_HIGHMEM
 void free_highmem_page(struct page *page)
@@ -7496,7 +7569,7 @@ static void __setup_per_zone_wmarks(void)
                          * value here.
                          *
                          * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
-                         * deltas control asynch page reclaim, and so should
+                         * deltas control async page reclaim, and so should
                          * not be capped for highmem.
                          */
                         unsigned long min_pages;
@@ -7973,7 +8046,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 
                 /*
                  * Hugepages are not in LRU lists, but they're movable.
-                 * We need not scan over tail pages bacause we don't
+                 * We need not scan over tail pages because we don't
                  * handle each tail page individually in migration.
                  */
                 if (PageHuge(page)) {
@@ -8112,7 +8185,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
  * pageblocks in the range. Once isolated, the pageblocks should not
  * be modified by others.
  *
- * Returns zero on success or negative error code. On success all
+ * Return: zero on success or negative error code. On success all
  * pages which PFN is in [start, end) are allocated for the caller and
  * need to be freed with free_contig_range().
  */
@@ -8196,7 +8269,6 @@ int alloc_contig_range(unsigned long start, unsigned long end,
          */
 
         lru_add_drain_all();
-        drain_all_pages(cc.zone);
 
         order = 0;
         outer_start = start;