Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 130
 1 file changed, 54 insertions(+), 76 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 178bf9c2a2cb..2c25de46c58f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1091,14 +1091,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
 	int migratetype = 0;
 	int batch_free = 0;
-	unsigned long nr_scanned, flags;
 	bool isolated_pageblocks;
 
-	spin_lock_irqsave(&zone->lock, flags);
+	spin_lock(&zone->lock);
 	isolated_pageblocks = has_isolate_pageblock(zone);
-	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
-	if (nr_scanned)
-		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
 
 	while (count) {
 		struct page *page;
@@ -1143,7 +1139,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			trace_mm_page_pcpu_drain(page, 0, mt);
 		} while (--count && --batch_free && !list_empty(list));
 	}
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 }
 
 static void free_one_page(struct zone *zone,
@@ -1151,19 +1147,13 @@ static void free_one_page(struct zone *zone,
 				unsigned int order,
 				int migratetype)
 {
-	unsigned long nr_scanned, flags;
-	spin_lock_irqsave(&zone->lock, flags);
-	__count_vm_events(PGFREE, 1 << order);
-	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
-	if (nr_scanned)
-		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
-
+	spin_lock(&zone->lock);
 	if (unlikely(has_isolate_pageblock(zone) ||
 		is_migrate_isolate(migratetype))) {
 		migratetype = get_pfnblock_migratetype(page, pfn);
 	}
 	__free_one_page(page, pfn, zone, order, migratetype);
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 }
 
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
@@ -1241,6 +1231,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
 
 static void __free_pages_ok(struct page *page, unsigned int order)
 {
+	unsigned long flags;
 	int migratetype;
 	unsigned long pfn = page_to_pfn(page);
 
@@ -1248,7 +1239,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
+	local_irq_save(flags);
+	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, pfn, order, migratetype);
+	local_irq_restore(flags);
 }
 
 static void __init __free_pages_boot_core(struct page *page, unsigned int order)
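Note: the free-path hunks above move interrupt disabling out of the zone lock. zone->lock is now taken with a plain spin_lock(), and the outermost caller disables IRQs once around the whole free operation. Everything below is taken from the hunks themselves; only the isolation handling and statistics details are elided, so treat it as a reduced sketch rather than the full functions.

	/* Reduced sketch of the pattern above; isolation handling elided. */
	static void free_one_page(struct zone *zone, struct page *page,
				  unsigned long pfn, unsigned int order,
				  int migratetype)
	{
		spin_lock(&zone->lock);		/* caller already disabled IRQs */
		__free_one_page(page, pfn, zone, order, migratetype);
		spin_unlock(&zone->lock);
	}

	static void __free_pages_ok(struct page *page, unsigned int order)
	{
		unsigned long flags;
		unsigned long pfn = page_to_pfn(page);
		int migratetype = get_pfnblock_migratetype(page, pfn);

		local_irq_save(flags);		/* one IRQ-off section per free */
		__count_vm_events(PGFREE, 1 << order);
		free_one_page(page_zone(page), page, pfn, order, migratetype);
		local_irq_restore(flags);
	}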
@@ -1696,10 +1690,10 @@ static inline int check_new_page(struct page *page)
 	return 1;
 }
 
-static inline bool free_pages_prezeroed(bool poisoned)
+static inline bool free_pages_prezeroed(void)
 {
 	return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
-		page_poisoning_enabled() && poisoned;
+		page_poisoning_enabled();
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -1753,17 +1747,10 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
 				unsigned int alloc_flags)
 {
 	int i;
-	bool poisoned = true;
-
-	for (i = 0; i < (1 << order); i++) {
-		struct page *p = page + i;
-		if (poisoned)
-			poisoned &= page_is_poisoned(p);
-	}
 
 	post_alloc_hook(page, order, gfp_flags);
 
-	if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
+	if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
 		for (i = 0; i < (1 << order); i++)
 			clear_highpage(page + i);
 
@@ -2043,8 +2030,8 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
 
 	/* Yoink! */
 	mt = get_pageblock_migratetype(page);
-	if (mt != MIGRATE_HIGHATOMIC &&
-			!is_migrate_isolate(mt) && !is_migrate_cma(mt)) {
+	if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
+	    && !is_migrate_cma(mt)) {
 		zone->nr_reserved_highatomic += pageblock_nr_pages;
 		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
 		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
@@ -2101,8 +2088,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			 * from highatomic to ac->migratetype. So we should
 			 * adjust the count once.
 			 */
-			if (get_pageblock_migratetype(page) ==
-						MIGRATE_HIGHATOMIC) {
+			if (is_migrate_highatomic_page(page)) {
 				/*
 				 * It should never happen but changes to
 				 * locking could inadvertently allow a per-cpu
@@ -2159,8 +2145,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 
 		page = list_first_entry(&area->free_list[fallback_mt],
 						struct page, lru);
-		if (can_steal &&
-			get_pageblock_migratetype(page) != MIGRATE_HIGHATOMIC)
+		if (can_steal && !is_migrate_highatomic_page(page))
 			steal_suitable_fallback(zone, page, start_migratetype);
 
 		/* Remove the page from the freelists */
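The hunks above replace open-coded MIGRATE_HIGHATOMIC comparisons with is_migrate_highatomic() and is_migrate_highatomic_page(). Their definitions are not part of this diff; a plausible spelling (assumed, likely living in mm/internal.h) is:

	/* Assumed helper definitions -- not shown in this diff. */
	#define is_migrate_highatomic(migratetype)				\
		((migratetype) == MIGRATE_HIGHATOMIC)

	#define is_migrate_highatomic_page(_page)				\
		(get_pageblock_migratetype(_page) == MIGRATE_HIGHATOMIC)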
@@ -2220,9 +2205,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			int migratetype, bool cold)
 {
 	int i, alloced = 0;
-	unsigned long flags;
 
-	spin_lock_irqsave(&zone->lock, flags);
+	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
 		struct page *page = __rmqueue(zone, order, migratetype);
 		if (unlikely(page == NULL))
@@ -2258,7 +2242,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 	 * pages added to the pcp list.
 	 */
 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 	return alloced;
 }
 
@@ -2374,6 +2358,13 @@ void drain_all_pages(struct zone *zone)
 	 */
 	static cpumask_t cpus_with_pcps;
 
+	/*
+	 * Make sure nobody triggers this path before mm_percpu_wq is fully
+	 * initialized.
+	 */
+	if (WARN_ON_ONCE(!mm_percpu_wq))
+		return;
+
 	/* Workqueues cannot recurse */
 	if (current->flags & PF_WQ_WORKER)
 		return;
@@ -2423,7 +2414,7 @@ void drain_all_pages(struct zone *zone)
 	for_each_cpu(cpu, &cpus_with_pcps) {
 		struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
 		INIT_WORK(work, drain_local_pages_wq);
-		schedule_work_on(cpu, work);
+		queue_work_on(cpu, mm_percpu_wq, work);
 	}
 	for_each_cpu(cpu, &cpus_with_pcps)
 		flush_work(per_cpu_ptr(&pcpu_drain, cpu));
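drain_all_pages() now queues its per-cpu drain work on a dedicated workqueue rather than the system one, and bails out if that workqueue does not exist yet. Where mm_percpu_wq is actually created is not shown in this diff; the setup below is an assumed sketch of the pattern (the init function name and flags are illustrative, not taken from this patch):

	/* Assumed setup elsewhere -- not part of this diff. */
	struct workqueue_struct *mm_percpu_wq;

	static int __init init_mm_internals(void)
	{
		/* WQ_MEM_RECLAIM so drain work can run under memory pressure. */
		mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
		return 0;
	}

	/* Callers then guard against running before the workqueue exists: */
	if (WARN_ON_ONCE(!mm_percpu_wq))
		return;
	queue_work_on(cpu, mm_percpu_wq, work);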
@@ -2479,25 +2470,22 @@ void free_hot_cold_page(struct page *page, bool cold)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
+	unsigned long flags;
 	unsigned long pfn = page_to_pfn(page);
 	int migratetype;
 
-	if (in_interrupt()) {
-		__free_pages_ok(page, 0);
-		return;
-	}
-
 	if (!free_pcp_prepare(page))
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
 	set_pcppage_migratetype(page, migratetype);
-	preempt_disable();
+	local_irq_save(flags);
+	__count_vm_event(PGFREE);
 
 	/*
 	 * We only track unmovable, reclaimable and movable on pcp lists.
 	 * Free ISOLATE pages back to the allocator because they are being
-	 * offlined but treat RESERVE as movable pages so we can get those
+	 * offlined but treat HIGHATOMIC as movable pages so we can get those
 	 * areas back if necessary. Otherwise, we may have to free
 	 * excessively into the page allocator
 	 */
@@ -2509,7 +2497,6 @@ void free_hot_cold_page(struct page *page, bool cold)
 		migratetype = MIGRATE_MOVABLE;
 	}
 
-	__count_vm_event(PGFREE);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
 	if (!cold)
 		list_add(&page->lru, &pcp->lists[migratetype]);
@@ -2523,7 +2510,7 @@ void free_hot_cold_page(struct page *page, bool cold)
 	}
 
 out:
-	preempt_enable();
+	local_irq_restore(flags);
 }
 
 /*
@@ -2608,7 +2595,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	for (; page < endpage; page += pageblock_nr_pages) {
 		int mt = get_pageblock_migratetype(page);
 		if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
-		    && mt != MIGRATE_HIGHATOMIC)
+		    && !is_migrate_highatomic(mt))
 			set_pageblock_migratetype(page,
 						  MIGRATE_MOVABLE);
 	}
@@ -2648,8 +2635,6 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 {
 	struct page *page;
 
-	VM_BUG_ON(in_interrupt());
-
 	do {
 		if (list_empty(list)) {
 			pcp->count += rmqueue_bulk(zone, 0,
@@ -2680,8 +2665,9 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 	struct list_head *list;
 	bool cold = ((gfp_flags & __GFP_COLD) != 0);
 	struct page *page;
+	unsigned long flags;
 
-	preempt_disable();
+	local_irq_save(flags);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
 	list = &pcp->lists[migratetype];
 	page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
@@ -2689,7 +2675,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 		zone_statistics(preferred_zone, zone);
 	}
-	preempt_enable();
+	local_irq_restore(flags);
 	return page;
 }
 
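Together with the free_hot_cold_page() hunk above, these changes protect the per-cpu page lists by disabling interrupts again instead of only disabling preemption, since the lists may now be used from IRQ context (the in_interrupt() bailouts and VM_BUG_ON are gone). A reduced sketch of the allocation-side fast path, assembled from the hunks with error handling and the cold-page detail folded in:

	/* Illustrative sketch of the IRQ-protected pcp fast path. */
	static struct page *rmqueue_pcplist(struct zone *preferred_zone,
					    struct zone *zone, unsigned int order,
					    gfp_t gfp_flags, int migratetype)
	{
		struct per_cpu_pages *pcp;
		struct list_head *list;
		struct page *page;
		unsigned long flags;

		local_irq_save(flags);	/* excludes IRQ users of the pcp lists */
		pcp = &this_cpu_ptr(zone->pageset)->pcp;
		list = &pcp->lists[migratetype];
		page = __rmqueue_pcplist(zone, migratetype,
					 (gfp_flags & __GFP_COLD) != 0, pcp, list);
		if (page) {
			__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
			zone_statistics(preferred_zone, zone);
		}
		local_irq_restore(flags);
		return page;
	}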
@@ -2705,7 +2691,7 @@ struct page *rmqueue(struct zone *preferred_zone,
 	unsigned long flags;
 	struct page *page;
 
-	if (likely(order == 0) && !in_interrupt()) {
+	if (likely(order == 0)) {
 		page = rmqueue_pcplist(preferred_zone, zone, order,
 				gfp_flags, migratetype);
 		goto out;
@@ -3107,8 +3093,7 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
 	static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
 				      DEFAULT_RATELIMIT_BURST);
 
-	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
-	    debug_guardpage_minorder() > 0)
+	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
 		return;
 
 	pr_warn("%s: ", current->comm);
@@ -3519,19 +3504,12 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 }
 
 /*
- * Maximum number of reclaim retries without any progress before OOM killer
- * is consider as the only way to move forward.
- */
-#define MAX_RECLAIM_RETRIES 16
-
-/*
  * Checks whether it makes sense to retry the reclaim to make a forward progress
  * for the given allocation request.
- * The reclaim feedback represented by did_some_progress (any progress during
- * the last reclaim round) and no_progress_loops (number of reclaim rounds without
- * any progress in a row) is considered as well as the reclaimable pages on the
- * applicable zone list (with a backoff mechanism which is a function of
- * no_progress_loops).
+ *
+ * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
+ * without success, or when we couldn't even meet the watermark if we
+ * reclaimed all remaining pages on the LRU lists.
  *
  * Returns true if a retry is viable or false to enter the oom path.
  */
@@ -3576,13 +3554,11 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
 		bool wmark;
 
 		available = reclaimable = zone_reclaimable_pages(zone);
-		available -= DIV_ROUND_UP((*no_progress_loops) * available,
-					  MAX_RECLAIM_RETRIES);
 		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
 
 		/*
-		 * Would the allocation succeed if we reclaimed the whole
-		 * available?
+		 * Would the allocation succeed if we reclaimed all
+		 * reclaimable pages?
 		 */
 		wmark = __zone_watermark_ok(zone, order, min_wmark,
 				ac_classzone_idx(ac), alloc_flags, available);
@@ -3768,7 +3744,7 @@ retry:
 
 	/* Make sure we know about allocations which stall for too long */
 	if (time_after(jiffies, alloc_start + stall_timeout)) {
-		warn_alloc(gfp_mask, ac->nodemask,
+		warn_alloc(gfp_mask & ~__GFP_NOWARN, ac->nodemask,
 			"page allocation stalls for %ums, order:%u",
 			jiffies_to_msecs(jiffies-alloc_start), order);
 		stall_timeout += 10 * HZ;
@@ -3968,10 +3944,12 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 		goto out;
 
 	/*
-	 * Runtime PM, block IO and its error handling path can deadlock
-	 * because I/O on the device might not complete.
+	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
+	 * resp. GFP_NOIO which has to be inherited for all allocation requests
+	 * from a particular context which has been marked by
+	 * memalloc_no{fs,io}_{save,restore}.
 	 */
-	alloc_mask = memalloc_noio_flags(gfp_mask);
+	alloc_mask = current_gfp_context(gfp_mask);
 	ac.spread_dirty_pages = false;
 
 	/*
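current_gfp_context() replaces memalloc_noio_flags() here and in alloc_contig_range() below, applying both the NOIO and the NOFS scoped constraints mentioned in the new comment. Its definition is not part of this diff; an assumed sketch, based on the memalloc_no{fs,io}_{save,restore} API the comment refers to, would mask the flags from the caller's task state roughly like this:

	/* Assumed helper -- not shown in this diff. */
	static inline gfp_t current_gfp_context(gfp_t flags)
	{
		/*
		 * Tasks inside memalloc_noio_save()/memalloc_nofs_save()
		 * sections must not re-enter the IO/FS layers from any
		 * nested allocation.
		 */
		if (unlikely(current->flags & PF_MEMALLOC_NOIO))
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
			flags &= ~__GFP_FS;
		return flags;
	}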
@@ -4244,7 +4222,8 @@ EXPORT_SYMBOL(free_pages_exact);
  * nr_free_zone_pages() counts the number of counts pages which are beyond the
  * high watermark within all zones at or below a given zone index. For each
  * zone, the number of pages is calculated as:
- *     managed_pages - high_pages
+ *
+ *     nr_free_zone_pages = managed_pages - high_pages
  */
 static unsigned long nr_free_zone_pages(int offset)
 {
@@ -4506,7 +4485,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 #endif
 			" writeback_tmp:%lukB"
 			" unstable:%lukB"
-			" pages_scanned:%lu"
 			" all_unreclaimable? %s"
 			"\n",
 			pgdat->node_id,
@@ -4520,17 +4498,17 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 			K(node_page_state(pgdat, NR_FILE_MAPPED)),
 			K(node_page_state(pgdat, NR_FILE_DIRTY)),
 			K(node_page_state(pgdat, NR_WRITEBACK)),
+			K(node_page_state(pgdat, NR_SHMEM)),
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 			K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
 			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
 					* HPAGE_PMD_NR),
 			K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
 #endif
-			K(node_page_state(pgdat, NR_SHMEM)),
 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
 			K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
-			node_page_state(pgdat, NR_PAGES_SCANNED),
-			!pgdat_reclaimable(pgdat) ? "yes" : "no");
+			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
+				"yes" : "no");
 	}
 
 	for_each_populated_zone(zone) {
@@ -7425,7 +7403,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 		.zone = page_zone(pfn_to_page(start)),
 		.mode = MIGRATE_SYNC,
 		.ignore_skip_hint = true,
-		.gfp_mask = memalloc_noio_flags(gfp_mask),
+		.gfp_mask = current_gfp_context(gfp_mask),
 	};
 	INIT_LIST_HEAD(&cc.migratepages);
 