Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c | 77 ++++++++++++++++++++++++++++++-----------------------------------------------
1 file changed, 30 insertions(+), 47 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5b74de6702e0..a8f2c87792c3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1405,7 +1405,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 
 	mt = get_pageblock_migratetype(page);
 	if (unlikely(mt != MIGRATE_ISOLATE))
-		__mod_zone_freepage_state(zone, -(1UL << order), mt);
+		__mod_zone_freepage_state(zone, -(1UL << alloc_order), mt);
 
 	if (alloc_order != order)
 		expand(zone, page, alloc_order, order,
@@ -1422,7 +1422,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 		}
 	}
 
-	return 1UL << order;
+	return 1UL << alloc_order;
 }
 
 /*
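The two hunks above fix the accounting in capture_free_page(): the function takes a free page of size 1 << order and splits it down to the requested alloc_order with expand(), so only 1 << alloc_order pages actually leave the free lists; both the NR_FREE_PAGES adjustment and the return value must therefore use alloc_order. A minimal userspace sketch of that arithmetic, with made-up example orders (not kernel code):

#include <assert.h>

int main(void)
{
	/* Made-up example: split an order-10 free page down to order-9. */
	unsigned int order = 10, alloc_order = 9;

	unsigned long captured = 1UL << alloc_order;          /* pages handed to the caller */
	unsigned long remainder = (1UL << order) - captured;  /* re-freed by expand() */

	/* The old code adjusted NR_FREE_PAGES by -(1UL << order) and
	 * returned 1UL << order, over-reporting by the remainder. */
	assert(captured + remainder == 1UL << order);
	assert(captured == 512 && remainder == 512);
	return 0;
}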
@@ -2378,6 +2378,15 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
 }
 
+/* Returns true if the allocation is likely for THP */
+static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
+{
+	if (order == pageblock_order &&
+	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+		return true;
+	return false;
+}
+
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
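is_thp_alloc() treats an allocation as likely-THP when it is exactly pageblock-sized, movable, and the caller did not insist on retrying (__GFP_REPEAT clear). A standalone sketch of the same bitmask test; the flag values and pageblock order below are mock stand-ins for the real kernel definitions:

#include <stdbool.h>
#include <stdio.h>

/* Mock stand-ins; the real values live in the kernel headers. */
#define MOCK_GFP_MOVABLE	0x08u
#define MOCK_GFP_REPEAT		0x400u
#define MOCK_PAGEBLOCK_ORDER	9	/* e.g. 2MB pageblocks with 4KB pages */

/* Mirrors the predicate above: pageblock-sized, movable, repeat clear. */
static bool thp_like(unsigned int gfp_mask, unsigned int order)
{
	return order == MOCK_PAGEBLOCK_ORDER &&
	       (gfp_mask & (MOCK_GFP_MOVABLE | MOCK_GFP_REPEAT)) == MOCK_GFP_MOVABLE;
}

int main(void)
{
	printf("%d\n", thp_like(MOCK_GFP_MOVABLE, 9));                   /* 1: THP-like */
	printf("%d\n", thp_like(MOCK_GFP_MOVABLE | MOCK_GFP_REPEAT, 9)); /* 0: caller insists on retrying */
	printf("%d\n", thp_like(MOCK_GFP_MOVABLE, 2));                   /* 0: not pageblock-sized */
	return 0;
}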
@@ -2416,7 +2425,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 restart:
-	wake_all_kswapd(order, zonelist, high_zoneidx,
+	/* The decision whether to wake kswapd for THP is made later */
+	if (!is_thp_alloc(gfp_mask, order))
+		wake_all_kswapd(order, zonelist, high_zoneidx,
 					zone_idx(preferred_zone));
 
 	/*
@@ -2487,15 +2498,21 @@ rebalance:
 		goto got_pg;
 	sync_migration = true;
 
-	/*
-	 * If compaction is deferred for high-order allocations, it is because
-	 * sync compaction recently failed. In this is the case and the caller
-	 * requested a movable allocation that does not heavily disrupt the
-	 * system then fail the allocation instead of entering direct reclaim.
-	 */
-	if ((deferred_compaction || contended_compaction) &&
-	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
-		goto nopage;
+	if (is_thp_alloc(gfp_mask, order)) {
+		/*
+		 * If compaction is deferred for high-order allocations, it is
+		 * because sync compaction recently failed. If this is the case
+		 * and the caller requested a movable allocation that does not
+		 * heavily disrupt the system then fail the allocation instead
+		 * of entering direct reclaim.
+		 */
+		if (deferred_compaction || contended_compaction)
+			goto nopage;
+
+		/* If process is willing to reclaim/compact then wake kswapd */
+		wake_all_kswapd(order, zonelist, high_zoneidx,
+					zone_idx(preferred_zone));
+	}
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
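Taken together with the restart: hunk above, kswapd handling for likely-THP allocations is now decided in one place: the early wakeup is skipped, and kswapd is woken only if the allocation is still willing to enter direct reclaim/compaction; if sync compaction was recently deferred or this attempt was contended, the allocation fails outright instead. A runnable sketch modelling the resulting decision (a simplification of the slow path, not kernel code; the booleans come from the async compaction attempt that precedes direct reclaim):

#include <stdbool.h>
#include <stdio.h>

enum action { WAKE_EARLY, FAIL_FAST, WAKE_LATE_THEN_RECLAIM };

/* Models the reworked kswapd-wakeup decisions; thp stands in for
 * is_thp_alloc(gfp_mask, order). */
static enum action slowpath(bool thp, bool deferred_or_contended)
{
	if (!thp)
		return WAKE_EARLY;	/* woken at restart:, reclaim proceeds */
	if (deferred_or_contended)
		return FAIL_FAST;	/* goto nopage: fall back to base pages */
	return WAKE_LATE_THEN_RECLAIM;	/* kswapd woken just before reclaim */
}

int main(void)
{
	printf("%d %d %d\n",
	       slowpath(false, true),	/* 0: non-THP always wakes early */
	       slowpath(true, true),	/* 1: THP fails instead of reclaiming */
	       slowpath(true, false));	/* 2: THP wakes kswapd late */
	return 0;
}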
@@ -4505,7 +4522,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		zone->zone_pgdat = pgdat;
 
 		zone_pcp_init(zone);
-		lruvec_init(&zone->lruvec, zone);
+		lruvec_init(&zone->lruvec);
 		if (!size)
 			continue;
 
@@ -6098,37 +6115,3 @@ void dump_page(struct page *page)
 	dump_page_flags(page->flags);
 	mem_cgroup_print_bad_page(page);
 }
-
-/* reset zone->present_pages */
-void reset_zone_present_pages(void)
-{
-	struct zone *z;
-	int i, nid;
-
-	for_each_node_state(nid, N_HIGH_MEMORY) {
-		for (i = 0; i < MAX_NR_ZONES; i++) {
-			z = NODE_DATA(nid)->node_zones + i;
-			z->present_pages = 0;
-		}
-	}
-}
-
-/* calculate zone's present pages in buddy system */
-void fixup_zone_present_pages(int nid, unsigned long start_pfn,
-			      unsigned long end_pfn)
-{
-	struct zone *z;
-	unsigned long zone_start_pfn, zone_end_pfn;
-	int i;
-
-	for (i = 0; i < MAX_NR_ZONES; i++) {
-		z = NODE_DATA(nid)->node_zones + i;
-		zone_start_pfn = z->zone_start_pfn;
-		zone_end_pfn = zone_start_pfn + z->spanned_pages;
-
-		/* if the two regions intersect */
-		if (!(zone_start_pfn >= end_pfn || zone_end_pfn <= start_pfn))
-			z->present_pages += min(end_pfn, zone_end_pfn) -
-				max(start_pfn, zone_start_pfn);
-	}
-}
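The deleted fixup_zone_present_pages() accumulated, per zone, the overlap of the half-open PFN range [start_pfn, end_pfn) with the zone's span [zone_start_pfn, zone_start_pfn + spanned_pages). For reference, the interval-intersection arithmetic it relied on, as a small self-contained check (overlap() is an illustrative helper, not a kernel function):

#include <assert.h>

/* Length of the intersection of two half-open PFN ranges
 * [a_start, a_end) and [b_start, b_end), using the same
 * min/max arithmetic as the removed code. */
static unsigned long overlap(unsigned long a_start, unsigned long a_end,
			     unsigned long b_start, unsigned long b_end)
{
	if (a_start >= b_end || b_start >= a_end)
		return 0;	/* disjoint */
	unsigned long lo = a_start > b_start ? a_start : b_start;
	unsigned long hi = a_end < b_end ? a_end : b_end;
	return hi - lo;
}

int main(void)
{
	assert(overlap(0, 100, 50, 150) == 50);	/* partial overlap */
	assert(overlap(0, 100, 100, 200) == 0);	/* half-open: touching is not overlapping */
	assert(overlap(10, 20, 0, 100) == 10);	/* fully contained */
	return 0;
}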