Diffstat (limited to 'mm/page_alloc.c'):

 mm/page_alloc.c | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ff5484fdbdf9..3334a769eb91 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -670,6 +670,7 @@ out:
 
 void free_compound_page(struct page *page)
 {
+	mem_cgroup_uncharge(page);
 	__free_pages_ok(page, compound_order(page));
 }
 
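Note: this hunk moves memcg uncharging into the compound-page destructor, so the charge is dropped even for compound pages freed without passing through __page_cache_release(). A simplified model of the path that reaches it (put_compound_page_model is a hypothetical name for illustration; put_page_testzero() and get_compound_page_dtor() are real helpers of this era):

/* Sketch, not kernel source: dropping the last reference on a compound
 * page dispatches through its destructor; for ordinary high-order
 * allocations that destructor is free_compound_page(), where the
 * uncharge now happens. */
static void put_compound_page_model(struct page *page)
{
	compound_page_dtor *dtor;

	if (put_page_testzero(page)) {		/* last reference gone */
		dtor = get_compound_page_dtor(page);
		(*dtor)(page);			/* e.g. free_compound_page() */
	}
}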
@@ -3955,14 +3956,22 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
 		goto check_priority;
 
 	/*
+	 * compaction was skipped because there are not enough order-0 pages
+	 * to work with, so we retry only if it looks like reclaim can help.
+	 */
+	if (compaction_needs_reclaim(compact_result)) {
+		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
+		goto out;
+	}
+
+	/*
 	 * make sure the compaction wasn't deferred or didn't bail out early
 	 * due to locks contention before we declare that we should give up.
-	 * But do not retry if the given zonelist is not suitable for
-	 * compaction.
+	 * But the next retry should use a higher priority if allowed, so
+	 * we don't just keep bailing out endlessly.
 	 */
 	if (compaction_withdrawn(compact_result)) {
-		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
-		goto out;
+		goto check_priority;
 	}
 
 	/*
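Note: should_compact_retry() now splits the old compaction_withdrawn() handling in two. A skipped compaction (too few free order-0 pages to migrate to) asks whether reclaim could make the zonelist suitable, while any other withdrawn result escalates to a higher compaction priority instead of giving up. A sketch of the new helper, assuming it tests COMPACT_SKIPPED as the in-hunk comment implies (see include/linux/compaction.h in the same series for the authoritative definition):

static inline bool compaction_needs_reclaim(enum compact_result result)
{
	/* skipped for lack of order-0 pages: reclaim may help */
	return result == COMPACT_SKIPPED;
}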
@@ -6638,9 +6647,11 @@ static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void pgdat_init_split_queue(struct pglist_data *pgdat)
 {
-	spin_lock_init(&pgdat->split_queue_lock);
-	INIT_LIST_HEAD(&pgdat->split_queue);
-	pgdat->split_queue_len = 0;
+	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
+
+	spin_lock_init(&ds_queue->split_queue_lock);
+	INIT_LIST_HEAD(&ds_queue->split_queue);
+	ds_queue->split_queue_len = 0;
 }
 #else
 static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
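Note: the per-node split-queue fields are gathered into a struct embedded in pglist_data as deferred_split_queue, which lets later patches hang an identical queue off each memcg. Roughly the layout this hunk initializes (a sketch inferred from the field names used above, not quoted from huge_mm.h):

struct deferred_split {
	spinlock_t split_queue_lock;	/* protects split_queue */
	struct list_head split_queue;	/* THPs awaiting deferred split */
	unsigned long split_queue_len;	/* length of the list above */
};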
@@ -8196,7 +8207,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 		if (!hugepage_migration_supported(page_hstate(head)))
 			goto unmovable;
 
-		skip_pages = (1 << compound_order(head)) - (page - head);
+		skip_pages = compound_nr(head) - (page - head);
 		iter += skip_pages - 1;
 		continue;
 	}
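Note: compound_nr() is a readability helper for the number of base pages in a compound page. It should be equivalent to the open-coded expression it replaces, along these lines (a sketch; the real definition lives in include/linux/mm.h):

static inline unsigned long compound_nr(struct page *page)
{
	/* 2^order base pages make up the compound page */
	return 1UL << compound_order(page);
}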