Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 73
 1 file changed, 69 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2e132b9e7a93..09bf2c5f8b4b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -789,6 +789,57 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
 	return 0;
 }
 
+#ifdef CONFIG_COMPACTION
+static inline struct capture_control *task_capc(struct zone *zone)
+{
+	struct capture_control *capc = current->capture_control;
+
+	return capc &&
+		!(current->flags & PF_KTHREAD) &&
+		!capc->page &&
+		capc->cc->zone == zone &&
+		capc->cc->direct_compaction ? capc : NULL;
+}
+
+static inline bool
+compaction_capture(struct capture_control *capc, struct page *page,
+		   int order, int migratetype)
+{
+	if (!capc || order != capc->cc->order)
+		return false;
+
+	/* Do not accidentally pollute CMA or isolated regions */
+	if (is_migrate_cma(migratetype) ||
+	    is_migrate_isolate(migratetype))
+		return false;
+
+	/*
+	 * Do not let lower order allocations pollute a movable pageblock.
+	 * This might let an unmovable request use a reclaimable pageblock
+	 * and vice-versa but no more than normal fallback logic which can
+	 * have trouble finding a high-order free page.
+	 */
+	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
+		return false;
+
+	capc->page = page;
+	return true;
+}
+
+#else
+static inline struct capture_control *task_capc(struct zone *zone)
+{
+	return NULL;
+}
+
+static inline bool
+compaction_capture(struct capture_control *capc, struct page *page,
+		   int order, int migratetype)
+{
+	return false;
+}
+#endif /* CONFIG_COMPACTION */
+
 /*
  * Freeing function for a buddy system allocator.
  *
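
In the hunk above, task_capc() reports an active capture request only when the
freeing task is itself a userspace task in direct compaction (PF_KTHREAD is
excluded, so kcompactd and other kernel threads never capture), the request
targets this zone, and no page has been captured yet. compaction_capture()
then claims a freed page of exactly the requested order, refusing CMA and
isolated regions and keeping sub-pageblock requests out of MIGRATE_MOVABLE
pageblocks. The supporting structure is introduced elsewhere in this series;
a sketch from memory of the mm/internal.h addition (field names assumed):

/*
 * Sketch only: pairs the active direct-compaction request with the page
 * captured for it, if any. current->capture_control points at a
 * caller-provided instance for the duration of the compaction attempt.
 */
struct capture_control {
	struct compact_control *cc;	/* the direct compaction request */
	struct page *page;		/* set by compaction_capture() */
};
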
@@ -822,6 +873,7 @@ static inline void __free_one_page(struct page *page,
 	unsigned long uninitialized_var(buddy_pfn);
 	struct page *buddy;
 	unsigned int max_order;
+	struct capture_control *capc = task_capc(zone);
 
 	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
 
@@ -837,6 +889,11 @@ static inline void __free_one_page(struct page *page,
 
 continue_merging:
 	while (order < max_order - 1) {
+		if (compaction_capture(capc, page, order, migratetype)) {
+			__mod_zone_freepage_state(zone, -(1 << order),
+								migratetype);
+			return;
+		}
 		buddy_pfn = __find_buddy_pfn(pfn, order);
 		buddy = page + (buddy_pfn - pfn);
 
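
Placing the check at the top of the merge loop means capture also fires for
pages that only reach the wanted order through buddy merging: each iteration
raises order by one before re-testing, so an order-2 free whose buddies are
free can merge up and then be captured as the order-3 page the compactor
asked for. The -(1 << order) adjustment backs out the free-page accounting
done earlier in __free_one_page(), since a captured page never lands on a
free list. A minimal standalone model of the decision (plain C, pageblock
order value assumed, not kernel code):

#include <assert.h>
#include <stdbool.h>

#define PAGEBLOCK_ORDER	9	/* typical x86-64 value, assumed */

enum mt { MOVABLE, UNMOVABLE, RECLAIMABLE, CMA, ISOLATE };

/* Mirrors compaction_capture()'s tests for a pending request of 'wanted' */
static bool would_capture(int wanted, int order, enum mt type)
{
	if (order != wanted)
		return false;		/* exact order only */
	if (type == CMA || type == ISOLATE)
		return false;		/* never pollute these regions */
	if (order < PAGEBLOCK_ORDER && type == MOVABLE)
		return false;		/* keep movable pageblocks clean */
	return true;
}

int main(void)
{
	assert(!would_capture(3, 2, UNMOVABLE)); /* too small: merge first */
	assert(would_capture(3, 3, UNMOVABLE));	 /* merged up: captured */
	assert(!would_capture(3, 3, MOVABLE));	 /* sub-pageblock movable: no */
	return 0;
}
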
@@ -3710,7 +3767,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 		unsigned int alloc_flags, const struct alloc_context *ac,
 		enum compact_priority prio, enum compact_result *compact_result)
 {
-	struct page *page;
+	struct page *page = NULL;
 	unsigned long pflags;
 	unsigned int noreclaim_flag;
 
@@ -3721,13 +3778,15 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	noreclaim_flag = memalloc_noreclaim_save();
 
 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
-									prio);
+								prio, &page);
 
 	memalloc_noreclaim_restore(noreclaim_flag);
 	psi_memstall_leave(&pflags);
 
-	if (*compact_result <= COMPACT_INACTIVE)
+	if (*compact_result <= COMPACT_INACTIVE) {
+		WARN_ON_ONCE(page);
 		return NULL;
+	}
 
 	/*
 	 * At least in one zone compaction wasn't deferred or skipped, so let's
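
page must now start out NULL: try_to_compact_pages() gains an output
parameter through which compaction hands back a captured page, and
WARN_ON_ONCE(page) asserts the invariant that nothing can have been captured
when compaction never ran. The matching prototype change lives elsewhere in
this series; assumed shape of the include/linux/compaction.h declaration:

/* Assumed declaration; the final parameter returns the captured page, or
 * NULL if nothing was captured. */
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
		unsigned int order, unsigned int alloc_flags,
		const struct alloc_context *ac, enum compact_priority prio,
		struct page **capture);
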
@@ -3735,7 +3794,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	 */
 	count_vm_event(COMPACTSTALL);
 
-	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
+	/* Prep a captured page if available */
+	if (page)
+		prep_new_page(page, order, gfp_mask, alloc_flags);
+
+	/* Try to get a page from the freelist if available */
+	if (!page)
+		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
 
 	if (page) {
 		struct zone *zone = page_zone(page);
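
A captured page bypasses get_page_from_freelist() entirely, so it must be
run through prep_new_page() explicitly; freelist pages are prepped inside
that path already, which lets the rest of the function treat both cases
uniformly. The producer side, where the capture request is installed on
current around the zone compaction, belongs to mm/compaction.c in this
series; a condensed sketch from memory (names and details assumed, not the
verbatim code):

/*
 * Condensed sketch: install the capture request on current, run compaction,
 * and hand any page grabbed by __free_one_page() back through the new
 * output parameter.
 */
static enum compact_result compact_zone_order_sketch(struct compact_control *cc,
		struct page **capture)
{
	enum compact_result ret;
	struct capture_control capc = {
		.cc = cc,
		.page = NULL,
	};

	current->capture_control = &capc;	/* visible to __free_one_page() */
	ret = compact_zone(cc, &capc);
	current->capture_control = NULL;

	*capture = capc.page;			/* NULL if nothing was captured */
	return ret;
}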