Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 36 ++++++++++++++++++++++++++++++------
 1 file changed, 30 insertions(+), 6 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 29f4de1423c9..03fef8d987f6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -170,10 +170,16 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
 			PB_migrate, PB_migrate_end);
 }
 
-static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
+static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
 {
 	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
 
+	/* Cluster high-order atomic allocations together */
+	if (unlikely(order > 0) &&
+			(!(gfp_flags & __GFP_WAIT) || in_interrupt()))
+		return MIGRATE_HIGHATOMIC;
+
+	/* Cluster based on mobility */
 	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
 		((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
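A note on the mobility mapping above: the two-bit expression only works if the migratetype constants are numbered MIGRATE_UNMOVABLE = 0, MIGRATE_RECLAIMABLE = 1, MIGRATE_MOVABLE = 2, which is also why the function WARNs when both GFP bits are set (that combination would yield 3). A minimal userspace sketch of the mapping, using illustrative flag values rather than the kernel's:

/*
 * Sketch only: __GFP_* values here are made up for the demo; the real
 * constants live in include/linux/gfp.h.
 */
#include <assert.h>

#define __GFP_RECLAIMABLE	0x80000u	/* illustrative value */
#define __GFP_MOVABLE		0x100000u	/* illustrative value */

enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE };

static int mobility_migratetype(unsigned gfp_flags)
{
	/* __GFP_MOVABLE supplies bit 1, __GFP_RECLAIMABLE bit 0 */
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}

int main(void)
{
	assert(mobility_migratetype(0) == MIGRATE_UNMOVABLE);
	assert(mobility_migratetype(__GFP_RECLAIMABLE) == MIGRATE_RECLAIMABLE);
	assert(mobility_migratetype(__GFP_MOVABLE) == MIGRATE_MOVABLE);
	/* Both bits set would give 3, the case the kernel's WARN_ON catches */
	return 0;
}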
@@ -188,7 +194,7 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
 {
 }
 
-static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
+static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
 {
 	return MIGRATE_UNMOVABLE;
 }
@@ -679,9 +685,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
  * the free lists for the desirable migrate type are depleted
  */
 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
-	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
-	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE },
-	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
+	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_HIGHATOMIC },
+	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_HIGHATOMIC },
+	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_HIGHATOMIC },
+	[MIGRATE_HIGHATOMIC]  = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_MOVABLE },
 };
 
 /*
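The table above preserves an invariant worth stating: each row names every other migratetype exactly once, in preference order, and MIGRATE_HIGHATOMIC sits last in every other row so the atomic reserve is the least-preferred fallback. A hypothetical standalone check of that invariant (not part of the patch):

#include <assert.h>

enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,
       MIGRATE_HIGHATOMIC, MIGRATE_TYPES };

static const int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES - 1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_HIGHATOMIC },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_HIGHATOMIC },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_HIGHATOMIC },
	[MIGRATE_HIGHATOMIC]  = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_MOVABLE },
};

int main(void)
{
	int row, i, seen[MIGRATE_TYPES];

	for (row = 0; row < MIGRATE_TYPES; row++) {
		for (i = 0; i < MIGRATE_TYPES; i++)
			seen[i] = 0;
		for (i = 0; i < MIGRATE_TYPES - 1; i++)
			seen[fallbacks[row][i]]++;
		/* every type except the row's own appears exactly once */
		for (i = 0; i < MIGRATE_TYPES; i++)
			assert(seen[i] == (i != row));
		/* the atomic reserve is always the last resort */
		if (row != MIGRATE_HIGHATOMIC)
			assert(fallbacks[row][MIGRATE_TYPES - 2] == MIGRATE_HIGHATOMIC);
	}
	return 0;
}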
@@ -758,13 +765,24 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 	int current_order;
 	struct page *page;
 	int migratetype, i;
+	int nonatomic_fallback_atomic = 0;
 
+retry:
 	/* Find the largest possible block of pages in the other list */
 	for (current_order = MAX_ORDER-1; current_order >= order;
 						--current_order) {
 		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
 			migratetype = fallbacks[start_migratetype][i];
 
+			/*
+			 * Make it hard to fallback to blocks used for
+			 * high-order atomic allocations
+			 */
+			if (migratetype == MIGRATE_HIGHATOMIC &&
+				start_migratetype != MIGRATE_UNMOVABLE &&
+				!nonatomic_fallback_atomic)
+				continue;
+
 			area = &(zone->free_area[current_order]);
 			if (list_empty(&area->free_list[migratetype]))
 				continue;
@@ -797,6 +815,12 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 		}
 	}
 
+	/* Allow fallback to high-order atomic blocks if memory is that low */
+	if (!nonatomic_fallback_atomic) {
+		nonatomic_fallback_atomic = 1;
+		goto retry;
+	}
+
 	return NULL;
 }
 #else
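Taken together, the two hunks above turn __rmqueue_fallback() into a two-pass search: the first pass skips MIGRATE_HIGHATOMIC blocks for any request that did not start as MIGRATE_UNMOVABLE, and only when every other list is empty does the retry raid the reserve. A minimal userspace model of that control flow, with list_has_pages() standing in for the real free-list check:

#include <stdio.h>

enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,
       MIGRATE_HIGHATOMIC, MIGRATE_TYPES };

/* Pretend only the high-order atomic pool still has free blocks */
static int list_has_pages(int migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static int pick_fallback(const int *fallback_row, int start_migratetype)
{
	int nonatomic_fallback_atomic = 0;	/* as in the patch */
	int i;

retry:
	for (i = 0; i < MIGRATE_TYPES - 1; i++) {
		int migratetype = fallback_row[i];

		/* First pass: leave the atomic reserve untouched */
		if (migratetype == MIGRATE_HIGHATOMIC &&
		    start_migratetype != MIGRATE_UNMOVABLE &&
		    !nonatomic_fallback_atomic)
			continue;
		if (list_has_pages(migratetype))
			return migratetype;
	}
	/* Second pass: memory is that low, so allow the reserve */
	if (!nonatomic_fallback_atomic) {
		nonatomic_fallback_atomic = 1;
		goto retry;
	}
	return -1;
}

int main(void)
{
	const int movable_row[] =
		{ MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_HIGHATOMIC };

	/* First pass skips HIGHATOMIC; the retry takes it: prints 3 */
	printf("%d\n", pick_fallback(movable_row, MIGRATE_MOVABLE));
	return 0;
}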
@@ -1058,7 +1082,7 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
 	struct page *page;
 	int cold = !!(gfp_flags & __GFP_COLD);
 	int cpu;
-	int migratetype = gfpflags_to_migratetype(gfp_flags);
+	int migratetype = allocflags_to_migratetype(gfp_flags, order);
 
 again:
 	cpu = get_cpu();
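The practical effect of the call-site change above is that the same GFP flags can now classify differently depending on order. A userspace sketch, with illustrative flag values (GFP_ATOMIC modeled as a flag set lacking __GFP_WAIT) and in_interrupt() stubbed out:

#include <stdio.h>

#define __GFP_WAIT		0x10u		/* illustrative value */
#define __GFP_RECLAIMABLE	0x80000u	/* illustrative value */
#define __GFP_MOVABLE		0x100000u	/* illustrative value */
#define GFP_ATOMIC		0x20u		/* model: high priority, may not sleep */

enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,
       MIGRATE_HIGHATOMIC };

static int in_interrupt(void) { return 0; }	/* stub for the sketch */

/* Model of the patched classifier, minus unlikely() */
static int allocflags_to_migratetype(unsigned gfp_flags, int order)
{
	if (order > 0 && (!(gfp_flags & __GFP_WAIT) || in_interrupt()))
		return MIGRATE_HIGHATOMIC;
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}

int main(void)
{
	/* Same flags, different order: only the high-order atomic
	 * request lands in the MIGRATE_HIGHATOMIC pool (prints 0, 3) */
	printf("GFP_ATOMIC order 0 -> %d\n", allocflags_to_migratetype(GFP_ATOMIC, 0));
	printf("GFP_ATOMIC order 3 -> %d\n", allocflags_to_migratetype(GFP_ATOMIC, 3));
	return 0;
}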