author    Mel Gorman <mel@csn.ul.ie>    2007-10-16 04:25:53 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-10-16 12:43:00 -0400
commit    e010487dbe09d63cf916fd1b119d17abd0f48207 (patch)
tree      37c7f36913daf4bc0a68a1d0ba1cc30ee0d4e307 /mm/page_alloc.c
parent    e12ba74d8ff3e2f73a583500d7095e406df4d093 (diff)
Group high-order atomic allocations
In rare cases, the kernel needs to allocate a high-order block of pages
without sleeping. For example, this is the case with e1000 cards configured
to use jumbo frames. Migrating or reclaiming pages in this situation is not
an option. This patch groups these allocations together as much as possible
by adding a new MIGRATE_TYPE. The MIGRATE_HIGHATOMIC type is exactly what
it sounds like. Care is taken that pages of other migrate types do not use
the same blocks as high-order atomic allocations.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
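To make the intent concrete, here is a minimal sketch of the kind of caller
this patch targets (the helper and its use are hypothetical, not part of the
patch): GFP_ATOMIC does not include __GFP_WAIT, so an order > 0 request like
this is steered to MIGRATE_HIGHATOMIC by the new allocflags_to_migratetype().

#include <linux/gfp.h>

/*
 * Hypothetical caller: refill a jumbo-frame receive buffer from
 * atomic context. GFP_ATOMIC lacks __GFP_WAIT, so this order-2
 * request is classified as MIGRATE_HIGHATOMIC and is satisfied
 * from (and grouped into) pageblocks set aside for high-order
 * atomic allocations.
 */
static struct page *alloc_jumbo_rx_buffer(void)
{
	return alloc_pages(GFP_ATOMIC, 2);	/* 4 contiguous pages; may fail */
}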
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	36
1 file changed, 30 insertions(+), 6 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 29f4de1423c9..03fef8d987f6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -170,10 +170,16 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
 					PB_migrate, PB_migrate_end);
 }
 
-static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
+static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
 {
 	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
 
+	/* Cluster high-order atomic allocations together */
+	if (unlikely(order > 0) &&
+			(!(gfp_flags & __GFP_WAIT) || in_interrupt()))
+		return MIGRATE_HIGHATOMIC;
+
+	/* Cluster based on mobility */
 	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
 		((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
@@ -188,7 +194,7 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
 {
 }
 
-static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
+static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
 {
 	return MIGRATE_UNMOVABLE;
 }
@@ -679,9 +685,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
  * the free lists for the desirable migrate type are depleted
  */
 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
-	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
-	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE },
-	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
+	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_HIGHATOMIC },
+	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_HIGHATOMIC },
+	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_HIGHATOMIC },
+	[MIGRATE_HIGHATOMIC]  = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_MOVABLE },
 };
 
 /*
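The table above always places MIGRATE_HIGHATOMIC at the end of the fallback
lists of the other three types, so high-order atomic blocks are probed last.
A small standalone sketch that prints the probe order (the enum ordering is
assumed to mirror include/linux/mmzone.h after this patch; illustration only):

#include <stdio.h>

enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,
       MIGRATE_HIGHATOMIC, MIGRATE_TYPES };

static const char *names[MIGRATE_TYPES] = {
	"unmovable", "reclaimable", "movable", "highatomic"
};

static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES - 1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_HIGHATOMIC },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_HIGHATOMIC },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_HIGHATOMIC },
	[MIGRATE_HIGHATOMIC]  = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_MOVABLE },
};

int main(void)
{
	int start, i;

	/* Probe order used when a type's own free list is empty;
	 * MIGRATE_HIGHATOMIC is last in every row that contains it. */
	for (start = 0; start < MIGRATE_TYPES; start++) {
		printf("%-11s falls back to:", names[start]);
		for (i = 0; i < MIGRATE_TYPES - 1; i++)
			printf(" %s", names[fallbacks[start][i]]);
		printf("\n");
	}
	return 0;
}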
@@ -758,13 +765,24 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 	int current_order;
 	struct page *page;
 	int migratetype, i;
+	int nonatomic_fallback_atomic = 0;
 
+retry:
 	/* Find the largest possible block of pages in the other list */
 	for (current_order = MAX_ORDER-1; current_order >= order;
 						--current_order) {
 		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
 			migratetype = fallbacks[start_migratetype][i];
 
+			/*
+			 * Make it hard to fallback to blocks used for
+			 * high-order atomic allocations
+			 */
+			if (migratetype == MIGRATE_HIGHATOMIC &&
+				start_migratetype != MIGRATE_UNMOVABLE &&
+				!nonatomic_fallback_atomic)
+				continue;
+
 			area = &(zone->free_area[current_order]);
 			if (list_empty(&area->free_list[migratetype]))
 				continue;
@@ -797,6 +815,12 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 		}
 	}
 
+	/* Allow fallback to high-order atomic blocks if memory is that low */
+	if (!nonatomic_fallback_atomic) {
+		nonatomic_fallback_atomic = 1;
+		goto retry;
+	}
+
 	return NULL;
 }
 #else
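Stripped of the free-list handling, the change to __rmqueue_fallback() is a
two-pass search. A standalone sketch of the same retry structure (names
invented for illustration; the real code additionally lets MIGRATE_UNMOVABLE
requests through on the first pass):

#include <stdbool.h>

/*
 * Pass one skips the protected pool (the high-order atomic blocks);
 * pass two runs only if pass one found nothing and may raid it.
 */
static int find_pool(const bool has_pages[], int npools, int protected_pool)
{
	bool allow_protected = false;
	int i;

retry:
	for (i = 0; i < npools; i++) {
		if (i == protected_pool && !allow_protected)
			continue;	/* make it hard to fall back here */
		if (has_pages[i])
			return i;	/* usable pool found */
	}

	/* Memory really is that low: permit the protected pool once. */
	if (!allow_protected) {
		allow_protected = true;
		goto retry;
	}
	return -1;	/* out of pages everywhere */
}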
@@ -1058,7 +1082,7 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
 	struct page *page;
 	int cold = !!(gfp_flags & __GFP_COLD);
 	int cpu;
-	int migratetype = gfpflags_to_migratetype(gfp_flags);
+	int migratetype = allocflags_to_migratetype(gfp_flags, order);
 
 again:
 	cpu = get_cpu();