-rw-r--r--  include/linux/mmzone.h           5
-rw-r--r--  include/linux/pageblock-flags.h  2
-rw-r--r--  mm/page_alloc.c                 37
3 files changed, 10 insertions, 34 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 09b2c4f50e38..fef08c6cf75e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -36,9 +36,8 @@
 #define MIGRATE_UNMOVABLE     0
 #define MIGRATE_RECLAIMABLE   1
 #define MIGRATE_MOVABLE       2
-#define MIGRATE_HIGHATOMIC    3
-#define MIGRATE_RESERVE       4
-#define MIGRATE_TYPES         5
+#define MIGRATE_RESERVE       3
+#define MIGRATE_TYPES         4
 
 #define for_each_migratetype_order(order, type) \
 	for (order = 0; order < MAX_ORDER; order++) \
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index fa3b1001894b..5456da6b4ade 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -31,7 +31,7 @@
 
 /* Bit indices that affect a whole block of pages */
 enum pageblock_bits {
-	PB_range(PB_migrate, 3), /* 3 bits required for migrate types */
+	PB_range(PB_migrate, 2), /* 2 bits required for migrate types */
 	NR_PAGEBLOCK_BITS
 };
 
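The two-bit width follows from the mmzone.h hunk above: with MIGRATE_TYPES cut from 5 to 4, values 0..3 fit in two bits, where the old layout needed a third bit to reach MIGRATE_RESERVE == 4. A compile-time sketch of that arithmetic (PB_MIGRATE_BITS is a hypothetical name, and this is standalone C11, not kernel code):

#include <assert.h>

#define MIGRATE_TYPES    4  /* post-patch value from mmzone.h */
#define PB_MIGRATE_BITS  2  /* hypothetical name for the field width */

/* Two bits encode 0..3, exactly the four remaining migrate types */
static_assert((1 << PB_MIGRATE_BITS) >= MIGRATE_TYPES,
	      "pageblock migrate bits must cover all migrate types");

int main(void) { return 0; }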
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8aec4d4601e7..ac8fc51825bb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -174,18 +174,13 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
 					PB_migrate, PB_migrate_end);
 }
 
-static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
+static inline int allocflags_to_migratetype(gfp_t gfp_flags)
 {
 	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
 
 	if (unlikely(page_group_by_mobility_disabled))
 		return MIGRATE_UNMOVABLE;
 
-	/* Cluster high-order atomic allocations together */
-	if (unlikely(order > 0) &&
-			(!(gfp_flags & __GFP_WAIT) || in_interrupt()))
-		return MIGRATE_HIGHATOMIC;
-
 	/* Cluster based on mobility */
 	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
 		((gfp_flags & __GFP_RECLAIMABLE) != 0);
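With the high-order atomic special case gone, the function reduces to the two flag tests in its final return: the movable test supplies bit 1 and the reclaimable test bit 0, so the result indexes MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE or MIGRATE_MOVABLE directly. Both flags at once would produce 3, which the WARN_ON above guards against. A minimal user-space sketch of the mapping (the DEMO_* bits are hypothetical stand-ins, not the real __GFP_* values):

#include <stdio.h>

/* Hypothetical stand-in flag bits for illustration only */
#define DEMO_GFP_RECLAIMABLE 0x1u
#define DEMO_GFP_MOVABLE     0x2u

/* Mirrors the bit arithmetic kept in allocflags_to_migratetype() */
static int demo_migratetype(unsigned int gfp_flags)
{
	return (((gfp_flags & DEMO_GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & DEMO_GFP_RECLAIMABLE) != 0);
}

int main(void)
{
	printf("%d\n", demo_migratetype(0));                    /* 0 = MIGRATE_UNMOVABLE */
	printf("%d\n", demo_migratetype(DEMO_GFP_RECLAIMABLE)); /* 1 = MIGRATE_RECLAIMABLE */
	printf("%d\n", demo_migratetype(DEMO_GFP_MOVABLE));     /* 2 = MIGRATE_MOVABLE */
	return 0;
}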
@@ -706,11 +701,10 @@ static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  * the free lists for the desirable migrate type are depleted
  */
 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
-	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_HIGHATOMIC, MIGRATE_RESERVE },
-	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_HIGHATOMIC, MIGRATE_RESERVE },
-	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_HIGHATOMIC, MIGRATE_RESERVE },
-	[MIGRATE_HIGHATOMIC]  = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_MOVABLE,    MIGRATE_RESERVE },
-	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE,    MIGRATE_RESERVE }, /* Never used */
+	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
+	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
+	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
 };
 
 /*
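For context, __rmqueue_fallback() (patched in the hunks below) walks one row of this table left to right, trying each free list in turn until it can steal pages. A small user-space mock of that lookup, with illustrative enum names rather than the kernel's:

#include <stdio.h>

/* Illustrative mirror of the post-patch migrate types and table */
enum { UNMOVABLE, RECLAIMABLE, MOVABLE, RESERVE, TYPES };

static const int fallbacks[TYPES][TYPES - 1] = {
	[UNMOVABLE]   = { RECLAIMABLE, MOVABLE,   RESERVE },
	[RECLAIMABLE] = { UNMOVABLE,   MOVABLE,   RESERVE },
	[MOVABLE]     = { RECLAIMABLE, UNMOVABLE, RESERVE },
	[RESERVE]     = { RESERVE,     RESERVE,   RESERVE }, /* never used */
};

int main(void)
{
	/* Fallback order tried for a MOVABLE allocation request */
	for (int i = 0; i < TYPES - 1; i++)
		printf("try migratetype %d\n", fallbacks[MOVABLE][i]);
	return 0;
}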
@@ -804,9 +798,7 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 	int current_order;
 	struct page *page;
 	int migratetype, i;
-	int nonatomic_fallback_atomic = 0;
 
-retry:
 	/* Find the largest possible block of pages in the other list */
 	for (current_order = MAX_ORDER-1; current_order >= order;
 						--current_order) {
@@ -816,14 +808,6 @@ retry:
 		/* MIGRATE_RESERVE handled later if necessary */
 		if (migratetype == MIGRATE_RESERVE)
 			continue;
-		/*
-		 * Make it hard to fallback to blocks used for
-		 * high-order atomic allocations
-		 */
-		if (migratetype == MIGRATE_HIGHATOMIC &&
-			start_migratetype != MIGRATE_UNMOVABLE &&
-			!nonatomic_fallback_atomic)
-			continue;
 
 		area = &(zone->free_area[current_order]);
 		if (list_empty(&area->free_list[migratetype]))
@@ -849,8 +833,7 @@ retry:
 					start_migratetype);
 
 		/* Claim the whole block if over half of it is free */
-		if ((pages << current_order) >= (1 << (MAX_ORDER-2)) &&
-				migratetype != MIGRATE_HIGHATOMIC)
+		if ((pages << current_order) >= (1 << (MAX_ORDER-2)))
 			set_pageblock_migratetype(page,
 						start_migratetype);
 
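The retained condition claims the whole block once the amount moved covers at least half of a maximum-order block. A quick sketch of the numbers, assuming the common default of MAX_ORDER == 11 (a kernel configuration choice):

#include <stdio.h>

#define MAX_ORDER 11 /* assumed default; configurable per architecture */

int main(void)
{
	/* Largest buddy block vs. the "over half of it is free" cutoff */
	printf("max-order block: %d pages\n", 1 << (MAX_ORDER - 1)); /* 1024 */
	printf("claim threshold: %d pages\n", 1 << (MAX_ORDER - 2)); /*  512 */
	return 0;
}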
@@ -872,12 +855,6 @@ retry:
 		}
 	}
 
-	/* Allow fallback to high-order atomic blocks if memory is that low */
-	if (!nonatomic_fallback_atomic) {
-		nonatomic_fallback_atomic = 1;
-		goto retry;
-	}
-
 	/* Use MIGRATE_RESERVE rather than fail an allocation */
 	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
 }
@@ -1112,7 +1089,7 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
 	struct page *page;
 	int cold = !!(gfp_flags & __GFP_COLD);
 	int cpu;
-	int migratetype = allocflags_to_migratetype(gfp_flags, order);
+	int migratetype = allocflags_to_migratetype(gfp_flags);
 
 again:
 	cpu = get_cpu();