 include/linux/mmzone.h |  6 ++
 init/Kconfig           | 13 ++++
 mm/page_alloc.c        | 51 +++++++++----
 3 files changed, 56 insertions(+), 14 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 57700038e669..7d7e4fe0fda8 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -33,9 +33,15 @@
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3
 
+#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 #define MIGRATE_UNMOVABLE 0
 #define MIGRATE_MOVABLE 1
 #define MIGRATE_TYPES 2
+#else
+#define MIGRATE_UNMOVABLE 0
+#define MIGRATE_MOVABLE 0
+#define MIGRATE_TYPES 1
+#endif
 
 #define for_each_migratetype_order(order, type) \
 	for (order = 0; order < MAX_ORDER; order++) \
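For context, a minimal sketch (ordinary userspace C, not kernel code) of what the two configurations mean: with the option enabled the allocator keeps MIGRATE_TYPES free lists per order, and with it disabled both migrate-type names alias list 0, so for_each_migratetype_order() degenerates to a plain per-order loop. MAX_ORDER is assumed to be 11 here, as in kernels of this era.

#include <stdio.h>

#define MAX_ORDER 11

/* Pick one branch by compiling with -DCONFIG_PAGE_GROUP_BY_MOBILITY */
#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
#define MIGRATE_UNMOVABLE 0
#define MIGRATE_MOVABLE 1
#define MIGRATE_TYPES 2
#else
#define MIGRATE_UNMOVABLE 0
#define MIGRATE_MOVABLE 0	/* both names alias list 0 */
#define MIGRATE_TYPES 1
#endif

/* Same macro shape as the patch context above */
#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

int main(void)
{
	int order, type, lists = 0;

	for_each_migratetype_order(order, type)
		lists++;

	/* 22 free lists with grouping enabled, 11 without */
	printf("%d free lists to manage\n", lists);
	return 0;
}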
diff --git a/init/Kconfig b/init/Kconfig
index 54f31a191b88..bab643f7717f 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -607,6 +607,19 @@ config BASE_SMALL
 	default 0 if BASE_FULL
 	default 1 if !BASE_FULL
 
+config PAGE_GROUP_BY_MOBILITY
+	bool "Group pages based on their mobility in the page allocator"
+	default y
+	help
+	  The standard allocator fragments memory over time, which means
+	  that high-order allocations can fail even when kswapd is running.
+	  If this option is set, the allocator tries to group page types
+	  based on their ability to migrate or be reclaimed. This is a
+	  best-effort attempt at lowering fragmentation, which a few
+	  workloads care about. The cost is a more complex allocator that
+	  may run slower. If you are interested in working with large pages,
+	  say Y and set /proc/sys/vm/min_free_kbytes to 16374. Otherwise say N.
+
 menuconfig MODULES
 	bool "Enable loadable module support"
 	help
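To make the grouping policy concrete, here is a sketch of the classification this option enables: an allocation is "movable" exactly when the caller passes __GFP_MOVABLE, and everything else lands on the unmovable lists. The flag bit below is illustrative only; the real definition lives in include/linux/gfp.h, and the helper mirrors gfpflags_to_migratetype() from the page_alloc.c hunks that follow.

#include <stdio.h>

#define __GFP_MOVABLE (1u << 3)	/* illustrative bit, not the kernel's value */

#define MIGRATE_UNMOVABLE 0
#define MIGRATE_MOVABLE 1

/* Mirrors gfpflags_to_migratetype() in the patch below */
static int gfpflags_to_migratetype(unsigned int gfp_flags)
{
	/* A single flag decides the grouping */
	return (gfp_flags & __GFP_MOVABLE) != 0;
}

int main(void)
{
	printf("page-cache style (movable):  type %d\n",
	       gfpflags_to_migratetype(__GFP_MOVABLE));
	printf("kernel-internal (unmovable): type %d\n",
	       gfpflags_to_migratetype(0));
	return 0;
}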
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e3e726bd2858..fea1e3b56c3d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -158,6 +158,7 @@ int nr_node_ids __read_mostly = MAX_NUMNODES;
 EXPORT_SYMBOL(nr_node_ids);
 #endif
 
+#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 static inline int get_pageblock_migratetype(struct page *page)
 {
 	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
@@ -174,6 +175,22 @@ static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
 	return ((gfp_flags & __GFP_MOVABLE) != 0);
 }
 
+#else
+static inline int get_pageblock_migratetype(struct page *page)
+{
+	return MIGRATE_UNMOVABLE;
+}
+
+static void set_pageblock_migratetype(struct page *page, int migratetype)
+{
+}
+
+static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
+{
+	return MIGRATE_UNMOVABLE;
+}
+#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
+
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
@@ -653,6 +670,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 	return 0;
 }
 
+#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
@@ -709,6 +727,13 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 
 	return NULL;
 }
+#else
+static struct page *__rmqueue_fallback(struct zone *zone, int order,
+						int start_migratetype)
+{
+	return NULL;
+}
+#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Do the hard work of removing an element from the buddy allocator.
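The fallback array itself falls outside the hunk above; as an assumed illustration of the structure the comment describes, a two-type table might look like the sketch below, where each migrate type lists the other types to steal from when its own free lists are empty. This is a guess at the shape for illustration, not the patch's actual initializer.

#include <stdio.h>

#define MIGRATE_UNMOVABLE 0
#define MIGRATE_MOVABLE 1
#define MIGRATE_TYPES 2

/* Hypothetical two-type fallback table; the real one is in mm/page_alloc.c */
static const int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES - 1] = {
	[MIGRATE_UNMOVABLE] = { MIGRATE_MOVABLE },
	[MIGRATE_MOVABLE]   = { MIGRATE_UNMOVABLE },
};

int main(void)
{
	int type;

	for (type = 0; type < MIGRATE_TYPES; type++)
		printf("type %d falls back to type %d\n",
		       type, fallbacks[type][0]);
	return 0;
}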
@@ -953,27 +978,25 @@ again:
 			if (unlikely(!pcp->count))
 				goto failed;
 		}
+
+#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 		/* Find a page of the appropriate migrate type */
-		list_for_each_entry(page, &pcp->list, lru) {
-			if (page_private(page) == migratetype) {
-				list_del(&page->lru);
-				pcp->count--;
+		list_for_each_entry(page, &pcp->list, lru)
+			if (page_private(page) == migratetype)
 				break;
-			}
-		}
 
-		/*
-		 * Check if a page of the appropriate migrate type
-		 * was found. If not, allocate more to the pcp list
-		 */
-		if (&page->lru == &pcp->list) {
+		/* Allocate more to the pcp list if necessary */
+		if (unlikely(&page->lru == &pcp->list)) {
 			pcp->count += rmqueue_bulk(zone, 0,
 					pcp->batch, &pcp->list, migratetype);
 			page = list_entry(pcp->list.next, struct page, lru);
-			VM_BUG_ON(page_private(page) != migratetype);
-			list_del(&page->lru);
-			pcp->count--;
 		}
+#else
+		page = list_entry(pcp->list.next, struct page, lru);
+#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
+
+		list_del(&page->lru);
+		pcp->count--;
 	} else {
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);
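A note on the termination test in the hunk above: list_for_each_entry() leaves its cursor pointing back at the list head itself when the loop runs to completion without a break, so "&page->lru == &pcp->list" is the standard "nothing found" check. A standalone sketch of the expanded idiom, using plain C stand-ins for the <linux/list.h> helpers:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

struct page_sketch {
	struct list_head lru;
	int private;	/* stands in for page_private(page) */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct list_head pcp_list = { &pcp_list, &pcp_list };	/* empty list */
	struct page_sketch *page;
	int migratetype = 1;

	/* Hand expansion of list_for_each_entry(page, &pcp_list, lru) */
	for (page = container_of(pcp_list.next, struct page_sketch, lru);
	     &page->lru != &pcp_list;
	     page = container_of(page->lru.next, struct page_sketch, lru))
		if (page->private == migratetype)
			break;

	/* Same test as the patch: cursor wrapped to the head means not found */
	if (&page->lru == &pcp_list)
		printf("no page of type %d on the pcp list; refill needed\n",
		       migratetype);
	return 0;
}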