author    Joonsoo Kim <iamjoonsoo.kim@lge.com>    2016-03-15 17:57:51 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-03-15 19:55:16 -0400
commit    7cf91a98e607c2f935dbcc177d70011e95b8faff (patch)
tree      8a57a26127dc9c96059ceedebc2cf13e5d124e3c /mm/compaction.c
parent    e1409c325fdc1fef7b3d8025c51892355f065d15 (diff)
mm/compaction: speed up pageblock_pfn_to_page() when zone is contiguous
There is a report of a performance drop during hugepage allocation in which half of the CPU time is spent in pageblock_pfn_to_page() during compaction [1].

In that workload, compaction is triggered to make hugepages, but most pageblocks are unavailable for compaction due to their pageblock type and skip bit, so compaction usually fails. The most costly operation in this case is finding a valid pageblock while scanning the whole zone range. To check whether a pageblock is valid to compact, a valid pfn within the pageblock is required, and we can obtain it by calling pageblock_pfn_to_page(). This function checks whether the pageblock lies within a single zone and returns a valid pfn if possible. The problem is that we need to perform this check every time before scanning a pageblock, even if we re-visit it, and this turns out to be very expensive in this workload.

Although we have no way to skip this pageblock check on systems where holes exist at arbitrary positions, we can use a cached value for zone contiguity and just do pfn_to_page() on systems where no hole exists. This optimization considerably speeds up the above workload.

         Before      After
    Max: 1096 MB/s   1325 MB/s
    Min:  635 MB/s   1015 MB/s
    Avg:  899 MB/s   1194 MB/s

Avg is improved by roughly 30% [2].

[1]: http://www.spinics.net/lists/linux-mm/msg97378.html
[2]: https://lkml.org/lkml/2015/12/9/23

[akpm@linux-foundation.org: don't forget to restore zone->contiguous on error path, per Vlastimil]
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reported-by: Aaron Lu <aaron.lu@intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Aaron Lu <aaron.lu@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
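The fast path described above reduces to checking a cached per-zone contiguity flag before falling back to the full per-pageblock validation. A minimal sketch of the idea, assuming the full check is kept in a helper named __pageblock_pfn_to_page() (that helper name is an assumption here; zone->contiguous is the cached value referenced in the note above):

/*
 * Sketch only: if the zone is known to have no holes, any pfn in the
 * pageblock maps straight to a page, so the expensive first/last-page
 * zone validation can be skipped entirely.
 */
static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	/* Assumed helper carrying the original per-pageblock checks */
	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

A caller that changes a zone's layout would need to clear and later re-evaluate such a flag, which is presumably what the "restore zone->contiguous on error path" note above refers to.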
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--	mm/compaction.c	43
1 file changed, 0 insertions, 43 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 8ce36ebc8d15..93f71d968098 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -71,49 +71,6 @@ static inline bool migrate_async_suitable(int migratetype)
 	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
 }
 
-/*
- * Check that the whole (or subset of) a pageblock given by the interval of
- * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
- * with the migration of free compaction scanner. The scanners then need to
- * use only pfn_valid_within() check for arches that allow holes within
- * pageblocks.
- *
- * Return struct page pointer of start_pfn, or NULL if checks were not passed.
- *
- * It's possible on some configurations to have a setup like node0 node1 node0
- * i.e. it's possible that all pages within a zones range of pages do not
- * belong to a single zone. We assume that a border between node0 and node1
- * can occur within a single pageblock, but not a node0 node1 node0
- * interleaving within a single pageblock. It is therefore sufficient to check
- * the first and last page of a pageblock and avoid checking each individual
- * page in a pageblock.
- */
-static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
-				unsigned long end_pfn, struct zone *zone)
-{
-	struct page *start_page;
-	struct page *end_page;
-
-	/* end_pfn is one past the range we are checking */
-	end_pfn--;
-
-	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
-		return NULL;
-
-	start_page = pfn_to_page(start_pfn);
-
-	if (page_zone(start_page) != zone)
-		return NULL;
-
-	end_page = pfn_to_page(end_pfn);
-
-	/* This gives a shorter code than deriving page_zone(end_page) */
-	if (page_zone_id(start_page) != page_zone_id(end_page))
-		return NULL;
-
-	return start_page;
-}
-
 #ifdef CONFIG_COMPACTION
 
 /* Do not skip compaction more than 64 times */