diff options
author | Joonsoo Kim <iamjoonsoo.kim@lge.com> | 2016-03-15 17:57:45 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-15 19:55:16 -0400 |
commit | 623446e4dc45b37740268165107cc63abb3022f0 (patch) | |
tree | a64c998a96634a0747febeb3affa6143dff6ecab /mm | |
parent | 5aa174801fc85b4b48a2520b2a50ee78990b7925 (diff) |
mm/compaction: fix invalid free_pfn and compact_cached_free_pfn
free_pfn and compact_cached_free_pfn are the pointers that remember the
restart position of the freepage scanner. When they are reset or invalid,
we set them to zone_end_pfn because freepage scanner works in reverse
direction. But, because zone range is defined as [zone_start_pfn,
zone_end_pfn), zone_end_pfn is invalid to access. Therefore, we should
not store it to free_pfn and compact_cached_free_pfn. Instead, we need
to store zone_end_pfn - 1 to them. There is one more thing we should
consider. The freepage scanner scans in reverse, one pageblock at a time. If
free_pfn and compact_cached_free_pfn are set to the middle of a pageblock, it
regards that situation as meaning it has already scanned the front part of the
pageblock, so we lose the opportunity to scan there. To fix this up, this patch
does round_down() to guarantee that the reset position will be pageblock aligned.
Note that thanks to the current pageblock_pfn_to_page() implementation,
actual access to zone_end_pfn doesn't happen until now. But, following
patch will change pageblock_pfn_to_page() so this patch is needed from
now on.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Aaron Lu <aaron.lu@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/compaction.c | 9 |
1 files changed, 5 insertions, 4 deletions
diff --git a/mm/compaction.c b/mm/compaction.c index 585de54dbe8c..56fa3216a6b4 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
@@ -200,7 +200,8 @@ static void reset_cached_positions(struct zone *zone) | |||
200 | { | 200 | { |
201 | zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; | 201 | zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; |
202 | zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; | 202 | zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; |
203 | zone->compact_cached_free_pfn = zone_end_pfn(zone); | 203 | zone->compact_cached_free_pfn = |
204 | round_down(zone_end_pfn(zone) - 1, pageblock_nr_pages); | ||
204 | } | 205 | } |
205 | 206 | ||
206 | /* | 207 | /* |
@@ -1371,11 +1372,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) | |||
1371 | */ | 1372 | */ |
1372 | cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; | 1373 | cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; |
1373 | cc->free_pfn = zone->compact_cached_free_pfn; | 1374 | cc->free_pfn = zone->compact_cached_free_pfn; |
1374 | if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) { | 1375 | if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { |
1375 | cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1); | 1376 | cc->free_pfn = round_down(end_pfn - 1, pageblock_nr_pages); |
1376 | zone->compact_cached_free_pfn = cc->free_pfn; | 1377 | zone->compact_cached_free_pfn = cc->free_pfn; |
1377 | } | 1378 | } |
1378 | if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) { | 1379 | if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { |
1379 | cc->migrate_pfn = start_pfn; | 1380 | cc->migrate_pfn = start_pfn; |
1380 | zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; | 1381 | zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; |
1381 | zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; | 1382 | zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; |