author     Ming Ling <ming.ling@spreadtrum.com>               2016-12-12 19:42:26 -0500
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2017-01-12 05:39:32 -0500
commit     5d7d362abc408e69ee229459fba21f833f2f5bf1 (patch)
tree       a2036d2c05a26b9e90e9338e85ee47f48d187c9e /mm
parent     dc1b6d0aed97a5e7c95afd1a2eff20151f440513 (diff)
mm, compaction: fix NR_ISOLATED_* stats for pfn based migration
commit 6afcf8ef0ca0a69d014f8edb613d94821f0ae700 upstream.
Since commit bda807d44454 ("mm: migrate: support non-lru movable page
migration"), isolate_migratepages_block() can isolate !PageLRU pages,
which acct_isolated() then accounts as NR_ISOLATED_*. Accounting these
non-lru pages as NR_ISOLATED_{ANON,FILE} doesn't make any sense, and it
can misguide heuristics based on those counters, such as
pgdat_reclaimable_pages() and too_many_isolated(), which would lead to
unexpected stalls during direct reclaim without any good reason. Note
that __alloc_contig_migrate_range() can isolate a lot of pages at once.
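For context, compaction throttles itself via too_many_isolated(), which
compares the NR_ISOLATED_* counters against the size of the node's LRU
lists. A simplified sketch of that heuristic (close to, but not copied
verbatim from, the kernel code of this era), shown only to illustrate how
inflated isolation counters cause stalls:

	/*
	 * Sketch: direct compaction waits when isolated pages outnumber
	 * half of the remaining LRU pages on the node.  Non-LRU movable
	 * pages wrongly charged to NR_ISOLATED_* inflate "isolated" and
	 * can trip this check even though the LRU is barely touched.
	 */
	static bool too_many_isolated(struct zone *zone)
	{
		unsigned long active, inactive, isolated;

		inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
			   node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
		active   = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
			   node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
		isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
			   node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);

		return isolated > (inactive + active) / 2;
	}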
On mobile devices such as an Android phone with 512MB of RAM, a large
zram swap may be in use. In some cases zram (zsmalloc) holds a lot of
non-lru but migratable pages, for example:
MemTotal: 468148 kB
Normal free:5620kB
Free swap:4736kB
Total swap:409596kB
ZRAM: 164616kB(zsmalloc non-lru pages)
active_anon:60700kB
inactive_anon:60744kB
active_file:34420kB
inactive_file:37532kB
Fix this by accounting only lru pages to NR_ISOLATED_* in
isolate_migratepages_block(), right after they are isolated and we
still know they were on the LRU. Drop acct_isolated() because it is
called after the fact, when that information has already been lost;
batching the per-cpu counter updates doesn't bring much improvement
anyway. Also make sure that we uncharge only LRU pages when putting
them back on the LRU in putback_movable_pages(), or when
unmap_and_move() migrates the page.
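The per-type accounting relies on NR_ISOLATED_FILE directly following
NR_ISOLATED_ANON in the node stat enum, so adding page_is_file_cache()
(0 for anon, 1 for file-backed) selects the matching counter. A minimal
sketch of the pattern this patch applies at the isolation and
putback/migration sites:

	/* Charge the page while we still know it came off an LRU list. */
	inc_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));

	/*
	 * Later, uncharge only pages that really were accounted; non-LRU
	 * movable pages are recognized by __PageMovable() and skipped.
	 */
	if (likely(!__PageMovable(page)))
		dec_node_page_state(page,
				NR_ISOLATED_ANON + page_is_file_cache(page));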
[mhocko@suse.com: replace acct_isolated() with direct counting]
Fixes: bda807d44454 ("mm: migrate: support non-lru movable page migration")
Link: http://lkml.kernel.org/r/20161019080240.9682-1-mhocko@kernel.org
Signed-off-by: Ming Ling <ming.ling@spreadtrum.com>
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/compaction.c  25
-rw-r--r--  mm/migrate.c     15
2 files changed, 14 insertions, 26 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 0409a4ad6ea1..70e6bec46dc2 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -634,22 +634,6 @@ isolate_freepages_range(struct compact_control *cc,
 	return pfn;
 }
 
-/* Update the number of anon and file isolated pages in the zone */
-static void acct_isolated(struct zone *zone, struct compact_control *cc)
-{
-	struct page *page;
-	unsigned int count[2] = { 0, };
-
-	if (list_empty(&cc->migratepages))
-		return;
-
-	list_for_each_entry(page, &cc->migratepages, lru)
-		count[!!page_is_file_cache(page)]++;
-
-	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, count[0]);
-	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, count[1]);
-}
-
 /* Similar to reclaim, but different enough that they don't share logic */
 static bool too_many_isolated(struct zone *zone)
 {
@@ -866,6 +850,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 		/* Successfully isolated */
 		del_page_from_lru_list(page, lruvec, page_lru(page));
+		inc_node_page_state(page,
+				NR_ISOLATED_ANON + page_is_file_cache(page));
 
 isolate_success:
 		list_add(&page->lru, &cc->migratepages);
@@ -902,7 +888,6 @@ isolate_fail:
 			spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 			locked = false;
 		}
-		acct_isolated(zone, cc);
 		putback_movable_pages(&cc->migratepages);
 		cc->nr_migratepages = 0;
 		cc->last_migrated_pfn = 0;
@@ -988,7 +973,6 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
 			break;
 	}
-	acct_isolated(cc->zone, cc);
 
 	return pfn;
 }
@@ -1258,10 +1242,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 		low_pfn = isolate_migratepages_block(cc, low_pfn,
 						block_end_pfn, isolate_mode);
 
-		if (!low_pfn || cc->contended) {
-			acct_isolated(zone, cc);
+		if (!low_pfn || cc->contended)
 			return ISOLATE_ABORT;
-		}
 
 		/*
 		 * Either we isolated something and proceed with migration. Or
@@ -1271,7 +1253,6 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 		break;
 	}
 
-	acct_isolated(zone, cc);
 	/* Record where migration scanner will be restarted. */
 	cc->migrate_pfn = low_pfn;
 
diff --git a/mm/migrate.c b/mm/migrate.c
index 99250aee1ac1..66ce6b490b13 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -168,8 +168,6 @@ void putback_movable_pages(struct list_head *l)
 			continue;
 		}
 		list_del(&page->lru);
-		dec_node_page_state(page, NR_ISOLATED_ANON +
-				page_is_file_cache(page));
 		/*
 		 * We isolated non-lru movable page so here we can use
 		 * __PageMovable because LRU page's mapping cannot have
@@ -186,6 +184,8 @@ void putback_movable_pages(struct list_head *l)
 			put_page(page);
 		} else {
 			putback_lru_page(page);
+			dec_node_page_state(page, NR_ISOLATED_ANON +
+					page_is_file_cache(page));
 		}
 	}
 }
@@ -1121,8 +1121,15 @@ out:
 	 * restored.
 	 */
 	list_del(&page->lru);
-	dec_node_page_state(page, NR_ISOLATED_ANON +
-			page_is_file_cache(page));
+
+	/*
+	 * Compaction can migrate also non-LRU pages which are
+	 * not accounted to NR_ISOLATED_*. They can be recognized
+	 * as __PageMovable
+	 */
+	if (likely(!__PageMovable(page)))
+		dec_node_page_state(page, NR_ISOLATED_ANON +
+				page_is_file_cache(page));
 }
 
 /*