author	Michal Hocko <mhocko@suse.com>	2017-02-22 18:45:55 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-22 19:41:30 -0500
commit	f0958906cd2bf3730cd7938b8af80a1c23e8ac06 (patch)
tree	15d54ac8207f998554d5a35e1273322ae52136ac
parent	bc71226b0690c21bdf50c9c9d08c5dc9ef98764e (diff)
mm, vmscan: do not count freed pages as PGDEACTIVATE
PGDEACTIVATE represents the number of pages moved from the active list to
the inactive list; at least that was the original motivation for the
counter.  move_active_pages_to_lru, however, also counts pages which got
freed in the meantime as deactivated.  This is a very rare event, and
counting those pages is not harmful in itself, but it makes the code more
convoluted than necessary: we have to track both all pages moved and those
which were freed, which is confusing.  After this patch PGDEACTIVATE has a
clearer semantic and counts only pages actually moved from the active to
the inactive list, which is a plus.

Link: http://lkml.kernel.org/r/20170112211221.17636-1-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/vmscan.c	4
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index de400d1eac0e..277e105646a5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1878,7 +1878,6 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
 				     enum lru_list lru)
 {
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
-	unsigned long pgmoved = 0;
 	struct page *page;
 	int nr_pages;
 	int nr_moved = 0;
@@ -1893,7 +1892,6 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
 		nr_pages = hpage_nr_pages(page);
 		update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
 		list_move(&page->lru, &lruvec->lists[lru]);
-		pgmoved += nr_pages;
 
 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
@@ -1913,7 +1911,7 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
 	}
 
 	if (!is_active_lru(lru))
-		__count_vm_events(PGDEACTIVATE, pgmoved);
+		__count_vm_events(PGDEACTIVATE, nr_moved);
 
 	return nr_moved;
 }
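
The counting difference can be illustrated with a minimal standalone sketch; this is not the kernel code, and the toy_page type and move_and_count() helper are hypothetical stand-ins.  The removed pgmoved counter was bumped for every page taken off the active list, including pages whose last reference dropped during the move, while nr_moved is bumped only for pages that actually remain on the inactive list, and that is the value now fed to PGDEACTIVATE.

	/* Toy illustration only -- simplified stand-in for move_active_pages_to_lru(). */
	#include <stdio.h>

	struct toy_page {
		int refcount;	/* last reference dropped => page is freed */
	};

	/* Returns the number of pages that actually end up on the inactive list.
	 * Pages freed during the move are not counted; this mirrors the value the
	 * patch now passes to __count_vm_events(PGDEACTIVATE, ...). */
	static unsigned int move_and_count(struct toy_page *pages, int n)
	{
		unsigned int nr_moved = 0;

		for (int i = 0; i < n; i++) {
			/* ... page is moved to the inactive list here ... */
			if (--pages[i].refcount == 0) {
				/* freed in the meantime: the old pgmoved counter
				 * still counted this page, nr_moved skips it */
				continue;
			}
			nr_moved++;
		}
		return nr_moved;
	}

	int main(void)
	{
		struct toy_page pages[] = { { 2 }, { 1 }, { 2 } };	/* middle page gets freed */

		/* prints 2: only pages still alive after the move are counted */
		printf("PGDEACTIVATE += %u\n", move_and_count(pages, 3));
		return 0;
	}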