author	Johannes Weiner <hannes@cmpxchg.org>	2017-05-03 17:52:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-03 18:52:08 -0400
commit	c822f6223d03c2c5b026a21da09c6b6d523258cd (patch)
tree	ac2fdb0a28b2be80c1c1fd407d8a96b6155530e6
parent	688035f729dcd9a98152c827338805a061f5c6fa (diff)
mm: delete NR_PAGES_SCANNED and pgdat_reclaimable()
NR_PAGES_SCANNED counts the number of pages scanned since the last page
free event in the allocator.  This was used primarily to measure the
reclaimability of zones and nodes, and to determine when reclaim should
give up on them.  In that role, it has been replaced in the preceding
patches by a different mechanism.

Being implemented as an efficient vmstat counter, it was automatically
exported to userspace as well.  It is however unlikely that anyone
outside the kernel is using this counter in any meaningful way.

Remove the counter and the unused pgdat_reclaimable().

Link: http://lkml.kernel.org/r/20170228214007.5621-8-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Jia He <hejianet@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
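Since the counter was an ordinary vmstat item, it appeared as a line in
/proc/vmstat. A minimal userspace sketch of how a monitoring tool would
have read it, and what it sees once this patch removes the field (the
read_vmstat() helper is illustrative, not part of the patch):

#include <stdio.h>
#include <string.h>

/* Illustrative helper: look up one counter in /proc/vmstat.
 * Returns 0 on success, -1 if the field is absent -- which is
 * exactly what happens for "nr_pages_scanned" after this patch. */
static int read_vmstat(const char *field, unsigned long *val)
{
	char name[64];
	unsigned long v;
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return -1;
	while (fscanf(f, "%63s %lu", name, &v) == 2) {
		if (!strcmp(name, field)) {
			fclose(f);
			*val = v;
			return 0;
		}
	}
	fclose(f);
	return -1;
}

int main(void)
{
	unsigned long scanned;

	if (read_vmstat("nr_pages_scanned", &scanned))
		printf("nr_pages_scanned: not exported by this kernel\n");
	else
		printf("nr_pages_scanned: %lu\n", scanned);
	return 0;
}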
-rw-r--r--	include/linux/mmzone.h	1
-rw-r--r--	mm/internal.h	1
-rw-r--r--	mm/page_alloc.c	11
-rw-r--r--	mm/vmscan.c	9
-rw-r--r--	mm/vmstat.c	22
5 files changed, 3 insertions, 41 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d2c50ab6ae40..04e0969966f6 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -149,7 +149,6 @@ enum node_stat_item {
 	NR_UNEVICTABLE,		/*  "     "     "   "       "  */
 	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
-	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
 	WORKINGSET_REFAULT,
 	WORKINGSET_ACTIVATE,
 	WORKINGSET_NODERECLAIM,
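Removing an entry from node_stat_item shifts the value of every item
after it, which is why the name table vmstat_text[] in mm/vmstat.c must
drop the matching string in the same patch (see the mm/vmstat.c hunk
below). A minimal sketch of that index/name coupling, using shortened
hypothetical names:

/* Sketch only (hypothetical names); the real tables live in
 * include/linux/mmzone.h and mm/vmstat.c.  Deleting an enum entry
 * without deleting its string would print every later counter
 * under the wrong label in /proc/vmstat. */
enum demo_stat_item {
	DEMO_NR_ISOLATED_FILE,
	DEMO_WORKINGSET_REFAULT,	/* moves up one slot after the removal */
	DEMO_NR_STAT_ITEMS
};

static const char * const demo_stat_text[DEMO_NR_STAT_ITEMS] = {
	"nr_isolated_file",
	"workingset_refault",
};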
diff --git a/mm/internal.h b/mm/internal.h
index e5a0e0ec2177..a36719572eb9 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -91,7 +91,6 @@ extern unsigned long highest_memmap_pfn;
  */
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
-extern bool pgdat_reclaimable(struct pglist_data *pgdat);
 
 /*
  * in mm/rmap.c:
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 42c0543e46c3..6994f28f769c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1090,14 +1090,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
 	int migratetype = 0;
 	int batch_free = 0;
-	unsigned long nr_scanned;
 	bool isolated_pageblocks;
 
 	spin_lock(&zone->lock);
 	isolated_pageblocks = has_isolate_pageblock(zone);
-	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
-	if (nr_scanned)
-		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
 
 	while (count) {
 		struct page *page;
@@ -1150,12 +1146,7 @@ static void free_one_page(struct zone *zone,
 			unsigned int order,
 			int migratetype)
 {
-	unsigned long nr_scanned;
 	spin_lock(&zone->lock);
-	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
-	if (nr_scanned)
-		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
-
 	if (unlikely(has_isolate_pageblock(zone) ||
 		     is_migrate_isolate(migratetype))) {
 		migratetype = get_pfnblock_migratetype(page, pfn);
@@ -4504,7 +4495,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 #endif
 			" writeback_tmp:%lukB"
 			" unstable:%lukB"
-			" pages_scanned:%lu"
 			" all_unreclaimable? %s"
 			"\n",
 			pgdat->node_id,
@@ -4527,7 +4517,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 #endif
 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
 			K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
-			node_page_state(pgdat, NR_PAGES_SCANNED),
 			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
 				"yes" : "no");
 	}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9117ae8d49ee..02f2eb51b33e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -230,12 +230,6 @@ unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat)
 	return nr;
 }
 
-bool pgdat_reclaimable(struct pglist_data *pgdat)
-{
-	return node_page_state_snapshot(pgdat, NR_PAGES_SCANNED) <
-		pgdat_reclaimable_pages(pgdat) * 6;
-}
-
 /**
  * lruvec_lru_size - Returns the number of pages on the given LRU list.
  * @lruvec: lru vector
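The deleted pgdat_reclaimable() captured the old give-up rule: a node
counted as reclaimable only while the pages scanned since the last page
free stayed below six times its reclaimable pages. A standalone
restatement of that check, assuming plain unsigned long inputs in place
of the pgdat accessors:

/* Standalone sketch of the removed heuristic (not kernel code).
 * Example: with 1000000 reclaimable pages, the node was treated as
 * unreclaimable once 6000000 pages had been scanned without a page
 * free resetting NR_PAGES_SCANNED. */
static inline int was_pgdat_reclaimable(unsigned long nr_scanned,
					unsigned long nr_reclaimable)
{
	return nr_scanned < nr_reclaimable * 6;
}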
@@ -1750,7 +1744,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
 	if (global_reclaim(sc)) {
-		__mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
 		if (current_is_kswapd())
 			__count_vm_events(PGSCAN_KSWAPD, nr_scanned);
 		else
@@ -1953,8 +1946,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
-	if (global_reclaim(sc))
-		__mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
 	__count_vm_events(PGREFILL, nr_scanned);
 
 	spin_unlock_irq(&pgdat->lru_lock);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index baee70dafba8..c8d15051616b 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -954,7 +954,6 @@ const char * const vmstat_text[] = {
954 "nr_unevictable", 954 "nr_unevictable",
955 "nr_isolated_anon", 955 "nr_isolated_anon",
956 "nr_isolated_file", 956 "nr_isolated_file",
957 "nr_pages_scanned",
958 "workingset_refault", 957 "workingset_refault",
959 "workingset_activate", 958 "workingset_activate",
960 "workingset_nodereclaim", 959 "workingset_nodereclaim",
@@ -1378,7 +1377,6 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1378 "\n min %lu" 1377 "\n min %lu"
1379 "\n low %lu" 1378 "\n low %lu"
1380 "\n high %lu" 1379 "\n high %lu"
1381 "\n node_scanned %lu"
1382 "\n spanned %lu" 1380 "\n spanned %lu"
1383 "\n present %lu" 1381 "\n present %lu"
1384 "\n managed %lu", 1382 "\n managed %lu",
@@ -1386,7 +1384,6 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 		   min_wmark_pages(zone),
 		   low_wmark_pages(zone),
 		   high_wmark_pages(zone),
-		   node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED),
 		   zone->spanned_pages,
 		   zone->present_pages,
 		   zone->managed_pages);
@@ -1586,22 +1583,9 @@ int vmstat_refresh(struct ctl_table *table, int write,
 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
 		val = atomic_long_read(&vm_zone_stat[i]);
 		if (val < 0) {
-			switch (i) {
-			case NR_PAGES_SCANNED:
-				/*
-				 * This is often seen to go negative in
-				 * recent kernels, but not to go permanently
-				 * negative.  Whilst it would be nicer not to
-				 * have exceptions, rooting them out would be
-				 * another task, of rather low priority.
-				 */
-				break;
-			default:
-				pr_warn("%s: %s %ld\n",
-					__func__, vmstat_text[i], val);
-				err = -EINVAL;
-				break;
-			}
+			pr_warn("%s: %s %ld\n",
+				__func__, vmstat_text[i], val);
+			err = -EINVAL;
 		}
 	}
 	if (err)
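vmstat_refresh() runs when userspace reads /proc/sys/vm/stat_refresh,
folding per-cpu deltas into the global counters. With the
NR_PAGES_SCANNED exception gone, any counter found negative during that
pass now warns and fails the read. A minimal userspace sketch of
triggering a refresh (error handling reduced to perror):

#include <stdio.h>

int main(void)
{
	char buf[8];
	FILE *f = fopen("/proc/sys/vm/stat_refresh", "r");

	if (!f) {
		perror("open stat_refresh");
		return 1;
	}
	/* The read itself triggers the refresh; after this patch it
	 * fails with EINVAL if any vmstat counter is negative. */
	if (fread(buf, 1, sizeof(buf), f) == 0 && ferror(f))
		perror("read stat_refresh");
	fclose(f);
	return 0;
}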