-rw-r--r--  arch/s390/appldata/appldata_mem.c               2
-rw-r--r--  arch/tile/mm/pgtable.c                          8
-rw-r--r--  drivers/base/node.c                            16
-rw-r--r--  drivers/staging/android/lowmemorykiller.c       4
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_cache.c   6
-rw-r--r--  fs/fs-writeback.c                               4
-rw-r--r--  fs/fuse/file.c                                  8
-rw-r--r--  fs/nfs/internal.h                               2
-rw-r--r--  fs/nfs/write.c                                  2
-rw-r--r--  fs/proc/meminfo.c                              16
-rw-r--r--  include/linux/mmzone.h                         19
-rw-r--r--  include/trace/events/writeback.h                6
-rw-r--r--  mm/filemap.c                                   12
-rw-r--r--  mm/huge_memory.c                                4
-rw-r--r--  mm/khugepaged.c                                 6
-rw-r--r--  mm/migrate.c                                   14
-rw-r--r--  mm/page-writeback.c                            47
-rw-r--r--  mm/page_alloc.c                                74
-rw-r--r--  mm/rmap.c                                      10
-rw-r--r--  mm/shmem.c                                     14
-rw-r--r--  mm/swap_state.c                                 4
-rw-r--r--  mm/util.c                                       4
-rw-r--r--  mm/vmscan.c                                    16
-rw-r--r--  mm/vmstat.c                                    19
24 files changed, 155 insertions(+), 162 deletions(-)
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index edcf2a706942..598df5708501 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -102,7 +102,7 @@ static void appldata_get_mem_data(void *data)
 	mem_data->totalhigh = P2K(val.totalhigh);
 	mem_data->freehigh = P2K(val.freehigh);
 	mem_data->bufferram = P2K(val.bufferram);
-	mem_data->cached = P2K(global_page_state(NR_FILE_PAGES)
+	mem_data->cached = P2K(global_node_page_state(NR_FILE_PAGES)
 				- val.bufferram);
 
 	si_swapinfo(&val);
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index c606b0ef2f7e..7cc6ee7f1a58 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -49,16 +49,16 @@ void show_mem(unsigned int filter)
 	       global_node_page_state(NR_ACTIVE_FILE)),
 	       (global_node_page_state(NR_INACTIVE_ANON) +
 	       global_node_page_state(NR_INACTIVE_FILE)),
-	       global_page_state(NR_FILE_DIRTY),
-	       global_page_state(NR_WRITEBACK),
-	       global_page_state(NR_UNSTABLE_NFS),
+	       global_node_page_state(NR_FILE_DIRTY),
+	       global_node_page_state(NR_WRITEBACK),
+	       global_node_page_state(NR_UNSTABLE_NFS),
 	       global_page_state(NR_FREE_PAGES),
 	       (global_page_state(NR_SLAB_RECLAIMABLE) +
 	       global_page_state(NR_SLAB_UNRECLAIMABLE)),
 	       global_node_page_state(NR_FILE_MAPPED),
 	       global_page_state(NR_PAGETABLE),
 	       global_page_state(NR_BOUNCE),
-	       global_page_state(NR_FILE_PAGES),
+	       global_node_page_state(NR_FILE_PAGES),
 	       get_nr_swap_pages());
 
 	for_each_zone(zone) {
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 6cd9ff43ee22..264cc214c4df 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -118,28 +118,28 @@ static ssize_t node_read_meminfo(struct device *dev,
118 "Node %d ShmemPmdMapped: %8lu kB\n" 118 "Node %d ShmemPmdMapped: %8lu kB\n"
119#endif 119#endif
120 , 120 ,
121 nid, K(sum_zone_node_page_state(nid, NR_FILE_DIRTY)), 121 nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
122 nid, K(sum_zone_node_page_state(nid, NR_WRITEBACK)), 122 nid, K(node_page_state(pgdat, NR_WRITEBACK)),
123 nid, K(sum_zone_node_page_state(nid, NR_FILE_PAGES)), 123 nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
124 nid, K(node_page_state(pgdat, NR_FILE_MAPPED)), 124 nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
125 nid, K(node_page_state(pgdat, NR_ANON_MAPPED)), 125 nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
126 nid, K(i.sharedram), 126 nid, K(i.sharedram),
127 nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK) * 127 nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK) *
128 THREAD_SIZE / 1024, 128 THREAD_SIZE / 1024,
129 nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)), 129 nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
130 nid, K(sum_zone_node_page_state(nid, NR_UNSTABLE_NFS)), 130 nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
131 nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)), 131 nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
132 nid, K(sum_zone_node_page_state(nid, NR_WRITEBACK_TEMP)), 132 nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
133 nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE) + 133 nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE) +
134 sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), 134 sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
135 nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE)), 135 nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE)),
136#ifdef CONFIG_TRANSPARENT_HUGEPAGE 136#ifdef CONFIG_TRANSPARENT_HUGEPAGE
137 nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), 137 nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
138 nid, K(sum_zone_node_page_state(nid, NR_ANON_THPS) * 138 nid, K(node_page_state(pgdat, NR_ANON_THPS) *
139 HPAGE_PMD_NR), 139 HPAGE_PMD_NR),
140 nid, K(sum_zone_node_page_state(nid, NR_SHMEM_THPS) * 140 nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
141 HPAGE_PMD_NR), 141 HPAGE_PMD_NR),
142 nid, K(sum_zone_node_page_state(nid, NR_SHMEM_PMDMAPPED) * 142 nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
143 HPAGE_PMD_NR)); 143 HPAGE_PMD_NR));
144#else 144#else
145 nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE))); 145 nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 93dbcc38eb0f..45a1b4ec4ca3 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -91,8 +91,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
 	short selected_oom_score_adj;
 	int array_size = ARRAY_SIZE(lowmem_adj);
 	int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
-	int other_file = global_page_state(NR_FILE_PAGES) -
-				global_page_state(NR_SHMEM) -
+	int other_file = global_node_page_state(NR_FILE_PAGES) -
+				global_node_page_state(NR_SHMEM) -
 				total_swapcache_pages();
 
 	if (lowmem_adj_size < array_size)
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index d1a7d6beee60..d011135802d5 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -1864,7 +1864,8 @@ void osc_dec_unstable_pages(struct ptlrpc_request *req)
 	LASSERT(page_count >= 0);
 
 	for (i = 0; i < page_count; i++)
-		dec_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
+		dec_node_page_state(desc->bd_iov[i].kiov_page,
+				    NR_UNSTABLE_NFS);
 
 	atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr);
 	LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
@@ -1898,7 +1899,8 @@ void osc_inc_unstable_pages(struct ptlrpc_request *req)
 	LASSERT(page_count >= 0);
 
 	for (i = 0; i < page_count; i++)
-		inc_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
+		inc_node_page_state(desc->bd_iov[i].kiov_page,
+				    NR_UNSTABLE_NFS);
 
 	LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
 	atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 6f9c9f6f5157..56c8fda436c0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1807,8 +1807,8 @@ static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
  */
 static unsigned long get_nr_dirty_pages(void)
 {
-	return global_page_state(NR_FILE_DIRTY) +
-		global_page_state(NR_UNSTABLE_NFS) +
+	return global_node_page_state(NR_FILE_DIRTY) +
+		global_node_page_state(NR_UNSTABLE_NFS) +
 		get_nr_dirty_inodes();
 }
 
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 9154f8679024..2382f22a2a8b 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1452,7 +1452,7 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
 	list_del(&req->writepages_entry);
 	for (i = 0; i < req->num_pages; i++) {
 		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
-		dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP);
+		dec_node_page_state(req->pages[i], NR_WRITEBACK_TEMP);
 		wb_writeout_inc(&bdi->wb);
 	}
 	wake_up(&fi->page_waitq);
@@ -1642,7 +1642,7 @@ static int fuse_writepage_locked(struct page *page)
 	req->inode = inode;
 
 	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
-	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
+	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
 
 	spin_lock(&fc->lock);
 	list_add(&req->writepages_entry, &fi->writepages);
@@ -1756,7 +1756,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
 	spin_unlock(&fc->lock);
 
 	dec_wb_stat(&bdi->wb, WB_WRITEBACK);
-	dec_zone_page_state(page, NR_WRITEBACK_TEMP);
+	dec_node_page_state(page, NR_WRITEBACK_TEMP);
 	wb_writeout_inc(&bdi->wb);
 	fuse_writepage_free(fc, new_req);
 	fuse_request_free(new_req);
@@ -1855,7 +1855,7 @@ static int fuse_writepages_fill(struct page *page,
 	req->page_descs[req->num_pages].length = PAGE_SIZE;
 
 	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
-	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
+	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
 
 	err = 0;
 	if (is_writeback && fuse_writepage_in_flight(req, page)) {
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 5154fa65a2f2..5ea04d87fc65 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -623,7 +623,7 @@ void nfs_mark_page_unstable(struct page *page, struct nfs_commit_info *cinfo)
 	if (!cinfo->dreq) {
 		struct inode *inode = page_file_mapping(page)->host;
 
-		inc_zone_page_state(page, NR_UNSTABLE_NFS);
+		inc_node_page_state(page, NR_UNSTABLE_NFS);
 		inc_wb_stat(&inode_to_bdi(inode)->wb, WB_RECLAIMABLE);
 		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 	}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e1c74d3db64d..593fa21a02c0 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -898,7 +898,7 @@ nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
 static void
 nfs_clear_page_commit(struct page *page)
 {
-	dec_zone_page_state(page, NR_UNSTABLE_NFS);
+	dec_node_page_state(page, NR_UNSTABLE_NFS);
 	dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
 		    WB_RECLAIMABLE);
 }
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 40f108783d59..c1fdcc1a907a 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -40,7 +40,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 	si_swapinfo(&i);
 	committed = percpu_counter_read_positive(&vm_committed_as);
 
-	cached = global_page_state(NR_FILE_PAGES) -
+	cached = global_node_page_state(NR_FILE_PAGES) -
 			total_swapcache_pages() - i.bufferram;
 	if (cached < 0)
 		cached = 0;
@@ -138,8 +138,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 #endif
 		K(i.totalswap),
 		K(i.freeswap),
-		K(global_page_state(NR_FILE_DIRTY)),
-		K(global_page_state(NR_WRITEBACK)),
+		K(global_node_page_state(NR_FILE_DIRTY)),
+		K(global_node_page_state(NR_WRITEBACK)),
 		K(global_node_page_state(NR_ANON_MAPPED)),
 		K(global_node_page_state(NR_FILE_MAPPED)),
 		K(i.sharedram),
@@ -152,9 +152,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 #ifdef CONFIG_QUICKLIST
 		K(quicklist_total_size()),
 #endif
-		K(global_page_state(NR_UNSTABLE_NFS)),
+		K(global_node_page_state(NR_UNSTABLE_NFS)),
 		K(global_page_state(NR_BOUNCE)),
-		K(global_page_state(NR_WRITEBACK_TEMP)),
+		K(global_node_page_state(NR_WRITEBACK_TEMP)),
 		K(vm_commit_limit()),
 		K(committed),
 		(unsigned long)VMALLOC_TOTAL >> 10,
@@ -164,9 +164,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 	, atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
 #endif
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	, K(global_page_state(NR_ANON_THPS) * HPAGE_PMD_NR)
-	, K(global_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR)
-	, K(global_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR)
+	, K(global_node_page_state(NR_ANON_THPS) * HPAGE_PMD_NR)
+	, K(global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR)
+	, K(global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR)
 #endif
 #ifdef CONFIG_CMA
 	, K(totalcma_pages)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2d4a8804eafa..acd4665c3025 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -114,21 +114,16 @@ enum zone_stat_item {
 	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
 	NR_ZONE_LRU_ANON = NR_ZONE_LRU_BASE,
 	NR_ZONE_LRU_FILE,
+	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
 	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
-	NR_FILE_PAGES,
-	NR_FILE_DIRTY,
-	NR_WRITEBACK,
 	NR_SLAB_RECLAIMABLE,
 	NR_SLAB_UNRECLAIMABLE,
 	NR_PAGETABLE,		/* used for pagetables */
 	NR_KERNEL_STACK,
 	/* Second 128 byte cacheline */
-	NR_UNSTABLE_NFS,	/* NFS unstable pages */
 	NR_BOUNCE,
 	NR_VMSCAN_WRITE,
 	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
-	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
-	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
 	NR_DIRTIED,		/* page dirtyings since bootup */
 	NR_WRITTEN,		/* page writings since bootup */
 #if IS_ENABLED(CONFIG_ZSMALLOC)
@@ -142,9 +137,6 @@ enum zone_stat_item {
 	NUMA_LOCAL,		/* allocation from local node */
 	NUMA_OTHER,		/* allocation from other node */
 #endif
-	NR_ANON_THPS,
-	NR_SHMEM_THPS,
-	NR_SHMEM_PMDMAPPED,
 	NR_FREE_CMA_PAGES,
 	NR_VM_ZONE_STAT_ITEMS };
 
@@ -164,6 +156,15 @@ enum node_stat_item {
 	NR_ANON_MAPPED,		/* Mapped anonymous pages */
 	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
 				   only modified from process context */
+	NR_FILE_PAGES,
+	NR_FILE_DIRTY,
+	NR_WRITEBACK,
+	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
+	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
+	NR_SHMEM_THPS,
+	NR_SHMEM_PMDMAPPED,
+	NR_ANON_THPS,
+	NR_UNSTABLE_NFS,	/* NFS unstable pages */
 	NR_VM_NODE_STAT_ITEMS
 };
 
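Commentary: the two enum moves above are the core of the series. NR_FILE_PAGES, NR_FILE_DIRTY, NR_WRITEBACK, NR_WRITEBACK_TEMP, NR_SHMEM, the THP counters and NR_UNSTABLE_NFS leave enum zone_stat_item and reappear in enum node_stat_item, with NR_ZONE_WRITE_PENDING added as the one zone-level summary that remains. A minimal standalone model of the resulting lookup API is sketched below. It is illustrative only, not kernel code: the plain per-node array and the global-by-summing scheme are deliberate simplifications (the kernel maintains a separate global counter array updated alongside the per-node ones rather than summing nodes on every read), and the fixed MAX_NUMNODES exists only to keep the example self-contained and compilable.

/*
 * Standalone model of the zone -> node counter split; illustrative
 * only, not kernel code. Names mirror the kernel's, storage is
 * simplified to plain longs and a fixed node count.
 */
#include <stdio.h>

#define MAX_NUMNODES 4

/* A few of the items moved into enum node_stat_item by this patch. */
enum node_stat_item {
        NR_FILE_PAGES,
        NR_FILE_DIRTY,
        NR_WRITEBACK,
        NR_VM_NODE_STAT_ITEMS
};

struct pglist_data {
        long vm_stat[NR_VM_NODE_STAT_ITEMS];
};

static struct pglist_data node_data[MAX_NUMNODES];

/* Per-node lookup: a single read, no walk over the node's zones. */
static long node_page_state(struct pglist_data *pgdat,
                            enum node_stat_item item)
{
        return pgdat->vm_stat[item];
}

/*
 * Global lookup modelled here as a sum over nodes; the kernel keeps
 * a dedicated global array instead of re-summing on every read.
 */
static long global_node_page_state(enum node_stat_item item)
{
        long total = 0;
        int nid;

        for (nid = 0; nid < MAX_NUMNODES; nid++)
                total += node_page_state(&node_data[nid], item);
        return total;
}

int main(void)
{
        node_data[0].vm_stat[NR_FILE_DIRTY] = 128;
        node_data[1].vm_stat[NR_FILE_DIRTY] = 64;

        printf("node 0 dirty pages: %ld\n",
               node_page_state(&node_data[0], NR_FILE_DIRTY));
        printf("global dirty pages: %ld\n",
               global_node_page_state(NR_FILE_DIRTY));
        return 0;
}

This is why callers below change mechanically: zone-scoped helpers (zone_page_state(), inc_zone_page_state(), sum_zone_node_page_state()) become their node-scoped counterparts for every item that moved.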
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 531f5811ff6b..ad20f2d2b1f9 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -412,9 +412,9 @@ TRACE_EVENT(global_dirty_state,
 	),
 
 	TP_fast_assign(
-		__entry->nr_dirty = global_page_state(NR_FILE_DIRTY);
-		__entry->nr_writeback = global_page_state(NR_WRITEBACK);
-		__entry->nr_unstable = global_page_state(NR_UNSTABLE_NFS);
+		__entry->nr_dirty = global_node_page_state(NR_FILE_DIRTY);
+		__entry->nr_writeback = global_node_page_state(NR_WRITEBACK);
+		__entry->nr_unstable = global_node_page_state(NR_UNSTABLE_NFS);
 		__entry->nr_dirtied = global_page_state(NR_DIRTIED);
 		__entry->nr_written = global_page_state(NR_WRITTEN);
 		__entry->background_thresh = background_thresh;
diff --git a/mm/filemap.c b/mm/filemap.c
index 7ec50bd6f88c..c5f5e46c6f7f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -218,11 +218,11 @@ void __delete_from_page_cache(struct page *page, void *shadow)
 
 	/* hugetlb pages do not participate in page cache accounting. */
 	if (!PageHuge(page))
-		__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -nr);
+		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
 	if (PageSwapBacked(page)) {
-		__mod_zone_page_state(page_zone(page), NR_SHMEM, -nr);
+		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
 		if (PageTransHuge(page))
-			__dec_zone_page_state(page, NR_SHMEM_THPS);
+			__dec_node_page_state(page, NR_SHMEM_THPS);
 	} else {
 		VM_BUG_ON_PAGE(PageTransHuge(page) && !PageHuge(page), page);
 	}
@@ -568,9 +568,9 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 	 * hugetlb pages do not participate in page cache accounting.
 	 */
 	if (!PageHuge(new))
-		__inc_zone_page_state(new, NR_FILE_PAGES);
+		__inc_node_page_state(new, NR_FILE_PAGES);
 	if (PageSwapBacked(new))
-		__inc_zone_page_state(new, NR_SHMEM);
+		__inc_node_page_state(new, NR_SHMEM);
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	mem_cgroup_migrate(old, new);
 	radix_tree_preload_end();
@@ -677,7 +677,7 @@ static int __add_to_page_cache_locked(struct page *page,
 
 	/* hugetlb pages do not participate in page cache accounting. */
 	if (!huge)
-		__inc_zone_page_state(page, NR_FILE_PAGES);
+		__inc_node_page_state(page, NR_FILE_PAGES);
 	spin_unlock_irq(&mapping->tree_lock);
 	if (!huge)
 		mem_cgroup_commit_charge(page, memcg, false, false);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 481fb0128d21..121a7f808216 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1586,7 +1586,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 
 	if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
 		/* Last compound_mapcount is gone. */
-		__dec_zone_page_state(page, NR_ANON_THPS);
+		__dec_node_page_state(page, NR_ANON_THPS);
 		if (TestClearPageDoubleMap(page)) {
 			/* No need in mapcount reference anymore */
 			for (i = 0; i < HPAGE_PMD_NR; i++)
@@ -2061,7 +2061,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 			list_del(page_deferred_list(head));
 		}
 		if (mapping)
-			__dec_zone_page_state(page, NR_SHMEM_THPS);
+			__dec_node_page_state(page, NR_SHMEM_THPS);
 		spin_unlock(&pgdata->split_queue_lock);
 		__split_huge_page(page, list, flags);
 		ret = 0;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 374237bb059d..d03b14a6ef5e 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1483,10 +1483,10 @@ tree_unlocked:
 	}
 
 	local_irq_save(flags);
-	__inc_zone_page_state(new_page, NR_SHMEM_THPS);
+	__inc_node_page_state(new_page, NR_SHMEM_THPS);
 	if (nr_none) {
-		__mod_zone_page_state(zone, NR_FILE_PAGES, nr_none);
-		__mod_zone_page_state(zone, NR_SHMEM, nr_none);
+		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
+		__mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
 	}
 	local_irq_restore(flags);
 
diff --git a/mm/migrate.c b/mm/migrate.c
index fba770c54d84..ed0268268e93 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -505,15 +505,17 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 * are mapped to swap space.
 	 */
 	if (newzone != oldzone) {
-		__dec_zone_state(oldzone, NR_FILE_PAGES);
-		__inc_zone_state(newzone, NR_FILE_PAGES);
+		__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
+		__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
 		if (PageSwapBacked(page) && !PageSwapCache(page)) {
-			__dec_zone_state(oldzone, NR_SHMEM);
-			__inc_zone_state(newzone, NR_SHMEM);
+			__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
+			__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
 		}
 		if (dirty && mapping_cap_account_dirty(mapping)) {
-			__dec_zone_state(oldzone, NR_FILE_DIRTY);
-			__inc_zone_state(newzone, NR_FILE_DIRTY);
+			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
+			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
+			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
+			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
 		}
 	}
 	local_irq_enable();
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index f7c0fb993fb9..f97591d9fa00 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -498,20 +498,12 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
  */
 bool node_dirty_ok(struct pglist_data *pgdat)
 {
-	int z;
 	unsigned long limit = node_dirty_limit(pgdat);
 	unsigned long nr_pages = 0;
 
-	for (z = 0; z < MAX_NR_ZONES; z++) {
-		struct zone *zone = pgdat->node_zones + z;
-
-		if (!populated_zone(zone))
-			continue;
-
-		nr_pages += zone_page_state(zone, NR_FILE_DIRTY);
-		nr_pages += zone_page_state(zone, NR_UNSTABLE_NFS);
-		nr_pages += zone_page_state(zone, NR_WRITEBACK);
-	}
+	nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
+	nr_pages += node_page_state(pgdat, NR_UNSTABLE_NFS);
+	nr_pages += node_page_state(pgdat, NR_WRITEBACK);
 
 	return nr_pages <= limit;
 }
@@ -1601,10 +1593,10 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * written to the server's write cache, but has not yet
 		 * been flushed to permanent storage.
 		 */
-		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
-					global_page_state(NR_UNSTABLE_NFS);
+		nr_reclaimable = global_node_page_state(NR_FILE_DIRTY) +
+					global_node_page_state(NR_UNSTABLE_NFS);
 		gdtc->avail = global_dirtyable_memory();
-		gdtc->dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
+		gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK);
 
 		domain_dirty_limits(gdtc);
 
@@ -1941,8 +1933,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
 	 * as we're trying to decide whether to put more under writeback.
 	 */
 	gdtc->avail = global_dirtyable_memory();
-	gdtc->dirty = global_page_state(NR_FILE_DIRTY) +
-		      global_page_state(NR_UNSTABLE_NFS);
+	gdtc->dirty = global_node_page_state(NR_FILE_DIRTY) +
+		      global_node_page_state(NR_UNSTABLE_NFS);
 	domain_dirty_limits(gdtc);
 
 	if (gdtc->dirty > gdtc->bg_thresh)
@@ -1986,8 +1978,8 @@ void throttle_vm_writeout(gfp_t gfp_mask)
 		 */
 		dirty_thresh += dirty_thresh / 10;      /* wheeee... */
 
-		if (global_page_state(NR_UNSTABLE_NFS) +
-			global_page_state(NR_WRITEBACK) <= dirty_thresh)
+		if (global_node_page_state(NR_UNSTABLE_NFS) +
+			global_node_page_state(NR_WRITEBACK) <= dirty_thresh)
 			break;
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
 
@@ -2015,8 +2007,8 @@ int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
 void laptop_mode_timer_fn(unsigned long data)
 {
 	struct request_queue *q = (struct request_queue *)data;
-	int nr_pages = global_page_state(NR_FILE_DIRTY) +
-		global_page_state(NR_UNSTABLE_NFS);
+	int nr_pages = global_node_page_state(NR_FILE_DIRTY) +
+		global_node_page_state(NR_UNSTABLE_NFS);
 	struct bdi_writeback *wb;
 
 	/*
@@ -2467,7 +2459,8 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 		wb = inode_to_wb(inode);
 
 		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY);
-		__inc_zone_page_state(page, NR_FILE_DIRTY);
+		__inc_node_page_state(page, NR_FILE_DIRTY);
+		__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		__inc_zone_page_state(page, NR_DIRTIED);
 		__inc_wb_stat(wb, WB_RECLAIMABLE);
 		__inc_wb_stat(wb, WB_DIRTIED);
@@ -2488,7 +2481,8 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 {
 	if (mapping_cap_account_dirty(mapping)) {
 		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
-		dec_zone_page_state(page, NR_FILE_DIRTY);
+		dec_node_page_state(page, NR_FILE_DIRTY);
+		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		dec_wb_stat(wb, WB_RECLAIMABLE);
 		task_io_account_cancelled_write(PAGE_SIZE);
 	}
@@ -2744,7 +2738,8 @@ int clear_page_dirty_for_io(struct page *page)
 		wb = unlocked_inode_to_wb_begin(inode, &locked);
 		if (TestClearPageDirty(page)) {
 			mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
-			dec_zone_page_state(page, NR_FILE_DIRTY);
+			dec_node_page_state(page, NR_FILE_DIRTY);
+			dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 			dec_wb_stat(wb, WB_RECLAIMABLE);
 			ret = 1;
 		}
@@ -2790,7 +2785,8 @@ int test_clear_page_writeback(struct page *page)
 	}
 	if (ret) {
 		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
-		dec_zone_page_state(page, NR_WRITEBACK);
+		dec_node_page_state(page, NR_WRITEBACK);
+		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		inc_zone_page_state(page, NR_WRITTEN);
 	}
 	unlock_page_memcg(page);
@@ -2844,7 +2840,8 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 	}
 	if (!ret) {
 		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
-		inc_zone_page_state(page, NR_WRITEBACK);
+		inc_node_page_state(page, NR_WRITEBACK);
+		inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 	}
 	unlock_page_memcg(page);
 	return ret;
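Commentary: note the accounting discipline the mm/page-writeback.c hunks above establish. Every transition of the node-level NR_FILE_DIRTY or NR_WRITEBACK counters is mirrored by the same transition of the zone-level NR_ZONE_WRITE_PENDING counter, which is what lets the should_reclaim_retry() hunk in mm/page_alloc.c below replace two zone snapshots (writeback plus dirty) with a single one. A small standalone model of that invariant is sketched below; it is illustrative only, with plain longs standing in for the kernel's per-cpu counter machinery and the locking and memcg bookkeeping omitted.

/*
 * Model of the paired dirty/writeback accounting; not kernel code.
 * Node-level counters carry the real state, the zone-level mirror
 * exists so zone-scoped readers need only one snapshot.
 */
#include <assert.h>

struct counters {
        long nr_file_dirty;          /* node-level */
        long nr_writeback;           /* node-level */
        long nr_zone_write_pending;  /* zone-level mirror */
};

static void account_page_dirtied(struct counters *c)
{
        c->nr_file_dirty++;
        c->nr_zone_write_pending++;
}

static void clear_page_dirty_for_io(struct counters *c)
{
        c->nr_file_dirty--;
        c->nr_zone_write_pending--;
}

static void set_page_writeback(struct counters *c)
{
        c->nr_writeback++;
        c->nr_zone_write_pending++;
}

static void end_page_writeback(struct counters *c)
{
        c->nr_writeback--;
        c->nr_zone_write_pending--;
}

int main(void)
{
        struct counters c = { 0, 0, 0 };

        account_page_dirtied(&c);       /* page dirtied */
        assert(c.nr_zone_write_pending ==
               c.nr_file_dirty + c.nr_writeback);

        clear_page_dirty_for_io(&c);    /* chosen for writeout */
        set_page_writeback(&c);         /* now under writeback */
        assert(c.nr_zone_write_pending ==
               c.nr_file_dirty + c.nr_writeback);

        end_page_writeback(&c);         /* I/O completed */
        assert(c.nr_zone_write_pending == 0);
        return 0;
}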
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c11935bf37cb..0f92e04b58db 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3492,14 +3492,12 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
 		 * prevent from pre mature OOM
 		 */
 		if (!did_some_progress) {
-			unsigned long writeback;
-			unsigned long dirty;
+			unsigned long write_pending;
 
-			writeback = zone_page_state_snapshot(zone,
-							     NR_WRITEBACK);
-			dirty = zone_page_state_snapshot(zone, NR_FILE_DIRTY);
+			write_pending = zone_page_state_snapshot(zone,
+							NR_ZONE_WRITE_PENDING);
 
-			if (2*(writeback + dirty) > reclaimable) {
+			if (2 * write_pending > reclaimable) {
 				congestion_wait(BLK_RW_ASYNC, HZ/10);
 				return true;
 			}
@@ -4175,7 +4173,7 @@ EXPORT_SYMBOL_GPL(si_mem_available);
 void si_meminfo(struct sysinfo *val)
 {
 	val->totalram = totalram_pages;
-	val->sharedram = global_page_state(NR_SHMEM);
+	val->sharedram = global_node_page_state(NR_SHMEM);
 	val->freeram = global_page_state(NR_FREE_PAGES);
 	val->bufferram = nr_blockdev_pages();
 	val->totalhigh = totalhigh_pages;
@@ -4197,7 +4195,7 @@ void si_meminfo_node(struct sysinfo *val, int nid)
 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
 		managed_pages += pgdat->node_zones[zone_type].managed_pages;
 	val->totalram = managed_pages;
-	val->sharedram = sum_zone_node_page_state(nid, NR_SHMEM);
+	val->sharedram = node_page_state(pgdat, NR_SHMEM);
 	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
 #ifdef CONFIG_HIGHMEM
 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
@@ -4296,9 +4294,6 @@ void show_free_areas(unsigned int filter)
4296 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n" 4294 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
4297 " slab_reclaimable:%lu slab_unreclaimable:%lu\n" 4295 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
4298 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" 4296 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
4299#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4300 " anon_thp: %lu shmem_thp: %lu shmem_pmdmapped: %lu\n"
4301#endif
4302 " free:%lu free_pcp:%lu free_cma:%lu\n", 4297 " free:%lu free_pcp:%lu free_cma:%lu\n",
4303 global_node_page_state(NR_ACTIVE_ANON), 4298 global_node_page_state(NR_ACTIVE_ANON),
4304 global_node_page_state(NR_INACTIVE_ANON), 4299 global_node_page_state(NR_INACTIVE_ANON),
@@ -4307,20 +4302,15 @@ void show_free_areas(unsigned int filter)
 		global_node_page_state(NR_INACTIVE_FILE),
 		global_node_page_state(NR_ISOLATED_FILE),
 		global_node_page_state(NR_UNEVICTABLE),
-		global_page_state(NR_FILE_DIRTY),
-		global_page_state(NR_WRITEBACK),
-		global_page_state(NR_UNSTABLE_NFS),
+		global_node_page_state(NR_FILE_DIRTY),
+		global_node_page_state(NR_WRITEBACK),
+		global_node_page_state(NR_UNSTABLE_NFS),
 		global_page_state(NR_SLAB_RECLAIMABLE),
 		global_page_state(NR_SLAB_UNRECLAIMABLE),
 		global_node_page_state(NR_FILE_MAPPED),
-		global_page_state(NR_SHMEM),
+		global_node_page_state(NR_SHMEM),
 		global_page_state(NR_PAGETABLE),
 		global_page_state(NR_BOUNCE),
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-		global_page_state(NR_ANON_THPS) * HPAGE_PMD_NR,
-		global_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR,
-		global_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR,
-#endif
 		global_page_state(NR_FREE_PAGES),
 		free_pcp,
 		global_page_state(NR_FREE_CMA_PAGES));
@@ -4335,6 +4325,16 @@ void show_free_areas(unsigned int filter)
4335 " isolated(anon):%lukB" 4325 " isolated(anon):%lukB"
4336 " isolated(file):%lukB" 4326 " isolated(file):%lukB"
4337 " mapped:%lukB" 4327 " mapped:%lukB"
4328 " dirty:%lukB"
4329 " writeback:%lukB"
4330 " shmem:%lukB"
4331#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4332 " shmem_thp: %lukB"
4333 " shmem_pmdmapped: %lukB"
4334 " anon_thp: %lukB"
4335#endif
4336 " writeback_tmp:%lukB"
4337 " unstable:%lukB"
4338 " all_unreclaimable? %s" 4338 " all_unreclaimable? %s"
4339 "\n", 4339 "\n",
4340 pgdat->node_id, 4340 pgdat->node_id,
@@ -4346,6 +4346,17 @@ void show_free_areas(unsigned int filter)
 			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
 			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
 			K(node_page_state(pgdat, NR_FILE_MAPPED)),
+			K(node_page_state(pgdat, NR_FILE_DIRTY)),
+			K(node_page_state(pgdat, NR_WRITEBACK)),
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+			K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
+			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
+					* HPAGE_PMD_NR),
+			K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
+#endif
+			K(node_page_state(pgdat, NR_SHMEM)),
+			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
+			K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
 			!pgdat_reclaimable(pgdat) ? "yes" : "no");
 	}
 
@@ -4368,24 +4379,14 @@ void show_free_areas(unsigned int filter)
4368 " present:%lukB" 4379 " present:%lukB"
4369 " managed:%lukB" 4380 " managed:%lukB"
4370 " mlocked:%lukB" 4381 " mlocked:%lukB"
4371 " dirty:%lukB"
4372 " writeback:%lukB"
4373 " shmem:%lukB"
4374#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4375 " shmem_thp: %lukB"
4376 " shmem_pmdmapped: %lukB"
4377 " anon_thp: %lukB"
4378#endif
4379 " slab_reclaimable:%lukB" 4382 " slab_reclaimable:%lukB"
4380 " slab_unreclaimable:%lukB" 4383 " slab_unreclaimable:%lukB"
4381 " kernel_stack:%lukB" 4384 " kernel_stack:%lukB"
4382 " pagetables:%lukB" 4385 " pagetables:%lukB"
4383 " unstable:%lukB"
4384 " bounce:%lukB" 4386 " bounce:%lukB"
4385 " free_pcp:%lukB" 4387 " free_pcp:%lukB"
4386 " local_pcp:%ukB" 4388 " local_pcp:%ukB"
4387 " free_cma:%lukB" 4389 " free_cma:%lukB"
4388 " writeback_tmp:%lukB"
4389 " node_pages_scanned:%lu" 4390 " node_pages_scanned:%lu"
4390 "\n", 4391 "\n",
4391 zone->name, 4392 zone->name,
@@ -4396,26 +4397,15 @@ void show_free_areas(unsigned int filter)
 			K(zone->present_pages),
 			K(zone->managed_pages),
 			K(zone_page_state(zone, NR_MLOCK)),
-			K(zone_page_state(zone, NR_FILE_DIRTY)),
-			K(zone_page_state(zone, NR_WRITEBACK)),
-			K(zone_page_state(zone, NR_SHMEM)),
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-			K(zone_page_state(zone, NR_SHMEM_THPS) * HPAGE_PMD_NR),
-			K(zone_page_state(zone, NR_SHMEM_PMDMAPPED)
-					* HPAGE_PMD_NR),
-			K(zone_page_state(zone, NR_ANON_THPS) * HPAGE_PMD_NR),
-#endif
 			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
 			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
 			zone_page_state(zone, NR_KERNEL_STACK) *
 				THREAD_SIZE / 1024,
 			K(zone_page_state(zone, NR_PAGETABLE)),
-			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
 			K(zone_page_state(zone, NR_BOUNCE)),
 			K(free_pcp),
 			K(this_cpu_read(zone->pageset->pcp.count)),
 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
-			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
 			K(node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED)));
 		printk("lowmem_reserve[]:");
 		for (i = 0; i < MAX_NR_ZONES; i++)
@@ -4458,7 +4448,7 @@ void show_free_areas(unsigned int filter)
 
 	hugetlb_show_meminfo();
 
-	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
+	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
 
 	show_swap_cache_info();
 }
diff --git a/mm/rmap.c b/mm/rmap.c
index fdb3b5b645eb..709bc83703b1 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1213,7 +1213,7 @@ void do_page_add_anon_rmap(struct page *page,
 	 * disabled.
 	 */
 	if (compound)
-		__inc_zone_page_state(page, NR_ANON_THPS);
+		__inc_node_page_state(page, NR_ANON_THPS);
 	__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
 	}
 	if (unlikely(PageKsm(page)))
@@ -1251,7 +1251,7 @@ void page_add_new_anon_rmap(struct page *page,
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 		/* increment count (starts at -1) */
 		atomic_set(compound_mapcount_ptr(page), 0);
-		__inc_zone_page_state(page, NR_ANON_THPS);
+		__inc_node_page_state(page, NR_ANON_THPS);
 	} else {
 		/* Anon THP always mapped first with PMD */
 		VM_BUG_ON_PAGE(PageTransCompound(page), page);
@@ -1282,7 +1282,7 @@ void page_add_file_rmap(struct page *page, bool compound)
 		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
 			goto out;
 		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
-		__inc_zone_page_state(page, NR_SHMEM_PMDMAPPED);
+		__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
 	} else {
 		if (PageTransCompound(page)) {
 			VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -1322,7 +1322,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
 			goto out;
 		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
-		__dec_zone_page_state(page, NR_SHMEM_PMDMAPPED);
+		__dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
 	} else {
 		if (!atomic_add_negative(-1, &page->_mapcount))
 			goto out;
@@ -1356,7 +1356,7 @@ static void page_remove_anon_compound_rmap(struct page *page)
 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
 		return;
 
-	__dec_zone_page_state(page, NR_ANON_THPS);
+	__dec_node_page_state(page, NR_ANON_THPS);
 
 	if (TestClearPageDoubleMap(page)) {
 		/*
diff --git a/mm/shmem.c b/mm/shmem.c
index 62e42c7d544c..2ac19a61d565 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -575,9 +575,9 @@ static int shmem_add_to_page_cache(struct page *page,
 	if (!error) {
 		mapping->nrpages += nr;
 		if (PageTransHuge(page))
-			__inc_zone_page_state(page, NR_SHMEM_THPS);
-		__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, nr);
-		__mod_zone_page_state(page_zone(page), NR_SHMEM, nr);
+			__inc_node_page_state(page, NR_SHMEM_THPS);
+		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
+		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
 		spin_unlock_irq(&mapping->tree_lock);
 	} else {
 		page->mapping = NULL;
@@ -601,8 +601,8 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
 	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
 	page->mapping = NULL;
 	mapping->nrpages--;
-	__dec_zone_page_state(page, NR_FILE_PAGES);
-	__dec_zone_page_state(page, NR_SHMEM);
+	__dec_node_page_state(page, NR_FILE_PAGES);
+	__dec_node_page_state(page, NR_SHMEM);
 	spin_unlock_irq(&mapping->tree_lock);
 	put_page(page);
 	BUG_ON(error);
@@ -1493,8 +1493,8 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
 					 newpage);
 	if (!error) {
-		__inc_zone_page_state(newpage, NR_FILE_PAGES);
-		__dec_zone_page_state(oldpage, NR_FILE_PAGES);
+		__inc_node_page_state(newpage, NR_FILE_PAGES);
+		__dec_node_page_state(oldpage, NR_FILE_PAGES);
 	}
 	spin_unlock_irq(&swap_mapping->tree_lock);
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index c99463ac02fb..c8310a37be3a 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -95,7 +95,7 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
 					entry.val, page);
 	if (likely(!error)) {
 		address_space->nrpages++;
-		__inc_zone_page_state(page, NR_FILE_PAGES);
+		__inc_node_page_state(page, NR_FILE_PAGES);
 		INC_CACHE_INFO(add_total);
 	}
 	spin_unlock_irq(&address_space->tree_lock);
@@ -147,7 +147,7 @@ void __delete_from_swap_cache(struct page *page)
 	set_page_private(page, 0);
 	ClearPageSwapCache(page);
 	address_space->nrpages--;
-	__dec_zone_page_state(page, NR_FILE_PAGES);
+	__dec_node_page_state(page, NR_FILE_PAGES);
 	INC_CACHE_INFO(del_total);
 }
153 153
diff --git a/mm/util.c b/mm/util.c
index 8d010ef2ce1c..662cddf914af 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -528,7 +528,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 
 	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
 		free = global_page_state(NR_FREE_PAGES);
-		free += global_page_state(NR_FILE_PAGES);
+		free += global_node_page_state(NR_FILE_PAGES);
 
 		/*
 		 * shmem pages shouldn't be counted as free in this
@@ -536,7 +536,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 		 * that won't affect the overall amount of available
 		 * memory in the system.
 		 */
-		free -= global_page_state(NR_SHMEM);
+		free -= global_node_page_state(NR_SHMEM);
 
 		free += get_nr_swap_pages();
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 90b46651d158..b797afec3057 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3587,11 +3587,11 @@ int sysctl_min_unmapped_ratio = 1;
  */
 int sysctl_min_slab_ratio = 5;
 
-static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
+static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
 {
-	unsigned long file_mapped = node_page_state(zone->zone_pgdat, NR_FILE_MAPPED);
-	unsigned long file_lru = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
-		node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE);
+	unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
+	unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
+		node_page_state(pgdat, NR_ACTIVE_FILE);
 
 	/*
 	 * It's possible for there to be more file mapped pages than
@@ -3610,17 +3610,17 @@ static unsigned long zone_pagecache_reclaimable(struct zone *zone)
 	/*
 	 * If RECLAIM_UNMAP is set, then all file pages are considered
 	 * potentially reclaimable. Otherwise, we have to worry about
-	 * pages like swapcache and zone_unmapped_file_pages() provides
+	 * pages like swapcache and node_unmapped_file_pages() provides
 	 * a better estimate
 	 */
 	if (zone_reclaim_mode & RECLAIM_UNMAP)
-		nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
+		nr_pagecache_reclaimable = node_page_state(zone->zone_pgdat, NR_FILE_PAGES);
 	else
-		nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
+		nr_pagecache_reclaimable = node_unmapped_file_pages(zone->zone_pgdat);
 
 	/* If we can't clean pages, remove dirty pages from consideration */
 	if (!(zone_reclaim_mode & RECLAIM_WRITE))
-		delta += zone_page_state(zone, NR_FILE_DIRTY);
+		delta += node_page_state(zone->zone_pgdat, NR_FILE_DIRTY);
 
 	/* Watch for any possible underflows due to delta */
 	if (unlikely(delta > nr_pagecache_reclaimable))
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 02e7406e8fcd..455392158062 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -924,20 +924,15 @@ const char * const vmstat_text[] = {
924 "nr_alloc_batch", 924 "nr_alloc_batch",
925 "nr_zone_anon_lru", 925 "nr_zone_anon_lru",
926 "nr_zone_file_lru", 926 "nr_zone_file_lru",
927 "nr_zone_write_pending",
927 "nr_mlock", 928 "nr_mlock",
928 "nr_file_pages",
929 "nr_dirty",
930 "nr_writeback",
931 "nr_slab_reclaimable", 929 "nr_slab_reclaimable",
932 "nr_slab_unreclaimable", 930 "nr_slab_unreclaimable",
933 "nr_page_table_pages", 931 "nr_page_table_pages",
934 "nr_kernel_stack", 932 "nr_kernel_stack",
935 "nr_unstable",
936 "nr_bounce", 933 "nr_bounce",
937 "nr_vmscan_write", 934 "nr_vmscan_write",
938 "nr_vmscan_immediate_reclaim", 935 "nr_vmscan_immediate_reclaim",
939 "nr_writeback_temp",
940 "nr_shmem",
941 "nr_dirtied", 936 "nr_dirtied",
942 "nr_written", 937 "nr_written",
943#if IS_ENABLED(CONFIG_ZSMALLOC) 938#if IS_ENABLED(CONFIG_ZSMALLOC)
@@ -951,9 +946,6 @@ const char * const vmstat_text[] = {
951 "numa_local", 946 "numa_local",
952 "numa_other", 947 "numa_other",
953#endif 948#endif
954 "nr_anon_transparent_hugepages",
955 "nr_shmem_hugepages",
956 "nr_shmem_pmdmapped",
957 "nr_free_cma", 949 "nr_free_cma",
958 950
959 /* Node-based counters */ 951 /* Node-based counters */
@@ -970,6 +962,15 @@ const char * const vmstat_text[] = {
970 "workingset_nodereclaim", 962 "workingset_nodereclaim",
971 "nr_anon_pages", 963 "nr_anon_pages",
972 "nr_mapped", 964 "nr_mapped",
965 "nr_file_pages",
966 "nr_dirty",
967 "nr_writeback",
968 "nr_writeback_temp",
969 "nr_shmem",
970 "nr_shmem_hugepages",
971 "nr_shmem_pmdmapped",
972 "nr_anon_transparent_hugepages",
973 "nr_unstable",
973 974
974 /* enum writeback_stat_item counters */ 975 /* enum writeback_stat_item counters */
975 "nr_dirty_threshold", 976 "nr_dirty_threshold",