author	Mel Gorman <mgorman@techsingularity.net>	2016-07-28 18:46:23 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-28 19:07:41 -0400
commit	c4a25635b60d08853a3e4eaae3ab34419a36cfa2 (patch)
tree	22fc50885a47c64be6e6cd2a8908025512eb1984
parent	11fb998986a72aa7e997d96d63d52582a01228c5 (diff)
mm: move vmscan writes and file write accounting to the node
As reclaim is now node-based, it follows that page write activity due to
page reclaim should also be accounted for on the node.  For consistency,
also account page writes and page dirtying on a per-node basis.

After this patch, there are a few remaining zone counters that may appear
strange but are fine.  NUMA stats are still per-zone as this is a
user-space interface that tools consume.  NR_MLOCK, NR_SLAB_*,
NR_PAGETABLE, NR_KERNEL_STACK and NR_BOUNCE are all allocations that
potentially pin low memory and cannot trivially be reclaimed on demand.
This information is still useful for debugging a page allocation failure
warning.

Link: http://lkml.kernel.org/r/1467970510-21195-21-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
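[Editorial note] In effect, every converted call site swaps a zone-indexed helper for its node-indexed equivalent at the same spot. As a rough illustration of the difference, here is a simplified sketch, not the kernel's actual implementation (which batches updates through per-cpu differentials), assuming the per-node layout the earlier patches in this series introduce: pgdat->vm_stat for per-node totals plus a global vm_node_stat array.

/* Sketch only: charge a counter to the node that owns the page, where
 * the old per-zone helper charged the page's zone instead. */
static inline void sketch_inc_node_page_state(struct page *page,
					      enum node_stat_item item)
{
	pg_data_t *pgdat = page_pgdat(page);	/* owning node */

	atomic_long_inc(&pgdat->vm_stat[item]);	/* per-node total */
	atomic_long_inc(&vm_node_stat[item]);	/* global total */
}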
-rw-r--r--	include/linux/mmzone.h	8
-rw-r--r--	include/trace/events/writeback.h	4
-rw-r--r--	mm/page-writeback.c	6
-rw-r--r--	mm/vmscan.c	4
-rw-r--r--	mm/vmstat.c	8
5 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index acd4665c3025..e3d6d42722a0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -122,10 +122,6 @@ enum zone_stat_item {
 	NR_KERNEL_STACK,
 	/* Second 128 byte cacheline */
 	NR_BOUNCE,
-	NR_VMSCAN_WRITE,
-	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
-	NR_DIRTIED,		/* page dirtyings since bootup */
-	NR_WRITTEN,		/* page writings since bootup */
 #if IS_ENABLED(CONFIG_ZSMALLOC)
 	NR_ZSPAGES,		/* allocated in zsmalloc */
 #endif
@@ -165,6 +161,10 @@ enum node_stat_item {
 	NR_SHMEM_PMDMAPPED,
 	NR_ANON_THPS,
 	NR_UNSTABLE_NFS,	/* NFS unstable pages */
+	NR_VMSCAN_WRITE,
+	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
+	NR_DIRTIED,		/* page dirtyings since bootup */
+	NR_WRITTEN,		/* page writings since bootup */
 	NR_VM_NODE_STAT_ITEMS
 };
 
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index ad20f2d2b1f9..2ccd9ccbf9ef 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -415,8 +415,8 @@ TRACE_EVENT(global_dirty_state,
 		__entry->nr_dirty	= global_node_page_state(NR_FILE_DIRTY);
 		__entry->nr_writeback	= global_node_page_state(NR_WRITEBACK);
 		__entry->nr_unstable	= global_node_page_state(NR_UNSTABLE_NFS);
-		__entry->nr_dirtied	= global_page_state(NR_DIRTIED);
-		__entry->nr_written	= global_page_state(NR_WRITTEN);
+		__entry->nr_dirtied	= global_node_page_state(NR_DIRTIED);
+		__entry->nr_written	= global_node_page_state(NR_WRITTEN);
 		__entry->background_thresh = background_thresh;
 		__entry->dirty_thresh	= dirty_thresh;
 		__entry->dirty_limit	= global_wb_domain.dirty_limit;
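[Editorial note] The trace event now sources nr_dirtied and nr_written through global_node_page_state(). A simplified sketch of that read side, assuming the global per-node totals sit in an atomic vm_node_stat array (the real helper wraps the negative clamp in CONFIG_SMP):

/* Per-cpu batching can leave the global total transiently negative,
 * so clamp to zero before reporting it. */
static inline unsigned long sketch_global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);

	return x < 0 ? 0 : x;
}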
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index f97591d9fa00..3c02aa603f5a 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2461,7 +2461,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY);
 		__inc_node_page_state(page, NR_FILE_DIRTY);
 		__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
-		__inc_zone_page_state(page, NR_DIRTIED);
+		__inc_node_page_state(page, NR_DIRTIED);
 		__inc_wb_stat(wb, WB_RECLAIMABLE);
 		__inc_wb_stat(wb, WB_DIRTIED);
 		task_io_account_write(PAGE_SIZE);
@@ -2550,7 +2550,7 @@ void account_page_redirty(struct page *page)
 
 	wb = unlocked_inode_to_wb_begin(inode, &locked);
 	current->nr_dirtied--;
-	dec_zone_page_state(page, NR_DIRTIED);
+	dec_node_page_state(page, NR_DIRTIED);
 	dec_wb_stat(wb, WB_DIRTIED);
 	unlocked_inode_to_wb_end(inode, locked);
 }
@@ -2787,7 +2787,7 @@ int test_clear_page_writeback(struct page *page)
 		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
 		dec_node_page_state(page, NR_WRITEBACK);
 		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
-		inc_zone_page_state(page, NR_WRITTEN);
+		inc_node_page_state(page, NR_WRITTEN);
 	}
 	unlock_page_memcg(page);
 	return ret;
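[Editorial note] Taken together, the page-writeback.c hunks keep the counter lifecycle intact while moving it to the node: NR_DIRTIED rises when a page is dirtied, drops again on a redirty so the page is not double-counted, and NR_WRITTEN rises once writeback completes. A minimal userspace sketch of consuming the pair, assuming only that /proc/vmstat keeps emitting the nr_dirtied and nr_written lines named in the vmstat.c hunk below:

#include <stdio.h>
#include <string.h>

/* Print the approximate dirty backlog: pages dirtied since bootup that
 * have not yet been written back.  Both counters are monotonic, so the
 * difference is only an estimate. */
int main(void)
{
	unsigned long long dirtied = 0, written = 0, val;
	char key[64];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 1;
	while (fscanf(f, "%63s %llu", key, &val) == 2) {
		if (!strcmp(key, "nr_dirtied"))
			dirtied = val;
		else if (!strcmp(key, "nr_written"))
			written = val;
	}
	fclose(f);
	printf("dirtied %llu written %llu backlog ~%llu pages\n",
	       dirtied, written, dirtied - written);
	return 0;
}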
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b797afec3057..9b61a55b6e38 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -612,7 +612,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
 			ClearPageReclaim(page);
 		}
 		trace_mm_vmscan_writepage(page);
-		inc_zone_page_state(page, NR_VMSCAN_WRITE);
+		inc_node_page_state(page, NR_VMSCAN_WRITE);
 		return PAGE_SUCCESS;
 	}
 
@@ -1117,7 +1117,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				 * except we already have the page isolated
 				 * and know it's dirty
 				 */
-				inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
+				inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
 				SetPageReclaim(page);
 
 				goto keep_locked;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 455392158062..bc94968400d0 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -931,10 +931,6 @@ const char * const vmstat_text[] = {
931 "nr_page_table_pages", 931 "nr_page_table_pages",
932 "nr_kernel_stack", 932 "nr_kernel_stack",
933 "nr_bounce", 933 "nr_bounce",
934 "nr_vmscan_write",
935 "nr_vmscan_immediate_reclaim",
936 "nr_dirtied",
937 "nr_written",
938#if IS_ENABLED(CONFIG_ZSMALLOC) 934#if IS_ENABLED(CONFIG_ZSMALLOC)
939 "nr_zspages", 935 "nr_zspages",
940#endif 936#endif
@@ -971,6 +967,10 @@ const char * const vmstat_text[] = {
971 "nr_shmem_pmdmapped", 967 "nr_shmem_pmdmapped",
972 "nr_anon_transparent_hugepages", 968 "nr_anon_transparent_hugepages",
973 "nr_unstable", 969 "nr_unstable",
970 "nr_vmscan_write",
971 "nr_vmscan_immediate_reclaim",
972 "nr_dirtied",
973 "nr_written",
974 974
975 /* enum writeback_stat_item counters */ 975 /* enum writeback_stat_item counters */
976 "nr_dirty_threshold", 976 "nr_dirty_threshold",