author     Christoph Lameter <clameter@sgi.com>     2006-06-30 04:55:36 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-06-30 14:25:35 -0400
commit     f3dbd34460ff54962d3e3244b6bcb7f5295356e6 (patch)
tree       91caae2b90d684a7640b5da451a9a2ff8a5c8fb8
parent     bf02cf4b6cf931d060ad5c6ce9b960af6faefd2d (diff)
[PATCH] zoned vm counters: split NR_ANON_PAGES off from NR_FILE_MAPPED
The current NR_FILE_MAPPED is used by zone reclaim and the dirty load
calculation as the number of mapped pagecache pages.  However, that is not
true.  NR_FILE_MAPPED includes the mapped anonymous pages.  This patch
separates those and therefore allows an accurate tracking of the anonymous
pages per zone.

It then becomes possible to determine the number of unmapped pages per
zone and we can avoid scanning for unmapped pages if there are none.

Also it may now be possible to determine the mapped/unmapped ratio in
get_dirty_limit.  Isn't the number of anonymous pages irrelevant in that
calculation?

Note that this will change the meaning of the number of mapped pages reported
in /proc/vmstat, /proc/meminfo and in the per node statistics.  This may
affect user space tools that monitor these counters!  NR_FILE_MAPPED works
like NR_FILE_DIRTY.  It is only valid for pagecache pages.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
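For illustration only, and not part of the patch itself: with the split
counters, "all mapped pages" has to be computed as the sum of the two items
(as the dirty-limit and reclaim hunks below now do), while the number of
unmapped pagecache pages in a zone falls out as a difference.  The sketch
below assumes the global_page_state()/zone_page_state() accessors from the
zoned VM counter series; total_mapped_pages() and zone_unmapped_file_pages()
are made-up helper names used only for this example.

/* Illustrative sketch, not part of this patch. */
#include <linux/mmzone.h>
#include <linux/vmstat.h>

/* All mapped pages, i.e. what NR_FILE_MAPPED used to mean. */
static unsigned long total_mapped_pages(void)
{
	return global_page_state(NR_FILE_MAPPED) +
	       global_page_state(NR_ANON_PAGES);
}

/* Pagecache pages in a zone that are not mapped into any pagetable;
 * if this is zero, there are no unmapped pages to scan for. */
static unsigned long zone_unmapped_file_pages(struct zone *zone)
{
	return zone_page_state(zone, NR_FILE_PAGES) -
	       zone_page_state(zone, NR_FILE_MAPPED);
}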
-rw-r--r--  drivers/base/node.c       2
-rw-r--r--  fs/proc/proc_misc.c       2
-rw-r--r--  include/linux/mmzone.h    3
-rw-r--r--  mm/page-writeback.c       3
-rw-r--r--  mm/rmap.c                 5
-rw-r--r--  mm/vmscan.c               3
-rw-r--r--  mm/vmstat.c               1
7 files changed, 14 insertions, 5 deletions
diff --git a/drivers/base/node.c b/drivers/base/node.c
index ae9e3fea4b31..c3bf05158c6d 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -71,6 +71,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
71 "Node %d Writeback: %8lu kB\n" 71 "Node %d Writeback: %8lu kB\n"
72 "Node %d FilePages: %8lu kB\n" 72 "Node %d FilePages: %8lu kB\n"
73 "Node %d Mapped: %8lu kB\n" 73 "Node %d Mapped: %8lu kB\n"
74 "Node %d AnonPages: %8lu kB\n"
74 "Node %d Slab: %8lu kB\n", 75 "Node %d Slab: %8lu kB\n",
75 nid, K(i.totalram), 76 nid, K(i.totalram),
76 nid, K(i.freeram), 77 nid, K(i.freeram),
@@ -85,6 +86,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		nid, K(ps.nr_writeback),
 		nid, K(node_page_state(nid, NR_FILE_PAGES)),
 		nid, K(node_page_state(nid, NR_FILE_MAPPED)),
+		nid, K(node_page_state(nid, NR_ANON_PAGES)),
 		nid, K(ps.nr_slab));
 	n += hugetlb_report_node_meminfo(nid, buf + n);
 	return n;
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 1af12fd77fe6..ff809656ce31 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -168,6 +168,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
168 "SwapFree: %8lu kB\n" 168 "SwapFree: %8lu kB\n"
169 "Dirty: %8lu kB\n" 169 "Dirty: %8lu kB\n"
170 "Writeback: %8lu kB\n" 170 "Writeback: %8lu kB\n"
171 "AnonPages: %8lu kB\n"
171 "Mapped: %8lu kB\n" 172 "Mapped: %8lu kB\n"
172 "Slab: %8lu kB\n" 173 "Slab: %8lu kB\n"
173 "CommitLimit: %8lu kB\n" 174 "CommitLimit: %8lu kB\n"
@@ -191,6 +192,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 		K(i.freeswap),
 		K(ps.nr_dirty),
 		K(ps.nr_writeback),
+		K(global_page_state(NR_ANON_PAGES)),
 		K(global_page_state(NR_FILE_MAPPED)),
 		K(ps.nr_slab),
 		K(allowed),
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 08be91e6cecf..4833abd4458b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -47,7 +47,8 @@ struct zone_padding {
 #endif
 
 enum zone_stat_item {
-	NR_FILE_MAPPED,	/* mapped into pagetables.
+	NR_ANON_PAGES,	/* Mapped anonymous pages */
+	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
 	NR_FILE_PAGES,
 	NR_VM_ZONE_STAT_ITEMS };
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 60c7244c42e4..0faacfe18909 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -111,7 +111,8 @@ static void get_writeback_state(struct writeback_state *wbs)
 {
 	wbs->nr_dirty = read_page_state(nr_dirty);
 	wbs->nr_unstable = read_page_state(nr_unstable);
-	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED);
+	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED) +
+			global_page_state(NR_ANON_PAGES);
 	wbs->nr_writeback = read_page_state(nr_writeback);
 }
 
diff --git a/mm/rmap.c b/mm/rmap.c
index af5e9808e65d..40158b59729e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -455,7 +455,7 @@ static void __page_set_anon_rmap(struct page *page,
 	 * nr_mapped state can be updated without turning off
 	 * interrupts because it is not modified via interrupt.
 	 */
-	__inc_zone_page_state(page, NR_FILE_MAPPED);
+	__inc_zone_page_state(page, NR_ANON_PAGES);
 }
 
 /**
@@ -531,7 +531,8 @@ void page_remove_rmap(struct page *page)
 	 */
 		if (page_test_and_clear_dirty(page))
 			set_page_dirty(page);
-		__dec_zone_page_state(page, NR_FILE_MAPPED);
+		__dec_zone_page_state(page,
+			PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
 	}
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 08bc54e80862..2f0390161c0e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -742,7 +742,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		 * how much memory
 		 * is mapped.
 		 */
-		mapped_ratio = (global_page_state(NR_FILE_MAPPED) * 100) /
+		mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
+			global_page_state(NR_ANON_PAGES)) * 100) /
 					vm_total_pages;
 
 		/*
diff --git a/mm/vmstat.c b/mm/vmstat.c
index f16b33eb6d5c..3baf4dffa62a 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -395,6 +395,7 @@ struct seq_operations fragmentation_op = {
 
 static char *vmstat_text[] = {
 	/* Zoned VM counters */
+	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",
 