author    Christoph Lameter <clameter@sgi.com>    2006-06-30 04:55:34 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-06-30 14:25:34 -0400
commit    65ba55f500a37272985d071c9bbb35256a2f7c14 (patch)
tree      e7735326ef2d2dca9d00a6c5ae47e9eb03c7834f
parent    2244b95a7bcf8d24196f8a3a44187ba5dfff754c (diff)
[PATCH] zoned vm counters: convert nr_mapped to per zone counter
nr_mapped is important because it lets us determine how many pages of a zone
are not mapped, which allows a more efficient means of determining when we
need to reclaim memory in a zone.

We take the nr_mapped field out of the page state structure and define a new
per zone counter named NR_FILE_MAPPED (the anonymous pages will be split off
from NR_MAPPED in the next patch).

We replace the use of nr_mapped in various kernel locations.  This avoids
looping over all processors in try_to_free_pages(), writeback and reclaim
(swap + zone reclaim).

[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
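The core of the change is where the read-side cost lands. A rough user-space
sketch of the two schemes (illustrative only; the array sizes and the helper
names old_read_page_state()/new_global_page_state() are made up, not kernel
code):

	#include <stdio.h>

	#define NR_CPUS  64	/* illustrative: one counter slot per possible CPU */
	#define NR_ZONES 3	/* illustrative: e.g. DMA, NORMAL, HIGHMEM */

	/* old scheme: nr_mapped lived in a per-cpu page_state structure */
	static unsigned long percpu_nr_mapped[NR_CPUS];

	/* new scheme: NR_FILE_MAPPED is kept per zone */
	static unsigned long zone_nr_file_mapped[NR_ZONES];

	/* reading the old counter meant summing across every processor */
	static unsigned long old_read_page_state(void)
	{
		unsigned long sum = 0;
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			sum += percpu_nr_mapped[cpu];
		return sum;
	}

	/* the per zone counter is summed over a small, fixed set of zones */
	static unsigned long new_global_page_state(void)
	{
		unsigned long sum = 0;
		for (int zone = 0; zone < NR_ZONES; zone++)
			sum += zone_nr_file_mapped[zone];
		return sum;
	}

	int main(void)
	{
		percpu_nr_mapped[0] = 10;
		percpu_nr_mapped[1] = 20;
		zone_nr_file_mapped[0] = 10;
		zone_nr_file_mapped[1] = 20;
		printf("old: %lu new: %lu\n",
		       old_read_page_state(), new_global_page_state());
		return 0;
	}

The per zone layout is also what makes node_page_state(nid, NR_FILE_MAPPED)
possible, which is why the drivers/base/node.c hunk below can drop the
negative-value clamp that the per-cpu sums required.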
-rw-r--r--  arch/i386/mm/pgtable.c  |  2
-rw-r--r--  drivers/base/node.c     |  4
-rw-r--r--  fs/proc/proc_misc.c     |  2
-rw-r--r--  include/linux/mmzone.h  |  3
-rw-r--r--  include/linux/vmstat.h  |  2
-rw-r--r--  mm/page-writeback.c     |  2
-rw-r--r--  mm/page_alloc.c         |  2
-rw-r--r--  mm/rmap.c               |  6
-rw-r--r--  mm/vmscan.c             |  8
-rw-r--r--  mm/vmstat.c             |  2
10 files changed, 16 insertions(+), 17 deletions(-)
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 2889567e21a1..f85f1a40e5c8 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -61,7 +61,7 @@ void show_mem(void)
 	get_page_state(&ps);
 	printk(KERN_INFO "%lu pages dirty\n", ps.nr_dirty);
 	printk(KERN_INFO "%lu pages writeback\n", ps.nr_writeback);
-	printk(KERN_INFO "%lu pages mapped\n", ps.nr_mapped);
+	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
 	printk(KERN_INFO "%lu pages slab\n", ps.nr_slab);
 	printk(KERN_INFO "%lu pages pagetables\n", ps.nr_page_table_pages);
 }
diff --git a/drivers/base/node.c b/drivers/base/node.c
index eae2bdc183bb..8b1232320a99 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -54,8 +54,6 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		ps.nr_dirty = 0;
 	if ((long)ps.nr_writeback < 0)
 		ps.nr_writeback = 0;
-	if ((long)ps.nr_mapped < 0)
-		ps.nr_mapped = 0;
 	if ((long)ps.nr_slab < 0)
 		ps.nr_slab = 0;
 
@@ -84,7 +82,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		       nid, K(i.freeram - i.freehigh),
 		       nid, K(ps.nr_dirty),
 		       nid, K(ps.nr_writeback),
-		       nid, K(ps.nr_mapped),
+		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
 		       nid, K(ps.nr_slab));
 	n += hugetlb_report_node_meminfo(nid, buf + n);
 	return n;
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 5c10ea157425..bc7d9abca743 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -190,7 +190,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 		K(i.freeswap),
 		K(ps.nr_dirty),
 		K(ps.nr_writeback),
-		K(ps.nr_mapped),
+		K(global_page_state(NR_FILE_MAPPED)),
 		K(ps.nr_slab),
 		K(allowed),
 		K(committed),
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 543f9e411563..eb42c1277023 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -47,6 +47,9 @@ struct zone_padding {
 #endif
 
 enum zone_stat_item {
+	NR_FILE_MAPPED,	/* mapped into pagetables.
+			   only modified from process context */
+
 	NR_VM_ZONE_STAT_ITEMS };
 
 struct per_cpu_pages {
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 3fd5c11e544a..8ab8229523e6 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -26,8 +26,6 @@ struct page_state {
 	unsigned long nr_writeback;	/* Pages under writeback */
 	unsigned long nr_unstable;	/* NFS unstable pages */
 	unsigned long nr_page_table_pages;/* Pages used for pagetables */
-	unsigned long nr_mapped;	/* mapped into pagetables.
-					 * only modified from process context */
 	unsigned long nr_slab;	/* In slab */
 #define GET_PAGE_STATE_LAST nr_slab
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 4ec7026c7bab..60c7244c42e4 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -111,7 +111,7 @@ static void get_writeback_state(struct writeback_state *wbs)
 {
 	wbs->nr_dirty = read_page_state(nr_dirty);
 	wbs->nr_unstable = read_page_state(nr_unstable);
-	wbs->nr_mapped = read_page_state(nr_mapped);
+	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED);
 	wbs->nr_writeback = read_page_state(nr_writeback);
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3a877fecc300..04dd2b01b2b7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1319,7 +1319,7 @@ void show_free_areas(void)
 		ps.nr_unstable,
 		nr_free_pages(),
 		ps.nr_slab,
-		ps.nr_mapped,
+		global_page_state(NR_FILE_MAPPED),
 		ps.nr_page_table_pages);
 
 	for_each_zone(zone) {
diff --git a/mm/rmap.c b/mm/rmap.c
index e76909e880ca..af5e9808e65d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -455,7 +455,7 @@ static void __page_set_anon_rmap(struct page *page,
 	 * nr_mapped state can be updated without turning off
 	 * interrupts because it is not modified via interrupt.
 	 */
-	__inc_page_state(nr_mapped);
+	__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
 /**
@@ -499,7 +499,7 @@ void page_add_new_anon_rmap(struct page *page,
 void page_add_file_rmap(struct page *page)
 {
 	if (atomic_inc_and_test(&page->_mapcount))
-		__inc_page_state(nr_mapped);
+		__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
 /**
@@ -531,7 +531,7 @@ void page_remove_rmap(struct page *page)
 		 */
 		if (page_test_and_clear_dirty(page))
 			set_page_dirty(page);
-		__dec_page_state(nr_mapped);
+		__dec_zone_page_state(page, NR_FILE_MAPPED);
 	}
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eeacb0d695c3..d2caf7471cf1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -990,7 +990,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 	}
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
-		sc.nr_mapped = read_page_state(nr_mapped);
+		sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
 		sc.nr_scanned = 0;
 		if (!priority)
 			disable_swap_token();
@@ -1075,7 +1075,7 @@ loop_again:
 	total_scanned = 0;
 	nr_reclaimed = 0;
 	sc.may_writepage = !laptop_mode;
-	sc.nr_mapped = read_page_state(nr_mapped);
+	sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
 
 	inc_page_state(pageoutrun);
 
@@ -1407,7 +1407,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	for (prio = DEF_PRIORITY; prio >= 0; prio--) {
 		unsigned long nr_to_scan = nr_pages - ret;
 
-		sc.nr_mapped = read_page_state(nr_mapped);
+		sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
 		sc.nr_scanned = 0;
 
 		ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
@@ -1548,7 +1548,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	struct scan_control sc = {
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
 		.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
-		.nr_mapped = read_page_state(nr_mapped),
+		.nr_mapped = global_page_state(NR_FILE_MAPPED),
 		.swap_cluster_max = max_t(unsigned long, nr_pages,
 					SWAP_CLUSTER_MAX),
 		.gfp_mask = gfp_mask,
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 210f9bbbb04f..4800091c129a 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -401,13 +401,13 @@ struct seq_operations fragmentation_op = {
 
 static char *vmstat_text[] = {
 	/* Zoned VM counters */
+	"nr_mapped",
 
 	/* Page state */
 	"nr_dirty",
 	"nr_writeback",
 	"nr_unstable",
 	"nr_page_table_pages",
-	"nr_mapped",
 	"nr_slab",
 
 	"pgpgin",