diff options
author    Christoph Lameter <clameter@sgi.com>    2006-06-30 04:55:39 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-06-30 14:25:35 -0400
commit    b1e7a8fd854d2f895730e82137400012b509650e (patch)
tree      9fba87ff6b0146ebd4ee5bc7d5f0c8b037dbb3ad /mm
parent    df849a1529c106f7460e51479ca78fe07b07dc8c (diff)
[PATCH] zoned vm counters: conversion of nr_dirty to per zone counter
This makes nr_dirty a per zone counter. Looping over all processors is
avoided during writeback state determination.
The counter aggregation for nr_dirty had to be undone in the NFS layer since
we summed up the page counts from multiple zones. Someone more familiar with
NFS should probably review what I have done.
[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
 mm/page-writeback.c | 11 ++++++-----
 mm/page_alloc.c     |  2 +-
 mm/vmstat.c         |  2 +-
 3 files changed, 8 insertions(+), 7 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0faacfe18909..da8547830098 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -109,7 +109,7 @@ struct writeback_state
109 | 109 | ||
110 | static void get_writeback_state(struct writeback_state *wbs) | 110 | static void get_writeback_state(struct writeback_state *wbs) |
111 | { | 111 | { |
112 | wbs->nr_dirty = read_page_state(nr_dirty); | 112 | wbs->nr_dirty = global_page_state(NR_FILE_DIRTY); |
113 | wbs->nr_unstable = read_page_state(nr_unstable); | 113 | wbs->nr_unstable = read_page_state(nr_unstable); |
114 | wbs->nr_mapped = global_page_state(NR_FILE_MAPPED) + | 114 | wbs->nr_mapped = global_page_state(NR_FILE_MAPPED) + |
115 | global_page_state(NR_ANON_PAGES); | 115 | global_page_state(NR_ANON_PAGES); |
@@ -641,7 +641,8 @@ int __set_page_dirty_nobuffers(struct page *page)
641 | if (mapping2) { /* Race with truncate? */ | 641 | if (mapping2) { /* Race with truncate? */ |
642 | BUG_ON(mapping2 != mapping); | 642 | BUG_ON(mapping2 != mapping); |
643 | if (mapping_cap_account_dirty(mapping)) | 643 | if (mapping_cap_account_dirty(mapping)) |
644 | inc_page_state(nr_dirty); | 644 | __inc_zone_page_state(page, |
645 | NR_FILE_DIRTY); | ||
645 | radix_tree_tag_set(&mapping->page_tree, | 646 | radix_tree_tag_set(&mapping->page_tree, |
646 | page_index(page), PAGECACHE_TAG_DIRTY); | 647 | page_index(page), PAGECACHE_TAG_DIRTY); |
647 | } | 648 | } |
@@ -728,9 +729,9 @@ int test_clear_page_dirty(struct page *page)
728 | radix_tree_tag_clear(&mapping->page_tree, | 729 | radix_tree_tag_clear(&mapping->page_tree, |
729 | page_index(page), | 730 | page_index(page), |
730 | PAGECACHE_TAG_DIRTY); | 731 | PAGECACHE_TAG_DIRTY); |
731 | write_unlock_irqrestore(&mapping->tree_lock, flags); | ||
732 | if (mapping_cap_account_dirty(mapping)) | 732 | if (mapping_cap_account_dirty(mapping)) |
733 | dec_page_state(nr_dirty); | 733 | __dec_zone_page_state(page, NR_FILE_DIRTY); |
734 | write_unlock_irqrestore(&mapping->tree_lock, flags); | ||
734 | return 1; | 735 | return 1; |
735 | } | 736 | } |
736 | write_unlock_irqrestore(&mapping->tree_lock, flags); | 737 | write_unlock_irqrestore(&mapping->tree_lock, flags); |
@@ -761,7 +762,7 @@ int clear_page_dirty_for_io(struct page *page)
761 | if (mapping) { | 762 | if (mapping) { |
762 | if (TestClearPageDirty(page)) { | 763 | if (TestClearPageDirty(page)) { |
763 | if (mapping_cap_account_dirty(mapping)) | 764 | if (mapping_cap_account_dirty(mapping)) |
764 | dec_page_state(nr_dirty); | 765 | dec_zone_page_state(page, NR_FILE_DIRTY); |
765 | return 1; | 766 | return 1; |
766 | } | 767 | } |
767 | return 0; | 768 | return 0; |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ed3f2a7b4071..c2b9aa4acc46 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1314,7 +1314,7 @@ void show_free_areas(void)
1314 | "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n", | 1314 | "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n", |
1315 | active, | 1315 | active, |
1316 | inactive, | 1316 | inactive, |
1317 | ps.nr_dirty, | 1317 | global_page_state(NR_FILE_DIRTY), |
1318 | ps.nr_writeback, | 1318 | ps.nr_writeback, |
1319 | ps.nr_unstable, | 1319 | ps.nr_unstable, |
1320 | nr_free_pages(), | 1320 | nr_free_pages(), |
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 292a35fe56c9..1982fb533a40 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -400,9 +400,9 @@ static char *vmstat_text[] = {
400 | "nr_file_pages", | 400 | "nr_file_pages", |
401 | "nr_slab", | 401 | "nr_slab", |
402 | "nr_page_table_pages", | 402 | "nr_page_table_pages", |
403 | "nr_dirty", | ||
403 | 404 | ||
404 | /* Page state */ | 405 | /* Page state */ |
405 | "nr_dirty", | ||
406 | "nr_writeback", | 406 | "nr_writeback", |
407 | "nr_unstable", | 407 | "nr_unstable", |
408 | 408 | ||