author	Christoph Lameter <clameter@sgi.com>	2006-06-30 04:55:39 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-30 14:25:35 -0400
commit	b1e7a8fd854d2f895730e82137400012b509650e (patch)
tree	9fba87ff6b0146ebd4ee5bc7d5f0c8b037dbb3ad /mm/page-writeback.c
parent	df849a1529c106f7460e51479ca78fe07b07dc8c (diff)
[PATCH] zoned vm counters: conversion of nr_dirty to per zone counter
This makes nr_dirty a per zone counter. Looping over all processors is
avoided during writeback state determination.
The counter aggregation for nr_dirty had to be undone in the NFS layer since
we summed up the page counts from multiple zones. Someone more familiar with
NFS should probably review what I have done.
[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
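Why this avoids looping over all processors: the old read_page_state(nr_dirty) summed a per-CPU counter across every possible CPU on each call, whereas global_page_state(NR_FILE_DIRTY) reads totals that are already maintained per zone, so the cost no longer scales with the machine's processor count. A minimal userspace sketch of that difference follows; the names (per_cpu_nr_dirty, zone_nr_dirty) and the NR_CPUS/NR_ZONES values are illustrative placeholders, not the kernel's implementation.

/*
 * Illustrative sketch only -- not the kernel code.
 * "Before": one global event counter kept per CPU; reading it means
 * summing the contribution of every possible processor.
 * "After": one ready-made total per zone; reading it is a loop over
 * a handful of zones.
 */
#include <stdio.h>

#define NR_CPUS   64	/* assumed machine size for the sketch */
#define NR_ZONES   3	/* e.g. DMA, NORMAL, HIGHMEM (assumed) */

static long per_cpu_nr_dirty[NR_CPUS];	/* old scheme */
static long zone_nr_dirty[NR_ZONES];	/* new scheme */

/* Old-style read: O(NR_CPUS) on every get_writeback_state() call. */
static long read_global_dirty_old(void)
{
	long sum = 0;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += per_cpu_nr_dirty[cpu];
	return sum;
}

/* New-style read: O(NR_ZONES), independent of the processor count. */
static long read_global_dirty_new(void)
{
	long sum = 0;
	for (int zone = 0; zone < NR_ZONES; zone++)
		sum += zone_nr_dirty[zone];
	return sum;
}

int main(void)
{
	per_cpu_nr_dirty[3] = 5;	/* CPU 3 dirtied five pages (old scheme) */
	zone_nr_dirty[1] = 5;		/* the same pages, accounted to zone 1 */
	printf("old read: %ld, new read: %ld\n",
	       read_global_dirty_old(), read_global_dirty_new());
	return 0;
}

The per-zone totals also make the counter meaningful for zone-local decisions, which is what motivates the zoned VM counters series this patch belongs to.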
Diffstat (limited to 'mm/page-writeback.c')
 mm/page-writeback.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0faacfe18909..da8547830098 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -109,7 +109,7 @@ struct writeback_state
 
 static void get_writeback_state(struct writeback_state *wbs)
 {
-	wbs->nr_dirty = read_page_state(nr_dirty);
+	wbs->nr_dirty = global_page_state(NR_FILE_DIRTY);
 	wbs->nr_unstable = read_page_state(nr_unstable);
 	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED) +
 			global_page_state(NR_ANON_PAGES);
@@ -641,7 +641,8 @@ int __set_page_dirty_nobuffers(struct page *page)
 			if (mapping2) { /* Race with truncate? */
 				BUG_ON(mapping2 != mapping);
 				if (mapping_cap_account_dirty(mapping))
-					inc_page_state(nr_dirty);
+					__inc_zone_page_state(page,
+							NR_FILE_DIRTY);
 				radix_tree_tag_set(&mapping->page_tree,
 					page_index(page), PAGECACHE_TAG_DIRTY);
 			}
@@ -728,9 +729,9 @@ int test_clear_page_dirty(struct page *page)
 			radix_tree_tag_clear(&mapping->page_tree,
 						page_index(page),
 						PAGECACHE_TAG_DIRTY);
-			write_unlock_irqrestore(&mapping->tree_lock, flags);
 			if (mapping_cap_account_dirty(mapping))
-				dec_page_state(nr_dirty);
+				__dec_zone_page_state(page, NR_FILE_DIRTY);
+			write_unlock_irqrestore(&mapping->tree_lock, flags);
 			return 1;
 		}
 		write_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -761,7 +762,7 @@ int clear_page_dirty_for_io(struct page *page)
 	if (mapping) {
 		if (TestClearPageDirty(page)) {
 			if (mapping_cap_account_dirty(mapping))
-				dec_page_state(nr_dirty);
+				dec_zone_page_state(page, NR_FILE_DIRTY);
 			return 1;
 		}
 		return 0;
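Note the [akpm@osdl.org: bugfix] visible in the test_clear_page_dirty() hunk: write_unlock_irqrestore() moves below the accounting, so __dec_zone_page_state() runs with tree_lock still held and interrupts disabled. By kernel convention the double-underscore counter variants assume the caller has already disabled interrupts or preemption, while the plain dec_zone_page_state() in clear_page_dirty_for_io() provides that protection itself. A rough userspace analogue of that locked/unlocked pairing follows; counter_add()/__counter_add() are invented names standing in for the real vmstat helpers, and a pthread mutex stands in for interrupt disabling.

/*
 * Sketch of the locked/unlocked counter convention -- illustrative,
 * not the kernel's vmstat API.
 */
#include <pthread.h>

static long counter;
static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;

/* "__" variant: caller must already hold counter_lock. */
static void __counter_add(long delta)
{
	counter += delta;	/* safe only because the lock is held */
}

/* Plain variant: takes and releases the lock itself. */
static void counter_add(long delta)
{
	pthread_mutex_lock(&counter_lock);
	__counter_add(delta);
	pthread_mutex_unlock(&counter_lock);
}

int main(void)
{
	counter_add(+1);	/* page dirtied, no lock held by caller */
	counter_add(-1);	/* page cleaned */
	return 0;
}

Using the __ variant inside an already-locked section avoids taking the protection twice, which is exactly why the hunk pairs __dec_zone_page_state() with the relocated unlock.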