diff options
author | Christoph Lameter <clameter@sgi.com> | 2006-06-30 04:55:35 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-06-30 14:25:34 -0400 |
commit | 347ce434d57da80fd5809c0c836f206a50999c26 (patch) | |
tree | f730d151be77977f594e5cc083a93bbeb4c602cc | |
parent | 65ba55f500a37272985d071c9bbb35256a2f7c14 (diff) |
[PATCH] zoned vm counters: conversion of nr_pagecache to per zone counter
Currently a single atomic variable is used to establish the size of the page
cache in the whole machine. The zoned VM counters have the same method of
implementation as the nr_pagecache code but also allow the determination of
the pagecache size per zone.
Remove the special implementation for nr_pagecache and make it a zoned counter
named NR_FILE_PAGES.
Updates of the page cache counters are always performed with interrupts off.
We can therefore use the __ variant here.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- | arch/s390/appldata/appldata_mem.c | 3 | ||||
-rw-r--r-- | arch/sparc/kernel/sys_sunos.c | 2 | ||||
-rw-r--r-- | arch/sparc64/kernel/sys_sunos32.c | 2 | ||||
-rw-r--r-- | drivers/base/node.c | 2 | ||||
-rw-r--r-- | fs/proc/proc_misc.c | 3 | ||||
-rw-r--r-- | include/linux/mmzone.h | 2 | ||||
-rw-r--r-- | include/linux/pagemap.h | 45 | ||||
-rw-r--r-- | mm/filemap.c | 4 | ||||
-rw-r--r-- | mm/mmap.c | 2 | ||||
-rw-r--r-- | mm/nommu.c | 2 | ||||
-rw-r--r-- | mm/page_alloc.c | 5 | ||||
-rw-r--r-- | mm/swap_state.c | 4 | ||||
-rw-r--r-- | mm/vmstat.c | 7 |
13 files changed, 16 insertions, 67 deletions
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c index 7915a197d96d..180ba79a6267 100644 --- a/arch/s390/appldata/appldata_mem.c +++ b/arch/s390/appldata/appldata_mem.c | |||
@@ -130,7 +130,8 @@ static void appldata_get_mem_data(void *data) | |||
130 | mem_data->totalhigh = P2K(val.totalhigh); | 130 | mem_data->totalhigh = P2K(val.totalhigh); |
131 | mem_data->freehigh = P2K(val.freehigh); | 131 | mem_data->freehigh = P2K(val.freehigh); |
132 | mem_data->bufferram = P2K(val.bufferram); | 132 | mem_data->bufferram = P2K(val.bufferram); |
133 | mem_data->cached = P2K(atomic_read(&nr_pagecache) - val.bufferram); | 133 | mem_data->cached = P2K(global_page_state(NR_FILE_PAGES) |
134 | - val.bufferram); | ||
134 | 135 | ||
135 | si_swapinfo(&val); | 136 | si_swapinfo(&val); |
136 | mem_data->totalswap = P2K(val.totalswap); | 137 | mem_data->totalswap = P2K(val.totalswap); |
diff --git a/arch/sparc/kernel/sys_sunos.c b/arch/sparc/kernel/sys_sunos.c index 288de276d9ff..aa0fb2efb615 100644 --- a/arch/sparc/kernel/sys_sunos.c +++ b/arch/sparc/kernel/sys_sunos.c | |||
@@ -196,7 +196,7 @@ asmlinkage int sunos_brk(unsigned long brk) | |||
196 | * simple, it hopefully works in most obvious cases.. Easy to | 196 | * simple, it hopefully works in most obvious cases.. Easy to |
197 | * fool it, but this should catch most mistakes. | 197 | * fool it, but this should catch most mistakes. |
198 | */ | 198 | */ |
199 | freepages = get_page_cache_size(); | 199 | freepages = global_page_state(NR_FILE_PAGES); |
200 | freepages >>= 1; | 200 | freepages >>= 1; |
201 | freepages += nr_free_pages(); | 201 | freepages += nr_free_pages(); |
202 | freepages += nr_swap_pages; | 202 | freepages += nr_swap_pages; |
diff --git a/arch/sparc64/kernel/sys_sunos32.c b/arch/sparc64/kernel/sys_sunos32.c index ae5b32f817f0..87ebdf858a3a 100644 --- a/arch/sparc64/kernel/sys_sunos32.c +++ b/arch/sparc64/kernel/sys_sunos32.c | |||
@@ -155,7 +155,7 @@ asmlinkage int sunos_brk(u32 baddr) | |||
155 | * simple, it hopefully works in most obvious cases.. Easy to | 155 | * simple, it hopefully works in most obvious cases.. Easy to |
156 | * fool it, but this should catch most mistakes. | 156 | * fool it, but this should catch most mistakes. |
157 | */ | 157 | */ |
158 | freepages = get_page_cache_size(); | 158 | freepages = global_page_state(NR_FILE_PAGES); |
159 | freepages >>= 1; | 159 | freepages >>= 1; |
160 | freepages += nr_free_pages(); | 160 | freepages += nr_free_pages(); |
161 | freepages += nr_swap_pages; | 161 | freepages += nr_swap_pages; |
diff --git a/drivers/base/node.c b/drivers/base/node.c index 8b1232320a99..ae9e3fea4b31 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c | |||
@@ -69,6 +69,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf) | |||
69 | "Node %d LowFree: %8lu kB\n" | 69 | "Node %d LowFree: %8lu kB\n" |
70 | "Node %d Dirty: %8lu kB\n" | 70 | "Node %d Dirty: %8lu kB\n" |
71 | "Node %d Writeback: %8lu kB\n" | 71 | "Node %d Writeback: %8lu kB\n" |
72 | "Node %d FilePages: %8lu kB\n" | ||
72 | "Node %d Mapped: %8lu kB\n" | 73 | "Node %d Mapped: %8lu kB\n" |
73 | "Node %d Slab: %8lu kB\n", | 74 | "Node %d Slab: %8lu kB\n", |
74 | nid, K(i.totalram), | 75 | nid, K(i.totalram), |
@@ -82,6 +83,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf) | |||
82 | nid, K(i.freeram - i.freehigh), | 83 | nid, K(i.freeram - i.freehigh), |
83 | nid, K(ps.nr_dirty), | 84 | nid, K(ps.nr_dirty), |
84 | nid, K(ps.nr_writeback), | 85 | nid, K(ps.nr_writeback), |
86 | nid, K(node_page_state(nid, NR_FILE_PAGES)), | ||
85 | nid, K(node_page_state(nid, NR_FILE_MAPPED)), | 87 | nid, K(node_page_state(nid, NR_FILE_MAPPED)), |
86 | nid, K(ps.nr_slab)); | 88 | nid, K(ps.nr_slab)); |
87 | n += hugetlb_report_node_meminfo(nid, buf + n); | 89 | n += hugetlb_report_node_meminfo(nid, buf + n); |
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index bc7d9abca743..1af12fd77fe6 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c | |||
@@ -142,7 +142,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off, | |||
142 | allowed = ((totalram_pages - hugetlb_total_pages()) | 142 | allowed = ((totalram_pages - hugetlb_total_pages()) |
143 | * sysctl_overcommit_ratio / 100) + total_swap_pages; | 143 | * sysctl_overcommit_ratio / 100) + total_swap_pages; |
144 | 144 | ||
145 | cached = get_page_cache_size() - total_swapcache_pages - i.bufferram; | 145 | cached = global_page_state(NR_FILE_PAGES) - |
146 | total_swapcache_pages - i.bufferram; | ||
146 | if (cached < 0) | 147 | if (cached < 0) |
147 | cached = 0; | 148 | cached = 0; |
148 | 149 | ||
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index eb42c1277023..08be91e6cecf 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -49,7 +49,7 @@ struct zone_padding { | |||
49 | enum zone_stat_item { | 49 | enum zone_stat_item { |
50 | NR_FILE_MAPPED, /* mapped into pagetables. | 50 | NR_FILE_MAPPED, /* mapped into pagetables. |
51 | only modified from process context */ | 51 | only modified from process context */ |
52 | 52 | NR_FILE_PAGES, | |
53 | NR_VM_ZONE_STAT_ITEMS }; | 53 | NR_VM_ZONE_STAT_ITEMS }; |
54 | 54 | ||
55 | struct per_cpu_pages { | 55 | struct per_cpu_pages { |
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 1245df7141aa..0a2f5d27f60e 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
@@ -113,51 +113,6 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping, | |||
113 | extern void remove_from_page_cache(struct page *page); | 113 | extern void remove_from_page_cache(struct page *page); |
114 | extern void __remove_from_page_cache(struct page *page); | 114 | extern void __remove_from_page_cache(struct page *page); |
115 | 115 | ||
116 | extern atomic_t nr_pagecache; | ||
117 | |||
118 | #ifdef CONFIG_SMP | ||
119 | |||
120 | #define PAGECACHE_ACCT_THRESHOLD max(16, NR_CPUS * 2) | ||
121 | DECLARE_PER_CPU(long, nr_pagecache_local); | ||
122 | |||
123 | /* | ||
124 | * pagecache_acct implements approximate accounting for pagecache. | ||
125 | * vm_enough_memory() do not need high accuracy. Writers will keep | ||
126 | * an offset in their per-cpu arena and will spill that into the | ||
127 | * global count whenever the absolute value of the local count | ||
128 | * exceeds the counter's threshold. | ||
129 | * | ||
130 | * MUST be protected from preemption. | ||
131 | * current protection is mapping->page_lock. | ||
132 | */ | ||
133 | static inline void pagecache_acct(int count) | ||
134 | { | ||
135 | long *local; | ||
136 | |||
137 | local = &__get_cpu_var(nr_pagecache_local); | ||
138 | *local += count; | ||
139 | if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) { | ||
140 | atomic_add(*local, &nr_pagecache); | ||
141 | *local = 0; | ||
142 | } | ||
143 | } | ||
144 | |||
145 | #else | ||
146 | |||
147 | static inline void pagecache_acct(int count) | ||
148 | { | ||
149 | atomic_add(count, &nr_pagecache); | ||
150 | } | ||
151 | #endif | ||
152 | |||
153 | static inline unsigned long get_page_cache_size(void) | ||
154 | { | ||
155 | int ret = atomic_read(&nr_pagecache); | ||
156 | if (unlikely(ret < 0)) | ||
157 | ret = 0; | ||
158 | return ret; | ||
159 | } | ||
160 | |||
161 | /* | 116 | /* |
162 | * Return byte-offset into filesystem object for page. | 117 | * Return byte-offset into filesystem object for page. |
163 | */ | 118 | */ |
diff --git a/mm/filemap.c b/mm/filemap.c index 648f2c0c8e18..87d62c44c3f0 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -120,7 +120,7 @@ void __remove_from_page_cache(struct page *page) | |||
120 | radix_tree_delete(&mapping->page_tree, page->index); | 120 | radix_tree_delete(&mapping->page_tree, page->index); |
121 | page->mapping = NULL; | 121 | page->mapping = NULL; |
122 | mapping->nrpages--; | 122 | mapping->nrpages--; |
123 | pagecache_acct(-1); | 123 | __dec_zone_page_state(page, NR_FILE_PAGES); |
124 | } | 124 | } |
125 | 125 | ||
126 | void remove_from_page_cache(struct page *page) | 126 | void remove_from_page_cache(struct page *page) |
@@ -449,7 +449,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping, | |||
449 | page->mapping = mapping; | 449 | page->mapping = mapping; |
450 | page->index = offset; | 450 | page->index = offset; |
451 | mapping->nrpages++; | 451 | mapping->nrpages++; |
452 | pagecache_acct(1); | 452 | __inc_zone_page_state(page, NR_FILE_PAGES); |
453 | } | 453 | } |
454 | write_unlock_irq(&mapping->tree_lock); | 454 | write_unlock_irq(&mapping->tree_lock); |
455 | radix_tree_preload_end(); | 455 | radix_tree_preload_end(); |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -96,7 +96,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin) | |||
96 | if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { | 96 | if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { |
97 | unsigned long n; | 97 | unsigned long n; |
98 | 98 | ||
99 | free = get_page_cache_size(); | 99 | free = global_page_state(NR_FILE_PAGES); |
100 | free += nr_swap_pages; | 100 | free += nr_swap_pages; |
101 | 101 | ||
102 | /* | 102 | /* |
diff --git a/mm/nommu.c b/mm/nommu.c index 029fadac0fb5..5151c44a8257 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -1122,7 +1122,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin) | |||
1122 | if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { | 1122 | if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { |
1123 | unsigned long n; | 1123 | unsigned long n; |
1124 | 1124 | ||
1125 | free = get_page_cache_size(); | 1125 | free = global_page_state(NR_FILE_PAGES); |
1126 | free += nr_swap_pages; | 1126 | free += nr_swap_pages; |
1127 | 1127 | ||
1128 | /* | 1128 | /* |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 04dd2b01b2b7..8350720f98a8 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -2124,16 +2124,11 @@ static int page_alloc_cpu_notify(struct notifier_block *self, | |||
2124 | unsigned long action, void *hcpu) | 2124 | unsigned long action, void *hcpu) |
2125 | { | 2125 | { |
2126 | int cpu = (unsigned long)hcpu; | 2126 | int cpu = (unsigned long)hcpu; |
2127 | long *count; | ||
2128 | unsigned long *src, *dest; | 2127 | unsigned long *src, *dest; |
2129 | 2128 | ||
2130 | if (action == CPU_DEAD) { | 2129 | if (action == CPU_DEAD) { |
2131 | int i; | 2130 | int i; |
2132 | 2131 | ||
2133 | /* Drain local pagecache count. */ | ||
2134 | count = &per_cpu(nr_pagecache_local, cpu); | ||
2135 | atomic_add(*count, &nr_pagecache); | ||
2136 | *count = 0; | ||
2137 | local_irq_disable(); | 2132 | local_irq_disable(); |
2138 | __drain_pages(cpu); | 2133 | __drain_pages(cpu); |
2139 | 2134 | ||
diff --git a/mm/swap_state.c b/mm/swap_state.c index 7535211bb495..fccbd9bba77b 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c | |||
@@ -87,7 +87,7 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry, | |||
87 | SetPageSwapCache(page); | 87 | SetPageSwapCache(page); |
88 | set_page_private(page, entry.val); | 88 | set_page_private(page, entry.val); |
89 | total_swapcache_pages++; | 89 | total_swapcache_pages++; |
90 | pagecache_acct(1); | 90 | __inc_zone_page_state(page, NR_FILE_PAGES); |
91 | } | 91 | } |
92 | write_unlock_irq(&swapper_space.tree_lock); | 92 | write_unlock_irq(&swapper_space.tree_lock); |
93 | radix_tree_preload_end(); | 93 | radix_tree_preload_end(); |
@@ -132,7 +132,7 @@ void __delete_from_swap_cache(struct page *page) | |||
132 | set_page_private(page, 0); | 132 | set_page_private(page, 0); |
133 | ClearPageSwapCache(page); | 133 | ClearPageSwapCache(page); |
134 | total_swapcache_pages--; | 134 | total_swapcache_pages--; |
135 | pagecache_acct(-1); | 135 | __dec_zone_page_state(page, NR_FILE_PAGES); |
136 | INC_CACHE_INFO(del_total); | 136 | INC_CACHE_INFO(del_total); |
137 | } | 137 | } |
138 | 138 | ||
diff --git a/mm/vmstat.c b/mm/vmstat.c index 4800091c129a..f16b33eb6d5c 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -20,12 +20,6 @@ | |||
20 | */ | 20 | */ |
21 | DEFINE_PER_CPU(struct page_state, page_states) = {0}; | 21 | DEFINE_PER_CPU(struct page_state, page_states) = {0}; |
22 | 22 | ||
23 | atomic_t nr_pagecache = ATOMIC_INIT(0); | ||
24 | EXPORT_SYMBOL(nr_pagecache); | ||
25 | #ifdef CONFIG_SMP | ||
26 | DEFINE_PER_CPU(long, nr_pagecache_local) = 0; | ||
27 | #endif | ||
28 | |||
29 | static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask) | 23 | static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask) |
30 | { | 24 | { |
31 | unsigned cpu; | 25 | unsigned cpu; |
@@ -402,6 +396,7 @@ struct seq_operations fragmentation_op = { | |||
402 | static char *vmstat_text[] = { | 396 | static char *vmstat_text[] = { |
403 | /* Zoned VM counters */ | 397 | /* Zoned VM counters */ |
404 | "nr_mapped", | 398 | "nr_mapped", |
399 | "nr_file_pages", | ||
405 | 400 | ||
406 | /* Page state */ | 401 | /* Page state */ |
407 | "nr_dirty", | 402 | "nr_dirty", |