author     Christoph Lameter <clameter@sgi.com>    2006-06-30 04:55:35 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-06-30 14:25:34 -0400
commit     347ce434d57da80fd5809c0c836f206a50999c26
tree       f730d151be77977f594e5cc083a93bbeb4c602cc  /include/linux/pagemap.h
parent     65ba55f500a37272985d071c9bbb35256a2f7c14
[PATCH] zoned vm counters: conversion of nr_pagecache to per zone counter
Currently a single atomic variable is used to track the size of the page
cache for the whole machine. The zoned VM counters use the same
implementation method as the nr_pagecache code, but additionally allow the
page cache size to be determined per zone.
Remove the special implementation for nr_pagecache and make it a zoned counter
named NR_FILE_PAGES.
Updates of the page cache counters are always performed with interrupts off.
We can therefore use the __ variant here.
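
For illustration (the hunks touching the page cache call sites are not part of
this page's diff, so the exact locations below are an assumption), an update
site moves from the old batched helper to the __ variant of the zoned counter
API roughly as follows:

	/* before: approximate, per-cpu batched update of the global count */
	pagecache_acct(1);

	/* after: exact per-zone accounting via the zoned VM counters.
	 * The __ (non-irq-safe) variant is sufficient because the page
	 * cache counters are only ever updated with interrupts off. */
	__inc_zone_page_state(page, NR_FILE_PAGES);

	/* and the mirror image when a page leaves the page cache */
	__dec_zone_page_state(page, NR_FILE_PAGES);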
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r--   include/linux/pagemap.h   45
1 file changed, 0 insertions, 45 deletions
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1245df7141aa..0a2f5d27f60e 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -113,51 +113,6 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 extern void remove_from_page_cache(struct page *page);
 extern void __remove_from_page_cache(struct page *page);
 
-extern atomic_t nr_pagecache;
-
-#ifdef CONFIG_SMP
-
-#define PAGECACHE_ACCT_THRESHOLD	max(16, NR_CPUS * 2)
-DECLARE_PER_CPU(long, nr_pagecache_local);
-
-/*
- * pagecache_acct implements approximate accounting for pagecache.
- * vm_enough_memory() do not need high accuracy. Writers will keep
- * an offset in their per-cpu arena and will spill that into the
- * global count whenever the absolute value of the local count
- * exceeds the counter's threshold.
- *
- * MUST be protected from preemption.
- * current protection is mapping->page_lock.
- */
-static inline void pagecache_acct(int count)
-{
-	long *local;
-
-	local = &__get_cpu_var(nr_pagecache_local);
-	*local += count;
-	if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
-		atomic_add(*local, &nr_pagecache);
-		*local = 0;
-	}
-}
-
-#else
-
-static inline void pagecache_acct(int count)
-{
-	atomic_add(count, &nr_pagecache);
-}
-#endif
-
-static inline unsigned long get_page_cache_size(void)
-{
-	int ret = atomic_read(&nr_pagecache);
-	if (unlikely(ret < 0))
-		ret = 0;
-	return ret;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
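
Note that get_page_cache_size() is removed from this header without a
replacement here; readers of the page cache size move to the generic zoned
counter accessors elsewhere in the series. A minimal sketch of the read side,
assuming the vmstat API introduced by this series and a struct zone *zone
already in scope:

	/* machine-wide page cache size, formerly get_page_cache_size() */
	unsigned long cache_pages = global_page_state(NR_FILE_PAGES);

	/* newly possible with zoned counters: per-zone page cache size */
	unsigned long zone_cache_pages = zone_page_state(zone, NR_FILE_PAGES);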