diff options
Diffstat (limited to 'include/linux')
-rw-r--r-- | include/linux/mmzone.h |   9 |
-rw-r--r-- | include/linux/vmstat.h | 129 |
2 files changed, 137 insertions(+), 1 deletion(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index d6120fa69116..543f9e411563 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -46,6 +46,9 @@ struct zone_padding { | |||
46 | #define ZONE_PADDING(name) | 46 | #define ZONE_PADDING(name) |
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | enum zone_stat_item { | ||
50 | NR_VM_ZONE_STAT_ITEMS }; | ||
51 | |||
49 | struct per_cpu_pages { | 52 | struct per_cpu_pages { |
50 | int count; /* number of pages in the list */ | 53 | int count; /* number of pages in the list */ |
51 | int high; /* high watermark, emptying needed */ | 54 | int high; /* high watermark, emptying needed */ |
@@ -55,6 +58,10 @@ struct per_cpu_pages { | |||
55 | 58 | ||
56 | struct per_cpu_pageset { | 59 | struct per_cpu_pageset { |
57 | struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */ | 60 | struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */ |
61 | #ifdef CONFIG_SMP | ||
62 | s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; | ||
63 | #endif | ||
64 | |||
58 | #ifdef CONFIG_NUMA | 65 | #ifdef CONFIG_NUMA |
59 | unsigned long numa_hit; /* allocated in intended node */ | 66 | unsigned long numa_hit; /* allocated in intended node */ |
60 | unsigned long numa_miss; /* allocated in non intended node */ | 67 | unsigned long numa_miss; /* allocated in non intended node */ |
@@ -165,6 +172,8 @@ struct zone { | |||
165 | /* A count of how many reclaimers are scanning this zone */ | 172 | /* A count of how many reclaimers are scanning this zone */ |
166 | atomic_t reclaim_in_progress; | 173 | atomic_t reclaim_in_progress; |
167 | 174 | ||
175 | /* Zone statistics */ | ||
176 | atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; | ||
168 | /* | 177 | /* |
169 | * timestamp (in jiffies) of the last zone reclaim that did not | 178 | * timestamp (in jiffies) of the last zone reclaim that did not |
170 | * result in freeing of pages. This is used to avoid repeated scans | 179 | * result in freeing of pages. This is used to avoid repeated scans |
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 3ca0c1989fc2..3fd5c11e544a 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
@@ -3,6 +3,9 @@ | |||
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/percpu.h> | 5 | #include <linux/percpu.h> |
6 | #include <linux/config.h> | ||
7 | #include <linux/mmzone.h> | ||
8 | #include <asm/atomic.h> | ||
6 | 9 | ||
7 | /* | 10 | /* |
8 | * Global page accounting. One instance per CPU. Only unsigned longs are | 11 | * Global page accounting. One instance per CPU. Only unsigned longs are |
@@ -134,5 +137,129 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta); | |||
134 | 137 | ||
135 | DECLARE_PER_CPU(struct page_state, page_states); | 138 | DECLARE_PER_CPU(struct page_state, page_states); |
136 | 139 | ||
137 | #endif /* _LINUX_VMSTAT_H */ | 140 | /* |
141 | * Zone based page accounting with per cpu differentials. | ||
142 | */ | ||
143 | extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; | ||
144 | |||
145 | static inline void zone_page_state_add(long x, struct zone *zone, | ||
146 | enum zone_stat_item item) | ||
147 | { | ||
148 | atomic_long_add(x, &zone->vm_stat[item]); | ||
149 | atomic_long_add(x, &vm_stat[item]); | ||
150 | } | ||
151 | |||
152 | static inline unsigned long global_page_state(enum zone_stat_item item) | ||
153 | { | ||
154 | long x = atomic_long_read(&vm_stat[item]); | ||
155 | #ifdef CONFIG_SMP | ||
156 | if (x < 0) | ||
157 | x = 0; | ||
158 | #endif | ||
159 | return x; | ||
160 | } | ||
161 | |||
162 | static inline unsigned long zone_page_state(struct zone *zone, | ||
163 | enum zone_stat_item item) | ||
164 | { | ||
165 | long x = atomic_long_read(&zone->vm_stat[item]); | ||
166 | #ifdef CONFIG_SMP | ||
167 | if (x < 0) | ||
168 | x = 0; | ||
169 | #endif | ||
170 | return x; | ||
171 | } | ||
172 | |||
173 | #ifdef CONFIG_NUMA | ||
174 | /* | ||
175 | * Determine the per node value of a stat item. This function | ||
176 | * is called frequently in a NUMA machine, so try to be as | ||
177 | * frugal as possible. | ||
178 | */ | ||
179 | static inline unsigned long node_page_state(int node, | ||
180 | enum zone_stat_item item) | ||
181 | { | ||
182 | struct zone *zones = NODE_DATA(node)->node_zones; | ||
183 | |||
184 | return | ||
185 | #ifndef CONFIG_DMA_IS_NORMAL | ||
186 | #if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64 | ||
187 | zone_page_state(&zones[ZONE_DMA32], item) + | ||
188 | #endif | ||
189 | zone_page_state(&zones[ZONE_NORMAL], item) + | ||
190 | #endif | ||
191 | #ifdef CONFIG_HIGHMEM | ||
192 | zone_page_state(&zones[ZONE_HIGHMEM], item) + | ||
193 | #endif | ||
194 | zone_page_state(&zones[ZONE_DMA], item); | ||
195 | } | ||
196 | #else | ||
197 | #define node_page_state(node, item) global_page_state(item) | ||
198 | #endif | ||
199 | |||
200 | #define __add_zone_page_state(__z, __i, __d) \ | ||
201 | __mod_zone_page_state(__z, __i, __d) | ||
202 | #define __sub_zone_page_state(__z, __i, __d) \ | ||
203 | __mod_zone_page_state(__z, __i,-(__d)) | ||
204 | |||
205 | #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d) | ||
206 | #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) | ||
207 | |||
208 | static inline void zap_zone_vm_stats(struct zone *zone) | ||
209 | { | ||
210 | memset(zone->vm_stat, 0, sizeof(zone->vm_stat)); | ||
211 | } | ||
212 | |||
213 | #ifdef CONFIG_SMP | ||
214 | void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int); | ||
215 | void __inc_zone_page_state(struct page *, enum zone_stat_item); | ||
216 | void __dec_zone_page_state(struct page *, enum zone_stat_item); | ||
138 | 217 | ||
218 | void mod_zone_page_state(struct zone *, enum zone_stat_item, int); | ||
219 | void inc_zone_page_state(struct page *, enum zone_stat_item); | ||
220 | void dec_zone_page_state(struct page *, enum zone_stat_item); | ||
221 | |||
222 | extern void inc_zone_state(struct zone *, enum zone_stat_item); | ||
223 | |||
224 | void refresh_cpu_vm_stats(int); | ||
225 | void refresh_vm_stats(void); | ||
226 | |||
227 | #else /* CONFIG_SMP */ | ||
228 | |||
229 | /* | ||
230 | * We do not maintain differentials in a single processor configuration. | ||
231 | * The functions directly modify the zone and global counters. | ||
232 | */ | ||
233 | static inline void __mod_zone_page_state(struct zone *zone, | ||
234 | enum zone_stat_item item, int delta) | ||
235 | { | ||
236 | zone_page_state_add(delta, zone, item); | ||
237 | } | ||
238 | |||
239 | static inline void __inc_zone_page_state(struct page *page, | ||
240 | enum zone_stat_item item) | ||
241 | { | ||
242 | atomic_long_inc(&page_zone(page)->vm_stat[item]); | ||
243 | atomic_long_inc(&vm_stat[item]); | ||
244 | } | ||
245 | |||
246 | static inline void __dec_zone_page_state(struct page *page, | ||
247 | enum zone_stat_item item) | ||
248 | { | ||
249 | atomic_long_dec(&page_zone(page)->vm_stat[item]); | ||
250 | atomic_long_dec(&vm_stat[item]); | ||
251 | } | ||
252 | |||
253 | /* | ||
254 | * We only use atomic operations to update counters. So there is no need to | ||
255 | * disable interrupts. | ||
256 | */ | ||
257 | #define inc_zone_page_state __inc_zone_page_state | ||
258 | #define dec_zone_page_state __dec_zone_page_state | ||
259 | #define mod_zone_page_state __mod_zone_page_state | ||
260 | |||
261 | static inline void refresh_cpu_vm_stats(int cpu) { } | ||
262 | static inline void refresh_vm_stats(void) { } | ||
263 | #endif | ||
264 | |||
265 | #endif /* _LINUX_VMSTAT_H */ | ||