aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/vmstat.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/vmstat.h')
-rw-r--r--include/linux/vmstat.h129
1 file changed, 128 insertions(+), 1 deletion(-)
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 3ca0c1989fc2..3fd5c11e544a 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -3,6 +3,9 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/percpu.h> 5#include <linux/percpu.h>
6#include <linux/config.h>
7#include <linux/mmzone.h>
8#include <asm/atomic.h>
6 9
7/* 10/*
8 * Global page accounting. One instance per CPU. Only unsigned longs are 11 * Global page accounting. One instance per CPU. Only unsigned longs are
@@ -134,5 +137,129 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
134 137
135DECLARE_PER_CPU(struct page_state, page_states); 138DECLARE_PER_CPU(struct page_state, page_states);
136 139
137#endif /* _LINUX_VMSTAT_H */ 140/*
141 * Zone based page accounting with per cpu differentials.
142 */
143extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
144
145static inline void zone_page_state_add(long x, struct zone *zone,
146 enum zone_stat_item item)
147{
148 atomic_long_add(x, &zone->vm_stat[item]);
149 atomic_long_add(x, &vm_stat[item]);
150}
151
152static inline unsigned long global_page_state(enum zone_stat_item item)
153{
154 long x = atomic_long_read(&vm_stat[item]);
155#ifdef CONFIG_SMP
156 if (x < 0)
157 x = 0;
158#endif
159 return x;
160}
161
162static inline unsigned long zone_page_state(struct zone *zone,
163 enum zone_stat_item item)
164{
165 long x = atomic_long_read(&zone->vm_stat[item]);
166#ifdef CONFIG_SMP
167 if (x < 0)
168 x = 0;
169#endif
170 return x;
171}
172
173#ifdef CONFIG_NUMA
174/*
175 * Determine the per node value of a stat item. This function
176 * is called frequently in a NUMA machine, so try to be as
177 * frugal as possible.
178 */
179static inline unsigned long node_page_state(int node,
180 enum zone_stat_item item)
181{
182 struct zone *zones = NODE_DATA(node)->node_zones;
183
184 return
185#ifndef CONFIG_DMA_IS_NORMAL
186#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
187 zone_page_state(&zones[ZONE_DMA32], item) +
188#endif
189 zone_page_state(&zones[ZONE_NORMAL], item) +
190#endif
191#ifdef CONFIG_HIGHMEM
192 zone_page_state(&zones[ZONE_HIGHMEM], item) +
193#endif
194 zone_page_state(&zones[ZONE_DMA], item);
195}
196#else
197#define node_page_state(node, item) global_page_state(item)
198#endif
199
200#define __add_zone_page_state(__z, __i, __d) \
201 __mod_zone_page_state(__z, __i, __d)
202#define __sub_zone_page_state(__z, __i, __d) \
203 __mod_zone_page_state(__z, __i,-(__d))
204
205#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
206#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
207
208static inline void zap_zone_vm_stats(struct zone *zone)
209{
210 memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
211}
212
213#ifdef CONFIG_SMP
214void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
215void __inc_zone_page_state(struct page *, enum zone_stat_item);
216void __dec_zone_page_state(struct page *, enum zone_stat_item);
138 217
218void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
219void inc_zone_page_state(struct page *, enum zone_stat_item);
220void dec_zone_page_state(struct page *, enum zone_stat_item);
221
222extern void inc_zone_state(struct zone *, enum zone_stat_item);
223
224void refresh_cpu_vm_stats(int);
225void refresh_vm_stats(void);
226
227#else /* CONFIG_SMP */
228
229/*
230 * We do not maintain differentials in a single processor configuration.
231 * The functions directly modify the zone and global counters.
232 */
233static inline void __mod_zone_page_state(struct zone *zone,
234 enum zone_stat_item item, int delta)
235{
236 zone_page_state_add(delta, zone, item);
237}
238
239static inline void __inc_zone_page_state(struct page *page,
240 enum zone_stat_item item)
241{
242 atomic_long_inc(&page_zone(page)->vm_stat[item]);
243 atomic_long_inc(&vm_stat[item]);
244}
245
246static inline void __dec_zone_page_state(struct page *page,
247 enum zone_stat_item item)
248{
249 atomic_long_dec(&page_zone(page)->vm_stat[item]);
250 atomic_long_dec(&vm_stat[item]);
251}
252
253/*
254 * We only use atomic operations to update counters. So there is no need to
255 * disable interrupts.
256 */
257#define inc_zone_page_state __inc_zone_page_state
258#define dec_zone_page_state __dec_zone_page_state
259#define mod_zone_page_state __mod_zone_page_state
260
261static inline void refresh_cpu_vm_stats(int cpu) { }
262static inline void refresh_vm_stats(void) { }
263#endif
264
265#endif /* _LINUX_VMSTAT_H */