Diffstat (limited to 'include/linux/vmstat.h')
-rw-r--r--  include/linux/vmstat.h | 170
1 file changed, 66 insertions, 104 deletions
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 16173b63ee67..3e0daf54133e 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -7,115 +7,77 @@
 #include <linux/mmzone.h>
 #include <asm/atomic.h>
 
+#ifdef CONFIG_VM_EVENT_COUNTERS
 /*
- * Global page accounting. One instance per CPU. Only unsigned longs are
- * allowed.
+ * Light weight per cpu counter implementation.
  *
- * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
- *   any time safely (which protects the instance from modification by
- *   interrupt.
- * - The __xxx_page_state variants can be used safely when interrupts are
- *   disabled.
- * - The __xxx_page_state variants can be used if the field is only
- *   modified from process context and protected from preemption, or only
- *   modified from interrupt context. In this case, the field should be
- *   commented here.
+ * Counters should only be incremented and no critical kernel component
+ * should rely on the counter values.
+ *
+ * Counters are handled completely inline. On many platforms the code
+ * generated will simply be the increment of a global address.
  */
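The new comment promises that a counter update compiles down to an inline
increment of a per cpu address. As a rough user-space analogy (everything
below is invented for illustration, with the per cpu machinery left out):

        /* One flat array of event counts, bumped inline at the call site. */
        enum { DEMO_PGFAULT, DEMO_PGMAJFAULT, NR_DEMO_EVENTS };

        static unsigned long demo_events[NR_DEMO_EVENTS];

        static inline void demo_count(int event)
        {
                demo_events[event]++;   /* a single increment instruction */
        }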
-struct page_state {
-       unsigned long pgpgin;           /* Disk reads */
-       unsigned long pgpgout;          /* Disk writes */
-       unsigned long pswpin;           /* swap reads */
-       unsigned long pswpout;          /* swap writes */
-
-       unsigned long pgalloc_high;     /* page allocations */
-       unsigned long pgalloc_normal;
-       unsigned long pgalloc_dma32;
-       unsigned long pgalloc_dma;
-
-       unsigned long pgfree;           /* page freeings */
-       unsigned long pgactivate;       /* pages moved inactive->active */
-       unsigned long pgdeactivate;     /* pages moved active->inactive */
-
-       unsigned long pgfault;          /* faults (major+minor) */
-       unsigned long pgmajfault;       /* faults (major only) */
-
-       unsigned long pgrefill_high;    /* inspected in refill_inactive_zone */
-       unsigned long pgrefill_normal;
-       unsigned long pgrefill_dma32;
-       unsigned long pgrefill_dma;
-
-       unsigned long pgsteal_high;     /* total highmem pages reclaimed */
-       unsigned long pgsteal_normal;
-       unsigned long pgsteal_dma32;
-       unsigned long pgsteal_dma;
-
-       unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
-       unsigned long pgscan_kswapd_normal;
-       unsigned long pgscan_kswapd_dma32;
-       unsigned long pgscan_kswapd_dma;
-
-       unsigned long pgscan_direct_high;/* total highmem pages scanned */
-       unsigned long pgscan_direct_normal;
-       unsigned long pgscan_direct_dma32;
-       unsigned long pgscan_direct_dma;
-
-       unsigned long pginodesteal;     /* pages reclaimed via inode freeing */
-       unsigned long slabs_scanned;    /* slab objects scanned */
-       unsigned long kswapd_steal;     /* pages reclaimed by kswapd */
-       unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
-       unsigned long pageoutrun;       /* kswapd's calls to page reclaim */
-       unsigned long allocstall;       /* direct reclaim calls */
-
-       unsigned long pgrotated;        /* pages rotated to tail of the LRU */
+
+#define FOR_ALL_ZONES(x) x##_DMA, x##_DMA32, x##_NORMAL, x##_HIGH
+
+enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
+               FOR_ALL_ZONES(PGALLOC),
+               PGFREE, PGACTIVATE, PGDEACTIVATE,
+               PGFAULT, PGMAJFAULT,
+               FOR_ALL_ZONES(PGREFILL),
+               FOR_ALL_ZONES(PGSTEAL),
+               FOR_ALL_ZONES(PGSCAN_KSWAPD),
+               FOR_ALL_ZONES(PGSCAN_DIRECT),
+               PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
+               PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+               NR_VM_EVENT_ITEMS
+};
+
+struct vm_event_state {
+       unsigned long event[NR_VM_EVENT_ITEMS];
 };
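FOR_ALL_ZONES() uses ## token pasting to emit one item per zone, in DMA,
DMA32, NORMAL, HIGH order, so each FOR_ALL_ZONES(...) line above contributes
four consecutive enum values. A minimal sketch of the expansion (the demo
enum name is invented):

        #define FOR_ALL_ZONES(x) x##_DMA, x##_DMA32, x##_NORMAL, x##_HIGH

        /* This declaration ... */
        enum vm_event_demo { FOR_ALL_ZONES(PGALLOC), NR_DEMO_ITEMS };

        /* ... is seen by the compiler as:
         *
         * enum vm_event_demo { PGALLOC_DMA, PGALLOC_DMA32,
         *                      PGALLOC_NORMAL, PGALLOC_HIGH, NR_DEMO_ITEMS };
         */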
 
-extern void get_full_page_state(struct page_state *ret);
-extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
-extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
-
-#define mod_page_state(member, delta)  \
-       mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define __mod_page_state(member, delta)        \
-       __mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define inc_page_state(member)         mod_page_state(member, 1UL)
-#define dec_page_state(member)         mod_page_state(member, 0UL - 1)
-#define add_page_state(member,delta)   mod_page_state(member, (delta))
-#define sub_page_state(member,delta)   mod_page_state(member, 0UL - (delta))
-
-#define __inc_page_state(member)       __mod_page_state(member, 1UL)
-#define __dec_page_state(member)       __mod_page_state(member, 0UL - 1)
-#define __add_page_state(member,delta) __mod_page_state(member, (delta))
-#define __sub_page_state(member,delta) __mod_page_state(member, 0UL - (delta))
-
-#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
-
+DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
+
+static inline void __count_vm_event(enum vm_event_item item)
+{
+       __get_cpu_var(vm_event_states.event[item])++;
+}
+
+static inline void count_vm_event(enum vm_event_item item)
+{
+       get_cpu_var(vm_event_states.event[item])++;
+       put_cpu();
+}
+
+static inline void __count_vm_events(enum vm_event_item item, long delta)
+{
+       __get_cpu_var(vm_event_states.event[item]) += delta;
+}
+
+static inline void count_vm_events(enum vm_event_item item, long delta)
+{
+       get_cpu_var(vm_event_states.event[item]) += delta;
+       put_cpu();
+}
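The underscored variants assume the caller already runs with preemption
disabled (or in interrupt context) and touch the per cpu data directly; the
plain variants wrap the increment in get_cpu_var()/put_cpu(), which disables
and re-enables preemption around it. A hypothetical call site (the
demo_swap_in*() functions are invented; PSWPIN is from the enum above):

        /* Preemptible context: let count_vm_event() pin the CPU itself. */
        static void demo_swap_in(void)
        {
                count_vm_event(PSWPIN);
        }

        /* Caller already holds preemption off, e.g. under a spinlock, so
         * the cheaper double-underscore form is enough. */
        static void demo_swap_in_locked(void)
        {
                __count_vm_event(PSWPIN);
        }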
-#define state_zone_offset(zone, member)                                        \
-({                                                                     \
-       unsigned offset;                                                \
-       if (is_highmem(zone))                                           \
-               offset = offsetof(struct page_state, member##_high);    \
-       else if (is_normal(zone))                                       \
-               offset = offsetof(struct page_state, member##_normal);  \
-       else if (is_dma32(zone))                                        \
-               offset = offsetof(struct page_state, member##_dma32);   \
-       else                                                            \
-               offset = offsetof(struct page_state, member##_dma);     \
-       offset;                                                         \
-})
 
+extern void all_vm_events(unsigned long *);
+extern void vm_events_fold_cpu(int cpu);
+
+#else
+
+/* Disable counters */
+#define get_cpu_vm_events(e)   0L
+#define count_vm_event(e)      do { } while (0)
+#define count_vm_events(e,d)   do { } while (0)
+#define __count_vm_event(e)    do { } while (0)
+#define __count_vm_events(e,d) do { } while (0)
+#define vm_events_fold_cpu(x)  do { } while (0)
+
+#endif /* CONFIG_VM_EVENT_COUNTERS */
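Because the disabled variants are macros rather than empty inline functions,
an argument the macro never uses is simply discarded by the preprocessor, so
call sites still compile even though enum vm_event_item only exists under
CONFIG_VM_EVENT_COUNTERS. A sketch of the effect (demo function invented):

        /* With CONFIG_VM_EVENT_COUNTERS=n, count_vm_event(PSWPIN) expands
         * to do { } while (0); the PSWPIN token is dropped before the
         * compiler would ever need a definition for it. */
        static void demo_swap_in(void)
        {
                count_vm_event(PSWPIN);
        }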
-#define __mod_page_state_zone(zone, member, delta)                     \
-       do {                                                            \
-               __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
-       } while (0)
-
-#define mod_page_state_zone(zone, member, delta)                       \
-       do {                                                            \
-               mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
-       } while (0)
-
-DECLARE_PER_CPU(struct page_state, page_states);
+
+#define __count_zone_vm_events(item, zone, delta) \
+               __count_vm_events(item##_DMA + zone_idx(zone), delta)
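__count_zone_vm_events() replaces the old state_zone_offset() chain of
is_highmem()/is_normal()/is_dma32() tests: since FOR_ALL_ZONES() lays the
per-zone items out in zone order, adding zone_idx(zone) to the _DMA item
selects the matching counter. A standalone sketch of that arithmetic (the
enum and index are stand-ins):

        #include <assert.h>

        /* Stand-in for the items FOR_ALL_ZONES(PGALLOC) emits. */
        enum { PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH };

        int main(void)
        {
                int idx = 2;    /* as if zone_idx() named ZONE_NORMAL */
                assert(PGALLOC_DMA + idx == PGALLOC_NORMAL);
                return 0;
        }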
 
 /*
  * Zone based page accounting with per cpu differentials.
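all_vm_events(), declared in the hunk above, fills a caller-supplied array
with the totals across CPUs, and vm_events_fold_cpu() folds a departing
CPU's counts into another CPU so the sums survive hotplug. A user-space
sketch of the summation (all names are stand-ins for the per cpu machinery):

        #define NR_DEMO_CPUS    4
        enum { DEMO_PGFAULT, DEMO_PGMAJFAULT, NR_DEMO_EVENTS };

        static unsigned long demo_cpu_events[NR_DEMO_CPUS][NR_DEMO_EVENTS];

        static void demo_all_events(unsigned long *sum)
        {
                int cpu, i;

                for (i = 0; i < NR_DEMO_EVENTS; i++) {
                        sum[i] = 0;
                        for (cpu = 0; cpu < NR_DEMO_CPUS; cpu++)
                                sum[i] += demo_cpu_events[cpu][i];
                }
        }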