author		Trond Myklebust <Trond.Myklebust@netapp.com>	2007-02-13 01:43:25 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2007-02-13 01:43:25 -0500
commit		d9bc125caf592b7d081021f32ce5b717efdf70c8 (patch)
tree		263b7066ba22ddce21db610c0300f6eaac6f2064 /include/linux/vmstat.h
parent		43d78ef2ba5bec26d0315859e8324bfc0be23766 (diff)
parent		ec2f9d1331f658433411c58077871e1eef4ee1b4 (diff)
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/
Conflicts:

	net/sunrpc/auth_gss/gss_krb5_crypto.c
	net/sunrpc/auth_gss/gss_spkm3_token.c
	net/sunrpc/clnt.c
Merge with mainline and fix conflicts.
Diffstat (limited to 'include/linux/vmstat.h')

-rw-r--r--	include/linux/vmstat.h	72
1 file changed, 51 insertions(+), 21 deletions(-)
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 5e9803ed17fc..acb1f105870c 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -3,20 +3,15 @@
 
 #include <linux/types.h>
 #include <linux/percpu.h>
+#include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <asm/atomic.h>
 
-#ifdef CONFIG_VM_EVENT_COUNTERS
-/*
- * Light weight per cpu counter implementation.
- *
- * Counters should only be incremented. You need to set EMBEDDED
- * to disable VM_EVENT_COUNTERS. Things like procps (vmstat,
- * top, etc) use /proc/vmstat and depend on these counters.
- *
- * Counters are handled completely inline. On many platforms the code
- * generated will simply be the increment of a global address.
- */
+#ifdef CONFIG_ZONE_DMA
+#define DMA_ZONE(xx) xx##_DMA,
+#else
+#define DMA_ZONE(xx)
+#endif
 
 #ifdef CONFIG_ZONE_DMA32
 #define DMA32_ZONE(xx) xx##_DMA32,
@@ -30,7 +25,7 @@
 #define HIGHMEM_ZONE(xx)
 #endif
 
-#define FOR_ALL_ZONES(xx) xx##_DMA, DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
 
 enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		FOR_ALL_ZONES(PGALLOC),
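
Note: with DMA_ZONE() joining the existing DMA32_ZONE() and HIGHMEM_ZONE() helpers, each conditional zone contributes its event names only when configured; FOR_ALL_ZONES() no longer hard-codes xx##_DMA. A minimal userspace sketch of the same X-macro pattern (toy config and event names for illustration; HIGHMEM omitted for brevity):

#include <stdio.h>

#define CONFIG_ZONE_DMA32	/* toy config: DMA32 present, classic DMA compiled out */

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL

enum vm_event_item { PGPGIN, PGPGOUT, FOR_ALL_ZONES(PGALLOC), NR_VM_EVENT_ITEMS };

int main(void)
{
	/* PGALLOC_DMA does not exist in this build; only configured zones get entries. */
	printf("PGALLOC_DMA32=%d PGALLOC_NORMAL=%d NR_VM_EVENT_ITEMS=%d\n",
	       PGALLOC_DMA32, PGALLOC_NORMAL, NR_VM_EVENT_ITEMS);
	return 0;
}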
@@ -45,6 +40,17 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		NR_VM_EVENT_ITEMS
 };
 
+#ifdef CONFIG_VM_EVENT_COUNTERS
+/*
+ * Light weight per cpu counter implementation.
+ *
+ * Counters should only be incremented and no critical kernel component
+ * should rely on the counter values.
+ *
+ * Counters are handled completely inline. On many platforms the code
+ * generated will simply be the increment of a global address.
+ */
+
 struct vm_event_state {
 	unsigned long event[NR_VM_EVENT_ITEMS];
 };
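
Note: the relocated comment now sits next to the structure it documents. For context, the enabled-path helpers that follow it in this header are plain per-CPU array increments, roughly like this (paraphrased from vmstat.h of the same era; not part of this hunk):

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;	/* pins the current CPU */
	put_cpu_var(vm_event_states);
}

static inline void __count_vm_event(enum vm_event_item item)
{
	/* Caller is responsible for preemption safety. */
	__get_cpu_var(vm_event_states).event[item]++;
}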
@@ -85,17 +91,30 @@ static inline void vm_events_fold_cpu(int cpu)
 #else
 
 /* Disable counters */
-#define get_cpu_vm_events(e)	0L
-#define count_vm_event(e)	do { } while (0)
-#define count_vm_events(e,d)	do { } while (0)
-#define __count_vm_event(e)	do { } while (0)
-#define __count_vm_events(e,d)	do { } while (0)
-#define vm_events_fold_cpu(x)	do { } while (0)
+static inline void count_vm_event(enum vm_event_item item)
+{
+}
+static inline void count_vm_events(enum vm_event_item item, long delta)
+{
+}
+static inline void __count_vm_event(enum vm_event_item item)
+{
+}
+static inline void __count_vm_events(enum vm_event_item item, long delta)
+{
+}
+static inline void all_vm_events(unsigned long *ret)
+{
+}
+static inline void vm_events_fold_cpu(int cpu)
+{
+}
 
 #endif /* CONFIG_VM_EVENT_COUNTERS */
 
 #define __count_zone_vm_events(item, zone, delta) \
-		__count_vm_events(item##_DMA + zone_idx(zone), delta)
+		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
+		zone_idx(zone), delta)
 
 /*
  * Zone based page accounting with per cpu differentials.
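
Note: the old __count_zone_vm_events() computed the event index as item##_DMA + zone_idx(zone), which breaks once PGALLOC_DMA can be compiled out. Anchoring on item##_NORMAL and subtracting ZONE_NORMAL works in every configuration, because the per-zone event entries are laid out in the same order as the zones. A standalone sketch of the arithmetic (enum values assume an all-zones build, names illustrative):

#include <assert.h>

/* Illustrative layout with every zone configured. */
enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM };
enum vm_event_item { PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH };

int main(void)
{
	/* item##_NORMAL - ZONE_NORMAL rebases zone_idx() onto the event enum. */
	assert(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_DMA == PGALLOC_DMA);
	assert(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_DMA32 == PGALLOC_DMA32);
	assert(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_HIGHMEM == PGALLOC_HIGH);
	return 0;
}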
@@ -142,14 +161,16 @@ static inline unsigned long node_page_state(int node,
 	struct zone *zones = NODE_DATA(node)->node_zones;
 
 	return
+#ifdef CONFIG_ZONE_DMA
+	zone_page_state(&zones[ZONE_DMA], item) +
+#endif
 #ifdef CONFIG_ZONE_DMA32
 	zone_page_state(&zones[ZONE_DMA32], item) +
 #endif
-	zone_page_state(&zones[ZONE_NORMAL], item) +
 #ifdef CONFIG_HIGHMEM
 	zone_page_state(&zones[ZONE_HIGHMEM], item) +
 #endif
-	zone_page_state(&zones[ZONE_DMA], item);
+	zone_page_state(&zones[ZONE_NORMAL], item);
 }
 
 extern void zone_statistics(struct zonelist *, struct zone *);
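
Note: every #ifdef'd term in node_page_state() ends with '+', so the expression needs one unconditional final term. ZONE_DMA used to serve that role; with CONFIG_ZONE_DMA now optional, ZONE_NORMAL, the only zone guaranteed to exist, moves to the end. A compilable toy model of the pattern (names and page counts invented):

#include <stdio.h>

#define CONFIG_ZONE_DMA32	/* toy config: no ZONE_DMA, no HIGHMEM */

enum { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };
static unsigned long toy_pages[MAX_NR_ZONES] = { 0, 10, 20, 0 };

static unsigned long toy_zone_page_state(int zone)
{
	return toy_pages[zone];
}

static unsigned long toy_node_page_state(void)
{
	return
#ifdef CONFIG_ZONE_DMA
	toy_zone_page_state(ZONE_DMA) +
#endif
#ifdef CONFIG_ZONE_DMA32
	toy_zone_page_state(ZONE_DMA32) +
#endif
#ifdef CONFIG_HIGHMEM
	toy_zone_page_state(ZONE_HIGHMEM) +
#endif
	toy_zone_page_state(ZONE_NORMAL);	/* unconditional final term */
}

int main(void)
{
	printf("%lu\n", toy_node_page_state());	/* prints 30 for this config */
	return 0;
}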
@@ -186,6 +207,9 @@ void inc_zone_page_state(struct page *, enum zone_stat_item);
 void dec_zone_page_state(struct page *, enum zone_stat_item);
 
 extern void inc_zone_state(struct zone *, enum zone_stat_item);
+extern void __inc_zone_state(struct zone *, enum zone_stat_item);
+extern void dec_zone_state(struct zone *, enum zone_stat_item);
+extern void __dec_zone_state(struct zone *, enum zone_stat_item);
 
 void refresh_cpu_vm_stats(int);
 void refresh_vm_stats(void);
@@ -214,6 +238,12 @@ static inline void __inc_zone_page_state(struct page *page,
 	__inc_zone_state(page_zone(page), item);
 }
 
+static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+	atomic_long_dec(&zone->vm_stat[item]);
+	atomic_long_dec(&vm_stat[item]);
+}
+
 static inline void __dec_zone_page_state(struct page *page,
 		enum zone_stat_item item)
 {
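
Note: the new inline __dec_zone_state() mirrors __inc_zone_state() on what appears to be the non-SMP path, where the "per cpu differentials" collapse to bare atomic updates of the zone and global vm_stat arrays. By this header's convention the double-underscore variants leave serialization to the caller; the plain wrappers in mm/vmstat.c typically disable interrupts around them, roughly like this (a sketch of the usual wrapper shape, not part of this diff):

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);		/* __ variant assumes caller-side protection */
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}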