author     Christoph Lameter <clameter@sgi.com>    2006-06-30 04:55:33 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-06-30 14:25:34 -0400
commit     2244b95a7bcf8d24196f8a3a44187ba5dfff754c (patch)
tree       771ef8eae45c2794fd73f870109c74d67c28888a /include/linux
parent     f6ac2354d791195ca40822b84d73d48a4e8b7f2b (diff)
[PATCH] zoned vm counters: basic ZVC (zoned vm counter) implementation
Per zone counter infrastructure

The counters that we currently have for the VM are split per processor. The
processor, however, has little to do with the zone these pages belong to. We
cannot tell, for example, how many ZONE_DMA pages are dirty.

So we are blind to potential imbalances in the usage of memory across zones.
For example, in a NUMA system we cannot tell how many pages are dirty on a
particular node. If we knew, we could put measures into the VM to balance the
use of memory between different zones and different nodes in a NUMA system.
For example, it would be possible to limit the dirty pages per node so that
fast local memory is kept available even if a process is dirtying huge
amounts of pages.

Another example is zone reclaim. We do not know how many unmapped pages exist
per zone, so we just have to try to reclaim. If it is not working then we
pause and try again later. It would be better if we knew when it makes sense
to reclaim unmapped pages from a zone. This patchset allows the determination
of the number of unmapped pages per zone, and we can remove the zone reclaim
interval with the counters introduced here.

Furthermore, the ability to have various usage statistics available will
allow the development of new NUMA balancing algorithms that may be able to
improve the scheduler's decisions about when to move a process to another
node, and hopefully will also enable automatic page migration through a user
space program that can analyse the memory load distribution and then
rebalance memory use in order to increase performance.

The counter framework here implements differential counters for each
processor in struct zone. The differential counters are consolidated when a
threshold is exceeded (as done in the current implementation for
nr_pagecache), when slab reaping occurs, or when a consolidation function is
called. Consolidation uses atomic operations and accumulates counters per
zone in the zone structure and also globally in the vm_stat array. VM
functions can access the counts by simply indexing a global or zone-specific
array.

The arrangement of counters in an array also simplifies processing when
output has to be generated for /proc/*.

Counters can be updated by calling inc/dec_zone_page_state or
__inc/__dec_zone_page_state, analogous to *_page_state. The second group of
functions can be called if it is known that interrupts are disabled.

Special optimized increment and decrement functions are provided. These can
avoid certain checks and use increment or decrement instructions that an
architecture may provide.

We also add a new CONFIG_DMA_IS_NORMAL that signifies that an architecture
can do DMA to all memory and therefore ZONE_NORMAL will not be populated.
This is currently only set for IA64 SGI SN2 and currently only affects
node_page_state(). In the best case node_page_state() can be reduced to
retrieving a single counter for the one zone on the node.

[akpm@osdl.org: cleanups]
[akpm@osdl.org: export vm_stat[] for filesystems]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
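As a concrete illustration of the calling convention described above, the
sketch below shows how VM code might update and read the new zoned counters.
It is not part of the patch: NR_EXAMPLE_PAGES is a made-up zone_stat_item
(this patch only introduces the empty enum and NR_VM_ZONE_STAT_ITEMS), and
both functions are hypothetical callers.

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vmstat.h>

/* NR_EXAMPLE_PAGES is hypothetical; this patch defines no real items yet. */

static void example_account(struct page *page)
{
	/* Plain forms: the caller need not have interrupts disabled. */
	inc_zone_page_state(page, NR_EXAMPLE_PAGES);
	mod_zone_page_state(page_zone(page), NR_EXAMPLE_PAGES, -2);

	/* __ form: only valid when interrupts are known to be disabled. */
	__dec_zone_page_state(page, NR_EXAMPLE_PAGES);
}

static unsigned long example_report(struct zone *zone, int node)
{
	/* Reads are plain array lookups at zone, node or global scope. */
	return zone_page_state(zone, NR_EXAMPLE_PAGES) +
	       node_page_state(node, NR_EXAMPLE_PAGES) +
	       global_page_state(NR_EXAMPLE_PAGES);
}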
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/mmzone.h |   9
-rw-r--r--  include/linux/vmstat.h | 129
2 files changed, 137 insertions(+), 1 deletion(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d6120fa69116..543f9e411563 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -46,6 +46,9 @@ struct zone_padding {
 #define ZONE_PADDING(name)
 #endif
 
+enum zone_stat_item {
+	NR_VM_ZONE_STAT_ITEMS };
+
 struct per_cpu_pages {
 	int count;		/* number of pages in the list */
 	int high;		/* high watermark, emptying needed */
@@ -55,6 +58,10 @@ struct per_cpu_pages {
 
 struct per_cpu_pageset {
 	struct per_cpu_pages pcp[2];	/* 0: hot. 1: cold */
+#ifdef CONFIG_SMP
+	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
+#endif
+
 #ifdef CONFIG_NUMA
 	unsigned long numa_hit;		/* allocated in intended node */
 	unsigned long numa_miss;	/* allocated in non intended node */
@@ -165,6 +172,8 @@ struct zone {
 	/* A count of how many reclaimers are scanning this zone */
 	atomic_t		reclaim_in_progress;
 
+	/* Zone statistics */
+	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
 	/*
 	 * timestamp (in jiffies) of the last zone reclaim that did not
 	 * result in freeing of pages. This is used to avoid repeated scans
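The s8 vm_stat_diff[] array added to struct per_cpu_pageset above holds the
per-processor differentials mentioned in the changelog. The fold-on-threshold
consolidation is implemented in mm/vmstat.c, which is outside this diffstat;
the sketch below is only a rough reconstruction of that idea, and
STAT_THRESHOLD is an assumed constant, not something defined by this patch.

#include <linux/mm.h>
#include <linux/vmstat.h>

#define STAT_THRESHOLD 32	/* assumed value, for illustration only */

/*
 * Sketch of the threshold-based consolidation described in the changelog:
 * deltas accumulate in the per-cpu s8 differential and are folded into the
 * zone and global atomic counters once they grow past the threshold.
 * The caller is assumed to have interrupts (and thus preemption) disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;
	long x = delta + *p;

	if (unlikely(x > STAT_THRESHOLD || x < -STAT_THRESHOLD)) {
		zone_page_state_add(x, zone, item);	/* fold into atomics */
		x = 0;
	}
	*p = x;
}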
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 3ca0c1989fc2..3fd5c11e544a 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -3,6 +3,9 @@
 
 #include <linux/types.h>
 #include <linux/percpu.h>
+#include <linux/config.h>
+#include <linux/mmzone.h>
+#include <asm/atomic.h>
 
 /*
  * Global page accounting. One instance per CPU. Only unsigned longs are
@@ -134,5 +137,129 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
 
 DECLARE_PER_CPU(struct page_state, page_states);
 
-#endif /* _LINUX_VMSTAT_H */
+/*
+ * Zone based page accounting with per cpu differentials.
+ */
+extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+
+static inline void zone_page_state_add(long x, struct zone *zone,
+				 enum zone_stat_item item)
+{
+	atomic_long_add(x, &zone->vm_stat[item]);
+	atomic_long_add(x, &vm_stat[item]);
+}
+
+static inline unsigned long global_page_state(enum zone_stat_item item)
+{
+	long x = atomic_long_read(&vm_stat[item]);
+#ifdef CONFIG_SMP
+	if (x < 0)
+		x = 0;
+#endif
+	return x;
+}
+
+static inline unsigned long zone_page_state(struct zone *zone,
+					enum zone_stat_item item)
+{
+	long x = atomic_long_read(&zone->vm_stat[item]);
+#ifdef CONFIG_SMP
+	if (x < 0)
+		x = 0;
+#endif
+	return x;
+}
+
+#ifdef CONFIG_NUMA
+/*
+ * Determine the per node value of a stat item. This function
+ * is called frequently in a NUMA machine, so try to be as
+ * frugal as possible.
+ */
+static inline unsigned long node_page_state(int node,
+				 enum zone_stat_item item)
+{
+	struct zone *zones = NODE_DATA(node)->node_zones;
+
+	return
+#ifndef CONFIG_DMA_IS_NORMAL
+#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
+		zone_page_state(&zones[ZONE_DMA32], item) +
+#endif
+		zone_page_state(&zones[ZONE_NORMAL], item) +
+#endif
+#ifdef CONFIG_HIGHMEM
+		zone_page_state(&zones[ZONE_HIGHMEM], item) +
+#endif
+		zone_page_state(&zones[ZONE_DMA], item);
+}
+#else
+#define node_page_state(node, item) global_page_state(item)
+#endif
+
+#define __add_zone_page_state(__z, __i, __d) \
+		__mod_zone_page_state(__z, __i, __d)
+#define __sub_zone_page_state(__z, __i, __d) \
+		__mod_zone_page_state(__z, __i,-(__d))
+
+#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
+#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
+
+static inline void zap_zone_vm_stats(struct zone *zone)
+{
+	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
+}
+
+#ifdef CONFIG_SMP
+void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
+void __inc_zone_page_state(struct page *, enum zone_stat_item);
+void __dec_zone_page_state(struct page *, enum zone_stat_item);
 
+void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
+void inc_zone_page_state(struct page *, enum zone_stat_item);
+void dec_zone_page_state(struct page *, enum zone_stat_item);
+
+extern void inc_zone_state(struct zone *, enum zone_stat_item);
+
+void refresh_cpu_vm_stats(int);
+void refresh_vm_stats(void);
+
+#else /* CONFIG_SMP */
+
+/*
+ * We do not maintain differentials in a single processor configuration.
+ * The functions directly modify the zone and global counters.
+ */
+static inline void __mod_zone_page_state(struct zone *zone,
+			enum zone_stat_item item, int delta)
+{
+	zone_page_state_add(delta, zone, item);
+}
+
+static inline void __inc_zone_page_state(struct page *page,
+			enum zone_stat_item item)
+{
+	atomic_long_inc(&page_zone(page)->vm_stat[item]);
+	atomic_long_inc(&vm_stat[item]);
+}
+
+static inline void __dec_zone_page_state(struct page *page,
+			enum zone_stat_item item)
+{
+	atomic_long_dec(&page_zone(page)->vm_stat[item]);
+	atomic_long_dec(&vm_stat[item]);
+}
+
+/*
+ * We only use atomic operations to update counters. So there is no need to
+ * disable interrupts.
+ */
+#define inc_zone_page_state __inc_zone_page_state
+#define dec_zone_page_state __dec_zone_page_state
+#define mod_zone_page_state __mod_zone_page_state
+
+static inline void refresh_cpu_vm_stats(int cpu) { }
+static inline void refresh_vm_stats(void) { }
+#endif
+
+#endif /* _LINUX_VMSTAT_H */
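One practical consequence of keeping the counters in plain arrays, noted in
the changelog, is that generating /proc output becomes a simple loop over the
stat indices. The fragment below is an illustrative sketch of such a
reporting loop, not code added by this patch; the seq_file hook and the
printed labels are assumed, and with the enum still empty the loop body would
not yet execute.

#include <linux/mmzone.h>
#include <linux/seq_file.h>
#include <linux/vmstat.h>

/*
 * Illustrative only: walk every zoned VM counter and print its global and
 * per-node value.  This patch adds no such /proc code itself.
 */
static void example_show_zvc(struct seq_file *m, int node)
{
	int i;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "stat_item %d: global %lu node %lu\n", i,
			   global_page_state(i),
			   node_page_state(node, i));
}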