aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/mmzone.h
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2006-09-26 02:31:52 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-09-26 11:48:51 -0400
commit0ff38490c836dc379ff7ec45b10a15a662f4e5f6 (patch)
treecb42d5d3cace3c8d12f0b304879039c503807981 /include/linux/mmzone.h
parent972d1a7b140569084439a81265a0f15b74e924e0 (diff)
[PATCH] zone_reclaim: dynamic slab reclaim
Currently one can enable slab reclaim by setting an explicit option in /proc/sys/vm/zone_reclaim_mode. Slab reclaim is then used as a final option if the freeing of unmapped file backed pages is not enough to free enough pages to allow a local allocation. However, that means that the slab can grow excessively and that most memory of a node may be used by slabs. We have had a case where a machine with 46GB of memory was using 40-42GB for slab. Zone reclaim was effective in dealing with pagecache pages. However, slab reclaim was only done during global reclaim (which is a bit rare on NUMA systems). This patch implements slab reclaim during zone reclaim. Zone reclaim occurs if there is a danger of an off node allocation. At that point we 1. Shrink the per node page cache if the number of pagecache pages is more than min_unmapped_ratio percent of pages in a zone. 2. Shrink the slab cache if the number of the nodes reclaimable slab pages (patch depends on earlier one that implements that counter) are more than min_slab_ratio (a new /proc/sys/vm tunable). The shrinking of the slab cache is a bit problematic since it is not node specific. So we simply calculate what point in the slab we want to reach (current per node slab use minus the number of pages that need to be allocated) and then repeatedly run the global reclaim until that is unsuccessful or we have reached the limit. I hope we will have zone based slab reclaim at some point which will make that easier. The default for the min_slab_ratio is 5% Also remove the slab option from /proc/sys/vm/zone_reclaim_mode. [akpm@osdl.org: cleanups] Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--include/linux/mmzone.h3
1 files changed, 3 insertions, 0 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 08c41b9f92e0..3693f1a52788 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -171,6 +171,7 @@ struct zone {
171 * zone reclaim becomes active if more unmapped pages exist. 171 * zone reclaim becomes active if more unmapped pages exist.
172 */ 172 */
173 unsigned long min_unmapped_pages; 173 unsigned long min_unmapped_pages;
174 unsigned long min_slab_pages;
174 struct per_cpu_pageset *pageset[NR_CPUS]; 175 struct per_cpu_pageset *pageset[NR_CPUS];
175#else 176#else
176 struct per_cpu_pageset pageset[NR_CPUS]; 177 struct per_cpu_pageset pageset[NR_CPUS];
@@ -448,6 +449,8 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file
448 void __user *, size_t *, loff_t *); 449 void __user *, size_t *, loff_t *);
449int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, 450int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
450 struct file *, void __user *, size_t *, loff_t *); 451 struct file *, void __user *, size_t *, loff_t *);
452int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
453 struct file *, void __user *, size_t *, loff_t *);
451 454
452#include <linux/topology.h> 455#include <linux/topology.h>
453/* Returns the number of the current Node. */ 456/* Returns the number of the current Node. */