author	Andi Kleen <ak@suse.de>	2005-08-26 21:34:10 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-08-26 22:37:12 -0400
commit	485761bd6a72d33b3d4fa884927b2b0d983b701e (patch)
tree	c75562513489f62c8dcfd41acd467bca3d3202cc	/arch/x86_64/mm/numa.c
parent	bebf4688e9dbbfdd421736685d607bced91a3c91 (diff)
[PATCH] x86_64: Tell VM about holes in nodes
Some nodes can have large holes on x86-64. This fixes problems with the VM allowing too many dirty pages, because it overestimates the amount of available RAM in a node. In extreme cases you can end up with all RAM filled with dirty pages, which can lead to deadlocks and other nasty behaviour.

This patch just tells the VM about the known holes from e820. Reserved memory (like the kernel text or mem_map) is still not taken into account, but that should only be a few percent of error now.

A small detail is that the flat setup now uses the NUMA free_area_init_node() too, because it offers more flexibility.

(akpm: lotsa thanks to Martin for working this problem out)

Cc: Martin Bligh <mbligh@mbligh.org>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
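For illustration only: a minimal userspace sketch of the kind of accounting e820_hole_size() performs, counting pages in a pfn range that no usable-RAM entry covers. The struct, field and function names below are hypothetical stand-ins assuming a simplified, non-overlapping memory map; this is not the kernel's e820 code.

/* Hypothetical, simplified model of e820-style hole accounting.
 * A "hole" is any page in [start_pfn, end_pfn) not backed by usable RAM. */
#include <stdio.h>

struct ram_range {			/* stand-in for an E820_RAM entry */
	unsigned long start_pfn;
	unsigned long end_pfn;
};

/* Count pages in [start_pfn, end_pfn) that no RAM range covers,
 * assuming the ranges do not overlap. */
static unsigned long hole_pages(const struct ram_range *map, int n,
				unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long covered = 0;

	for (int i = 0; i < n; i++) {
		unsigned long s = map[i].start_pfn > start_pfn ? map[i].start_pfn : start_pfn;
		unsigned long e = map[i].end_pfn < end_pfn ? map[i].end_pfn : end_pfn;

		if (e > s)
			covered += e - s;
	}
	return (end_pfn - start_pfn) - covered;
}

int main(void)
{
	/* A node spanning pfns 0..1024, with RAM only at 0..512 and 768..1024. */
	struct ram_range map[] = { { 0, 512 }, { 768, 1024 } };

	printf("hole pages: %lu\n", hole_pages(map, 2, 0, 1024));	/* prints 256 */
	return 0;
}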
Diffstat (limited to 'arch/x86_64/mm/numa.c')
-rw-r--r--	arch/x86_64/mm/numa.c	8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 70cb2904a90f..6a156f5692ae 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -126,9 +126,11 @@ void __init setup_node_zones(int nodeid)
 {
 	unsigned long start_pfn, end_pfn;
 	unsigned long zones[MAX_NR_ZONES];
+	unsigned long holes[MAX_NR_ZONES];
 	unsigned long dma_end_pfn;
 
 	memset(zones, 0, sizeof(unsigned long) * MAX_NR_ZONES);
+	memset(holes, 0, sizeof(unsigned long) * MAX_NR_ZONES);
 
 	start_pfn = node_start_pfn(nodeid);
 	end_pfn = node_end_pfn(nodeid);
@@ -139,13 +141,17 @@ void __init setup_node_zones(int nodeid)
 	dma_end_pfn = __pa(MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 	if (start_pfn < dma_end_pfn) {
 		zones[ZONE_DMA] = dma_end_pfn - start_pfn;
+		holes[ZONE_DMA] = e820_hole_size(start_pfn, dma_end_pfn);
 		zones[ZONE_NORMAL] = end_pfn - dma_end_pfn;
+		holes[ZONE_NORMAL] = e820_hole_size(dma_end_pfn, end_pfn);
+
 	} else {
 		zones[ZONE_NORMAL] = end_pfn - start_pfn;
+		holes[ZONE_NORMAL] = e820_hole_size(start_pfn, end_pfn);
 	}
 
 	free_area_init_node(nodeid, NODE_DATA(nodeid), zones,
-			    start_pfn, NULL);
+			    start_pfn, holes);
 }
 
 void __init numa_init_array(void)
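For context, a minimal sketch (plain userspace C with hypothetical structures and numbers, not the kernel's implementation) of the accounting the new holes argument enables: subtracting per-zone holes from the spanned range, so that anything the VM budgets per zone, such as a dirty-page ceiling, scales with pages that actually exist.

/* Hypothetical illustration only; the struct and the 40% ratio are
 * simplified stand-ins, not the kernel's data structures or defaults. */
#include <stdio.h>

struct toy_zone {
	unsigned long spanned_pages;	/* end_pfn - start_pfn, holes included */
	unsigned long present_pages;	/* pages that really exist */
};

static void size_zone(struct toy_zone *z, unsigned long span, unsigned long holes)
{
	z->spanned_pages = span;
	z->present_pages = span - holes;	/* budget against this, not span */
}

int main(void)
{
	struct toy_zone z;

	/* A zone spanning 1M pages, of which 256K are an e820 hole. */
	size_zone(&z, 1UL << 20, 1UL << 18);

	/* With a 40% dirty ceiling this allows ~314K dirty pages
	 * instead of ~419K when the hole is ignored. */
	printf("dirty limit: %lu pages\n", z.present_pages * 40 / 100);
	return 0;
}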