aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorDavid Rientjes <rientjes@google.com>2007-07-21 11:11:29 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-07-21 21:37:14 -0400
commita7e96629efcab1ccea3a376f3f5683c8d9e882c1 (patch)
tree8bbd5361289975ffe8e2673746842472a76a3420 /arch
parentbc2cea6a34fdb30f118ec75db39a46a191870607 (diff)
x86_64: fix e820_hole_size based on address ranges
e820_hole_size() now uses the newly extracted helper function, e820_find_active_region(), to determine the size of usable RAM in a range of PFNs. This was previously broken because of two reasons: - The start and end PFNs of each e820 entry were not properly rounded prior to excluding those entries in the range, and - Entries smaller than a page were not properly excluded from being accumulated. This resulted in emulated nodes being incorrectly mapped to ranges that were completely reserved and not candidates for being registered as active ranges. Signed-off-by: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Andi Kleen <ak@suse.de> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86_64/kernel/e820.c54
-rw-r--r--arch/x86_64/mm/numa.c8
2 files changed, 25 insertions, 37 deletions
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 2570643ba1c5..62e051bab3b2 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -194,37 +194,6 @@ unsigned long __init e820_end_of_ram(void)
194} 194}
195 195
196/* 196/*
197 * Find the hole size in the range.
198 */
199unsigned long __init e820_hole_size(unsigned long start, unsigned long end)
200{
201 unsigned long ram = 0;
202 int i;
203
204 for (i = 0; i < e820.nr_map; i++) {
205 struct e820entry *ei = &e820.map[i];
206 unsigned long last, addr;
207
208 if (ei->type != E820_RAM ||
209 ei->addr+ei->size <= start ||
210 ei->addr >= end)
211 continue;
212
213 addr = round_up(ei->addr, PAGE_SIZE);
214 if (addr < start)
215 addr = start;
216
217 last = round_down(ei->addr + ei->size, PAGE_SIZE);
218 if (last >= end)
219 last = end;
220
221 if (last > addr)
222 ram += last - addr;
223 }
224 return ((end - start) - ram);
225}
226
227/*
228 * Mark e820 reserved areas as busy for the resource manager. 197 * Mark e820 reserved areas as busy for the resource manager.
229 */ 198 */
230void __init e820_reserve_resources(void) 199void __init e820_reserve_resources(void)
@@ -364,6 +333,29 @@ void __init add_memory_region(unsigned long start, unsigned long size, int type)
364 e820.nr_map++; 333 e820.nr_map++;
365} 334}
366 335
336/*
337 * Find the hole size (in bytes) in the memory range.
338 * @start: starting address of the memory range to scan
339 * @end: ending address of the memory range to scan
340 */
341unsigned long __init e820_hole_size(unsigned long start, unsigned long end)
342{
343 unsigned long start_pfn = start >> PAGE_SHIFT;
344 unsigned long end_pfn = end >> PAGE_SHIFT;
345 unsigned long ei_startpfn;
346 unsigned long ei_endpfn;
347 unsigned long ram = 0;
348 int i;
349
350 for (i = 0; i < e820.nr_map; i++) {
351 if (e820_find_active_region(&e820.map[i],
352 start_pfn, end_pfn,
353 &ei_startpfn, &ei_endpfn))
354 ram += ei_endpfn - ei_startpfn;
355 }
356 return end - start - (ram << PAGE_SHIFT);
357}
358
367void __init e820_print_map(char *who) 359void __init e820_print_map(char *who)
368{ 360{
369 int i; 361 int i;
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 30bf8043984d..0191b1ca6aa4 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -273,9 +273,6 @@ void __init numa_init_array(void)
273 273
274#ifdef CONFIG_NUMA_EMU 274#ifdef CONFIG_NUMA_EMU
275/* Numa emulation */ 275/* Numa emulation */
276#define E820_ADDR_HOLE_SIZE(start, end) \
277 (e820_hole_size((start) >> PAGE_SHIFT, (end) >> PAGE_SHIFT) << \
278 PAGE_SHIFT)
279char *cmdline __initdata; 276char *cmdline __initdata;
280 277
281/* 278/*
@@ -319,7 +316,7 @@ static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
319 return -1; 316 return -1;
320 if (num_nodes > MAX_NUMNODES) 317 if (num_nodes > MAX_NUMNODES)
321 num_nodes = MAX_NUMNODES; 318 num_nodes = MAX_NUMNODES;
322 size = (max_addr - *addr - E820_ADDR_HOLE_SIZE(*addr, max_addr)) / 319 size = (max_addr - *addr - e820_hole_size(*addr, max_addr)) /
323 num_nodes; 320 num_nodes;
324 /* 321 /*
325 * Calculate the number of big nodes that can be allocated as a result 322 * Calculate the number of big nodes that can be allocated as a result
@@ -347,7 +344,7 @@ static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
347 if (i == num_nodes + node_start - 1) 344 if (i == num_nodes + node_start - 1)
348 end = max_addr; 345 end = max_addr;
349 else 346 else
350 while (end - *addr - E820_ADDR_HOLE_SIZE(*addr, end) < 347 while (end - *addr - e820_hole_size(*addr, end) <
351 size) { 348 size) {
352 end += FAKE_NODE_MIN_SIZE; 349 end += FAKE_NODE_MIN_SIZE;
353 if (end > max_addr) { 350 if (end > max_addr) {
@@ -488,7 +485,6 @@ out:
488 numa_init_array(); 485 numa_init_array();
489 return 0; 486 return 0;
490} 487}
491#undef E820_ADDR_HOLE_SIZE
492#endif /* CONFIG_NUMA_EMU */ 488#endif /* CONFIG_NUMA_EMU */
493 489
494void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn) 490void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)