about summary refs log tree commit diff stats
path: root/arch/x86/mm/discontig_32.c
diff options
context:
space:
mode:
authorYinghai Lu <yhlu.kernel@gmail.com>2008-06-02 00:06:31 -0400
committerIngo Molnar <mingo@elte.hu>2008-06-03 07:26:28 -0400
commit287572cb38de7f270b59191a0fecfa5c5de7765d (patch)
treeaa4f44ca3b2a3390faf4fc4791bdaab9119d2e98 /arch/x86/mm/discontig_32.c
parent2944e16b25e7fb8b5ee0dd9dc7197a0f9e523cfd (diff)
x86, numa, 32-bit: avoid clash between ramdisk and kva
use find_e820_area to get address space...

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm/discontig_32.c')
-rw-r--r--arch/x86/mm/discontig_32.c27
1 files changed, 8 insertions, 19 deletions
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 3150ad385672..73a983489c60 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -38,6 +38,7 @@
 #include <asm/setup.h>
 #include <asm/mmzone.h>
 #include <asm/bios_ebda.h>
+#include <asm/proto.h>
 
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
@@ -326,7 +327,6 @@ unsigned long __init setup_memory(void)
 {
 	int nid;
 	unsigned long system_start_pfn, system_max_low_pfn;
-	unsigned long wasted_pages;
 
 	/*
 	 * When mapping a NUMA machine we allocate the node_mem_map arrays
@@ -337,29 +337,18 @@ unsigned long __init setup_memory(void)
 	 */
 	get_memcfg_numa();
 
-	kva_pages = calculate_numa_remap_pages();
+	kva_pages = round_up(calculate_numa_remap_pages(), PTRS_PER_PTE);
 
 	/* partially used pages are not usable - thus round upwards */
 	system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end);
 
-	kva_start_pfn = find_max_low_pfn() - kva_pages;
-
-#ifdef CONFIG_BLK_DEV_INITRD
-	/* Numa kva area is below the initrd */
-	if (initrd_start)
-		kva_start_pfn = PFN_DOWN(initrd_start - PAGE_OFFSET)
-				  - kva_pages;
-#endif
-
-	/*
-	 * We waste pages past at the end of the KVA for no good reason other
-	 * than how it is located. This is bad.
-	 */
-	wasted_pages = kva_start_pfn & (PTRS_PER_PTE-1);
-	kva_start_pfn -= wasted_pages;
-	kva_pages += wasted_pages;
-
 	system_max_low_pfn = max_low_pfn = find_max_low_pfn();
+	kva_start_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
+	kva_start_pfn = find_e820_area(kva_start_pfn<<PAGE_SHIFT,
+				max_low_pfn<<PAGE_SHIFT,
+				kva_pages<<PAGE_SHIFT,
+				PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
+
 	printk("kva_start_pfn ~ %ld find_max_low_pfn() ~ %ld\n",
 		kva_start_pfn, max_low_pfn);
 	printk("max_pfn = %ld\n", max_pfn);