aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/mm/discontig_32.c
diff options
context:
space:
mode:
authorYinghai Lu <yhlu.kernel@gmail.com>2008-06-03 22:35:04 -0400
committerIngo Molnar <mingo@elte.hu>2008-06-04 06:01:58 -0400
commit7b2a0a6c4866cac146dcb0433e6984eb19a81335 (patch)
treed498d35f1281894fddbf39dcfd5c8d848ab87bb9 /arch/x86/mm/discontig_32.c
parentee0c80fadfa56bf4f9d90c1c023429a6bd8edd69 (diff)
x86: make 32-bit use e820_register_active_regions()
This way 32-bit is more similar to 64-bit, and smarter e820 and numa. Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm/discontig_32.c')
-rw-r--r--arch/x86/mm/discontig_32.c19
1 file changed, 6 insertions, 13 deletions
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 7ced26ab9ae..a89ccf3d4c1 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -120,10 +120,9 @@ int __init get_memcfg_numa_flat(void)
120{ 120{
121 printk("NUMA - single node, flat memory mode\n"); 121 printk("NUMA - single node, flat memory mode\n");
122 122
123 /* Run the memory configuration and find the top of memory. */
124 find_max_pfn();
125 node_start_pfn[0] = 0; 123 node_start_pfn[0] = 0;
126 node_end_pfn[0] = max_pfn; 124 node_end_pfn[0] = max_pfn;
125 e820_register_active_regions(0, 0, max_pfn);
127 memory_present(0, 0, max_pfn); 126 memory_present(0, 0, max_pfn);
128 node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn); 127 node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn);
129 128
@@ -337,6 +336,11 @@ unsigned long __init setup_memory(void)
337 * this space and use it to adjust the boundary between ZONE_NORMAL 336 * this space and use it to adjust the boundary between ZONE_NORMAL
338 * and ZONE_HIGHMEM. 337 * and ZONE_HIGHMEM.
339 */ 338 */
339
340 /* call find_max_low_pfn at first, it could update max_pfn */
341 system_max_low_pfn = max_low_pfn = find_max_low_pfn();
342
343 remove_all_active_ranges();
340 get_memcfg_numa(); 344 get_memcfg_numa();
341 345
342 kva_pages = round_up(calculate_numa_remap_pages(), PTRS_PER_PTE); 346 kva_pages = round_up(calculate_numa_remap_pages(), PTRS_PER_PTE);
@@ -344,7 +348,6 @@ unsigned long __init setup_memory(void)
344 /* partially used pages are not usable - thus round upwards */ 348 /* partially used pages are not usable - thus round upwards */
345 system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end); 349 system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end);
346 350
347 system_max_low_pfn = max_low_pfn = find_max_low_pfn();
348 kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE); 351 kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
349 do { 352 do {
350 kva_start_pfn = find_e820_area(kva_target_pfn<<PAGE_SHIFT, 353 kva_start_pfn = find_e820_area(kva_target_pfn<<PAGE_SHIFT,
@@ -402,7 +405,6 @@ unsigned long __init setup_memory(void)
402 405
403void __init zone_sizes_init(void) 406void __init zone_sizes_init(void)
404{ 407{
405 int nid;
406 unsigned long max_zone_pfns[MAX_NR_ZONES]; 408 unsigned long max_zone_pfns[MAX_NR_ZONES];
407 memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); 409 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
408 max_zone_pfns[ZONE_DMA] = 410 max_zone_pfns[ZONE_DMA] =
@@ -412,15 +414,6 @@ void __init zone_sizes_init(void)
412 max_zone_pfns[ZONE_HIGHMEM] = highend_pfn; 414 max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
413#endif 415#endif
414 416
415 /* If SRAT has not registered memory, register it now */
416 if (find_max_pfn_with_active_regions() == 0) {
417 for_each_online_node(nid) {
418 if (node_has_online_mem(nid))
419 add_active_range(nid, node_start_pfn[nid],
420 node_end_pfn[nid]);
421 }
422 }
423
424 free_area_init_nodes(max_zone_pfns); 417 free_area_init_nodes(max_zone_pfns);
425 return; 418 return;
426} 419}