Diffstat (limited to 'arch/ia64/mm/discontig.c')
 arch/ia64/mm/discontig.c | 52 ++++++++++++++++++------------------------------
 1 file changed, 18 insertions(+), 34 deletions(-)
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 96722cb1b49d..16835108bb5b 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -37,7 +37,9 @@ struct early_node_data {
 	unsigned long pernode_size;
 	struct bootmem_data bootmem_data;
 	unsigned long num_physpages;
+#ifdef CONFIG_ZONE_DMA
 	unsigned long num_dma_physpages;
+#endif
 	unsigned long min_pfn;
 	unsigned long max_pfn;
 };
@@ -412,37 +414,6 @@ static void __init memory_less_nodes(void)
 	return;
 }
 
-#ifdef CONFIG_SPARSEMEM
-/**
- * register_sparse_mem - notify SPARSEMEM that this memory range exists.
- * @start: physical start of range
- * @end: physical end of range
- * @arg: unused
- *
- * Simply calls SPARSEMEM to register memory section(s).
- */
-static int __init register_sparse_mem(unsigned long start, unsigned long end,
-	void *arg)
-{
-	int nid;
-
-	start = __pa(start) >> PAGE_SHIFT;
-	end = __pa(end) >> PAGE_SHIFT;
-	nid = early_pfn_to_nid(start);
-	memory_present(nid, start, end);
-
-	return 0;
-}
-
-static void __init arch_sparse_init(void)
-{
-	efi_memmap_walk(register_sparse_mem, NULL);
-	sparse_init();
-}
-#else
-#define arch_sparse_init() do {} while (0)
-#endif
-
 /**
  * find_memory - walk the EFI memory map and setup the bootmem allocator
  *
@@ -473,6 +444,9 @@ void __init find_memory(void)
 		node_clear(node, memory_less_mask);
 		mem_data[node].min_pfn = ~0UL;
 	}
+
+	efi_memmap_walk(register_active_ranges, NULL);
+
 	/*
 	 * Initialize the boot memory maps in reverse order since that's
 	 * what the bootmem allocator expects
@@ -506,6 +480,12 @@ void __init find_memory(void)
 	max_pfn = max_low_pfn;
 
 	find_initrd();
+
+#ifdef CONFIG_CRASH_DUMP
+	/* If we are doing a crash dump, we still need to know the real mem
+	 * size before original memory map is reset. */
+	saved_max_pfn = max_pfn;
+#endif
 }
 
 #ifdef CONFIG_SMP
@@ -654,11 +634,12 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
 {
 	unsigned long end = start + len;
 
-	add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
 	mem_data[node].num_physpages += len >> PAGE_SHIFT;
+#ifdef CONFIG_ZONE_DMA
 	if (start <= __pa(MAX_DMA_ADDRESS))
 		mem_data[node].num_dma_physpages +=
 			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
+#endif
 	start = GRANULEROUNDDOWN(start);
 	start = ORDERROUNDDOWN(start);
 	end = GRANULEROUNDUP(end);
@@ -686,10 +667,11 @@ void __init paging_init(void)
 
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
-	arch_sparse_init();
-
 	efi_memmap_walk(filter_rsvd_memory, count_node_pages);
 
+	sparse_memory_present_with_active_regions(MAX_NUMNODES);
+	sparse_init();
+
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
 		sizeof(struct page));
@@ -710,7 +692,9 @@ void __init paging_init(void)
 	}
 
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+#ifdef CONFIG_ZONE_DMA
 	max_zone_pfns[ZONE_DMA] = max_dma;
+#endif
 	max_zone_pfns[ZONE_NORMAL] = max_pfn;
 	free_area_init_nodes(max_zone_pfns);
 