aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMike Rapoport <rppt@linux.vnet.ibm.com>2018-07-23 01:56:57 -0400
committerTony Luck <tony.luck@intel.com>2018-07-23 13:30:08 -0400
commitfb63fbee423afc6fa6f982d31c6894bb2da8f7ef (patch)
treee963af8a41bdd1b2861d85a416b015ed1cd767a1
parent0617c50a081262f8d515a62f062c0c5bc9c10630 (diff)
ia64: use mem_data to detect nodes' minimal and maximal PFNs
When the EFI memory map is traversed to determine the extents of each node, the minimal and maximal PFNs are stored in the bootmem_data structures. The same information is later stored in the mem_data array of 'struct early_node_data'. Switch to using mem_data from the very beginning. Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com> Signed-off-by: Tony Luck <tony.luck@intel.com>
-rw-r--r--arch/ia64/mm/discontig.c60
1 files changed, 14 insertions, 46 deletions
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 6148ea8338ad..8e99d8e2455e 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -57,33 +57,31 @@ pg_data_t *pgdat_list[MAX_NUMNODES];
57 (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1))) 57 (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
58 58
59/** 59/**
60 * build_node_maps - callback to setup bootmem structs for each node 60 * build_node_maps - callback to setup mem_data structs for each node
61 * @start: physical start of range 61 * @start: physical start of range
62 * @len: length of range 62 * @len: length of range
63 * @node: node where this range resides 63 * @node: node where this range resides
64 * 64 *
65 * We allocate a struct bootmem_data for each piece of memory that we wish to 65 * Detect extents of each piece of memory that we wish to
66 * treat as a virtually contiguous block (i.e. each node). Each such block 66 * treat as a virtually contiguous block (i.e. each node). Each such block
67 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down 67 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
68 * if necessary. Any non-existent pages will simply be part of the virtual 68 * if necessary. Any non-existent pages will simply be part of the virtual
69 * memmap. We also update min_low_pfn and max_low_pfn here as we receive 69 * memmap.
70 * memory ranges from the caller.
71 */ 70 */
72static int __init build_node_maps(unsigned long start, unsigned long len, 71static int __init build_node_maps(unsigned long start, unsigned long len,
73 int node) 72 int node)
74{ 73{
75 unsigned long spfn, epfn, end = start + len; 74 unsigned long spfn, epfn, end = start + len;
76 struct bootmem_data *bdp = &bootmem_node_data[node];
77 75
78 epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT; 76 epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
79 spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT; 77 spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;
80 78
81 if (!bdp->node_low_pfn) { 79 if (!mem_data[node].min_pfn) {
82 bdp->node_min_pfn = spfn; 80 mem_data[node].min_pfn = spfn;
83 bdp->node_low_pfn = epfn; 81 mem_data[node].max_pfn = epfn;
84 } else { 82 } else {
85 bdp->node_min_pfn = min(spfn, bdp->node_min_pfn); 83 mem_data[node].min_pfn = min(spfn, mem_data[node].min_pfn);
86 bdp->node_low_pfn = max(epfn, bdp->node_low_pfn); 84 mem_data[node].max_pfn = max(epfn, mem_data[node].max_pfn);
87 } 85 }
88 86
89 return 0; 87 return 0;
@@ -323,19 +321,18 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
323{ 321{
324 unsigned long spfn, epfn; 322 unsigned long spfn, epfn;
325 unsigned long pernodesize = 0, pernode, pages, mapsize; 323 unsigned long pernodesize = 0, pernode, pages, mapsize;
326 struct bootmem_data *bdp = &bootmem_node_data[node];
327 324
328 spfn = start >> PAGE_SHIFT; 325 spfn = start >> PAGE_SHIFT;
329 epfn = (start + len) >> PAGE_SHIFT; 326 epfn = (start + len) >> PAGE_SHIFT;
330 327
331 pages = bdp->node_low_pfn - bdp->node_min_pfn; 328 pages = mem_data[node].max_pfn - mem_data[node].min_pfn;
332 mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT; 329 mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
333 330
334 /* 331 /*
335 * Make sure this memory falls within this node's usable memory 332 * Make sure this memory falls within this node's usable memory
336 * since we may have thrown some away in build_maps(). 333 * since we may have thrown some away in build_maps().
337 */ 334 */
338 if (spfn < bdp->node_min_pfn || epfn > bdp->node_low_pfn) 335 if (spfn < mem_data[node].min_pfn || epfn > mem_data[node].max_pfn)
339 return 0; 336 return 0;
340 337
341 /* Don't setup this node's local space twice... */ 338 /* Don't setup this node's local space twice... */
@@ -397,7 +394,7 @@ static void __init reserve_pernode_space(void)
397 bdp = pdp->bdata; 394 bdp = pdp->bdata;
398 395
399 /* First the bootmem_map itself */ 396 /* First the bootmem_map itself */
400 pages = bdp->node_low_pfn - bdp->node_min_pfn; 397 pages = mem_data[node].max_pfn - mem_data[node].min_pfn;
401 size = bootmem_bootmap_pages(pages) << PAGE_SHIFT; 398 size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
402 base = __pa(bdp->node_bootmem_map); 399 base = __pa(bdp->node_bootmem_map);
403 reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT); 400 reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
@@ -541,10 +538,8 @@ void __init find_memory(void)
541 efi_memmap_walk(find_max_min_low_pfn, NULL); 538 efi_memmap_walk(find_max_min_low_pfn, NULL);
542 539
543 for_each_online_node(node) 540 for_each_online_node(node)
544 if (bootmem_node_data[node].node_low_pfn) { 541 if (mem_data[node].min_pfn)
545 node_clear(node, memory_less_mask); 542 node_clear(node, memory_less_mask);
546 mem_data[node].min_pfn = ~0UL;
547 }
548 543
549 efi_memmap_walk(filter_memory, register_active_ranges); 544 efi_memmap_walk(filter_memory, register_active_ranges);
550 545
@@ -568,8 +563,8 @@ void __init find_memory(void)
568 563
569 init_bootmem_node(pgdat_list[node], 564 init_bootmem_node(pgdat_list[node],
570 map>>PAGE_SHIFT, 565 map>>PAGE_SHIFT,
571 bdp->node_min_pfn, 566 mem_data[node].min_pfn,
572 bdp->node_low_pfn); 567 mem_data[node].max_pfn);
573 } 568 }
574 569
575 efi_memmap_walk(filter_rsvd_memory, free_node_bootmem); 570 efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);
@@ -652,31 +647,6 @@ void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
652} 647}
653 648
654/** 649/**
655 * count_node_pages - callback to build per-node memory info structures
656 * @start: physical start of range
657 * @len: length of range
658 * @node: node where this range resides
659 *
660 * Each node has it's own number of physical pages, DMAable pages, start, and
661 * end page frame number. This routine will be called by call_pernode_memory()
662 * for each piece of usable memory and will setup these values for each node.
663 * Very similar to build_maps().
664 */
665static __init int count_node_pages(unsigned long start, unsigned long len, int node)
666{
667 unsigned long end = start + len;
668
669 start = GRANULEROUNDDOWN(start);
670 end = GRANULEROUNDUP(end);
671 mem_data[node].max_pfn = max(mem_data[node].max_pfn,
672 end >> PAGE_SHIFT);
673 mem_data[node].min_pfn = min(mem_data[node].min_pfn,
674 start >> PAGE_SHIFT);
675
676 return 0;
677}
678
679/**
680 * paging_init - setup page tables 650 * paging_init - setup page tables
681 * 651 *
682 * paging_init() sets up the page tables for each node of the system and frees 652 * paging_init() sets up the page tables for each node of the system and frees
@@ -692,8 +662,6 @@ void __init paging_init(void)
692 662
693 max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; 663 max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
694 664
695 efi_memmap_walk(filter_rsvd_memory, count_node_pages);
696
697 sparse_memory_present_with_active_regions(MAX_NUMNODES); 665 sparse_memory_present_with_active_regions(MAX_NUMNODES);
698 sparse_init(); 666 sparse_init();
699 667