author     Linus Torvalds <torvalds@linux-foundation.org>  2018-08-20 22:37:09 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-08-20 22:37:09 -0400
commit     778a33959a8ad4cb1ea2f4c5119f9e1e8b9f9d9b (patch)
tree       a5de7c25f129ca823fb578c8c35868993558b7c8
parent     6b2edf27fe26c73cd67b6bf5ffb23dce882e1455 (diff)
parent     f62800992e5917f2bed143dbcdce2501bc6933a9 (diff)
Merge tag 'please-pull-noboot' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux
Pull ia64 NO_BOOTMEM conversion from Tony Luck:
 "Mike Rapoport kindly fixed up ia64 to work with NO_BOOTMEM"

* tag 'please-pull-noboot' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux:
  ia64: switch to NO_BOOTMEM
  ia64: use mem_data to detect nodes' minimal and maximal PFNs
  ia64: remove unused num_dma_physpages member from 'struct early_node_data'
  ia64: contig/paging_init: reduce code duplication
-rw-r--r--	arch/ia64/Kconfig        |   1
-rw-r--r--	arch/ia64/kernel/setup.c |  11
-rw-r--r--	arch/ia64/mm/contig.c    |  75
-rw-r--r--	arch/ia64/mm/discontig.c | 134
4 files changed, 33 insertions(+), 188 deletions(-)
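
Every hunk below follows one pattern: bootmem's per-node bitmap bookkeeping (init_bootmem_node(), free_bootmem(), reserve_bootmem(), struct bootmem_data) is replaced by memblock, which keeps two flat lists of physical regions — "memory" (what exists) and "reserved" (what must not be handed out) — with free memory being the difference of the two. The following standalone C sketch models that idea only; the fixed-size arrays, merge logic and printing walker are illustrative stand-ins, not the kernel's memblock implementation:

/*
 * Minimal userspace model of the memblock concept: a "memory" list and a
 * "reserved" list of [base, base+size) regions; free memory is whatever
 * is in the first list but not the second.  Not the kernel's code.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

struct region {
	phys_addr_t base;
	phys_addr_t size;
};

#define MAX_REGIONS 32

struct region_list {
	struct region r[MAX_REGIONS];
	int cnt;
};

/* Insert sorted by base, then merge neighbours that touch or overlap. */
static void region_add(struct region_list *l, phys_addr_t base, phys_addr_t size)
{
	int i = 0, j;

	while (i < l->cnt && l->r[i].base < base)
		i++;
	for (j = l->cnt; j > i; j--)
		l->r[j] = l->r[j - 1];
	l->r[i].base = base;
	l->r[i].size = size;
	l->cnt++;

	for (j = 0; j + 1 < l->cnt; ) {
		struct region *lo = &l->r[j], *hi = &l->r[j + 1];

		if (lo->base + lo->size >= hi->base) {
			phys_addr_t end = hi->base + hi->size;
			int k;

			if (end > lo->base + lo->size)
				lo->size = end - lo->base;
			for (k = j + 1; k + 1 < l->cnt; k++)
				l->r[k] = l->r[k + 1];
			l->cnt--;
		} else {
			j++;
		}
	}
}

/* Walk free ranges: each memory region minus the reserved pieces in it. */
static void walk_free(const struct region_list *mem, const struct region_list *rsv)
{
	int i, j;

	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t cur = mem->r[i].base;
		phys_addr_t end = mem->r[i].base + mem->r[i].size;

		for (j = 0; j < rsv->cnt && cur < end; j++) {
			phys_addr_t rb = rsv->r[j].base;
			phys_addr_t re = rsv->r[j].base + rsv->r[j].size;

			if (re <= cur || rb >= end)
				continue;
			if (rb > cur)
				printf("free: [%#llx-%#llx)\n",
				       (unsigned long long)cur,
				       (unsigned long long)rb);
			if (re > cur)
				cur = re;
		}
		if (cur < end)
			printf("free: [%#llx-%#llx)\n",
			       (unsigned long long)cur,
			       (unsigned long long)end);
	}
}

int main(void)
{
	struct region_list memory = { .cnt = 0 }, reserved = { .cnt = 0 };

	region_add(&memory, 0x0, 0x10000000);      /* think memblock_add_node() */
	region_add(&reserved, 0x100000, 0x200000); /* think memblock_reserve()  */
	region_add(&reserved, 0x300000, 0x100000); /* merges with the above     */
	walk_free(&memory, &reserved);
	return 0;
}

With this model in mind the diffs read mechanically: memblock_add_node() populates the "memory" list, memblock_reserve() the "reserved" list, and the hand-rolled bootmem bitmap plumbing disappears.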
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 2bf4ef792f2c..8b4a0c1748c0 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -28,6 +28,7 @@ config IA64
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
+	select NO_BOOTMEM
 	select HAVE_VIRT_CPU_ACCOUNTING
 	select ARCH_HAS_DMA_MARK_CLEAN
 	select ARCH_HAS_SG_CHAIN
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ad43cbf70628..0e6c2d9fb498 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -32,6 +32,7 @@
 #include <linux/delay.h>
 #include <linux/cpu.h>
 #include <linux/kernel.h>
+#include <linux/memblock.h>
 #include <linux/reboot.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/clock.h>
@@ -383,8 +384,16 @@ reserve_memory (void)
 
 	sort_regions(rsvd_region, num_rsvd_regions);
 	num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);
-}
 
+	/* reserve all regions except the end of memory marker with memblock */
+	for (n = 0; n < num_rsvd_regions - 1; n++) {
+		struct rsvd_region *region = &rsvd_region[n];
+		phys_addr_t addr = __pa(region->start);
+		phys_addr_t size = region->end - region->start;
+
+		memblock_reserve(addr, size);
+	}
+}
 
 /**
  * find_initrd - get initrd parameters from the boot parameter structure
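
The added loop is the heart of the setup.c change: every entry of rsvd_region[] except the last — which is only an end-of-memory sentinel, hence the `num_rsvd_regions - 1` bound — is handed to memblock_reserve(). A compilable userspace mock of the same shape, where the region table contents, the identity __pa() and the printing memblock_reserve() are hypothetical stand-ins for the kernel's real ones:

/* Mock of the new reserve_memory() tail: hand each reserved region to the
 * allocator, skipping the final end-of-memory sentinel. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

struct rsvd_region {
	unsigned long start;
	unsigned long end;
};

static struct rsvd_region rsvd_region[] = {
	{ 0x00100000, 0x00800000 },	/* e.g. kernel image */
	{ 0x02000000, 0x02400000 },	/* e.g. initrd */
	{ 0x40000000, 0x40000000 },	/* end-of-memory marker, never reserved */
};
static int num_rsvd_regions = sizeof(rsvd_region) / sizeof(rsvd_region[0]);

#define __pa(x) ((phys_addr_t)(x))	/* identity here; the real one translates */

static void memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	printf("memblock_reserve: [%#llx-%#llx)\n",
	       (unsigned long long)base, (unsigned long long)(base + size));
}

int main(void)
{
	int n;

	/* reserve all regions except the end of memory marker */
	for (n = 0; n < num_rsvd_regions - 1; n++) {
		struct rsvd_region *region = &rsvd_region[n];
		phys_addr_t addr = __pa(region->start);
		phys_addr_t size = region->end - region->start;

		memblock_reserve(addr, size);
	}
	return 0;
}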
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 7d64b30913d1..e2e40bbd391c 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -34,53 +34,6 @@ static unsigned long max_gap;
 /* physical address where the bootmem map is located */
 unsigned long bootmap_start;
 
-/**
- * find_bootmap_location - callback to find a memory area for the bootmap
- * @start: start of region
- * @end: end of region
- * @arg: unused callback data
- *
- * Find a place to put the bootmap and return its starting address in
- * bootmap_start. This address must be page-aligned.
- */
-static int __init
-find_bootmap_location (u64 start, u64 end, void *arg)
-{
-	u64 needed = *(unsigned long *)arg;
-	u64 range_start, range_end, free_start;
-	int i;
-
-#if IGNORE_PFN0
-	if (start == PAGE_OFFSET) {
-		start += PAGE_SIZE;
-		if (start >= end)
-			return 0;
-	}
-#endif
-
-	free_start = PAGE_OFFSET;
-
-	for (i = 0; i < num_rsvd_regions; i++) {
-		range_start = max(start, free_start);
-		range_end = min(end, rsvd_region[i].start & PAGE_MASK);
-
-		free_start = PAGE_ALIGN(rsvd_region[i].end);
-
-		if (range_end <= range_start)
-			continue;	/* skip over empty range */
-
-		if (range_end - range_start >= needed) {
-			bootmap_start = __pa(range_start);
-			return -1;	/* done */
-		}
-
-		/* nothing more available in this segment */
-		if (range_end == end)
-			return 0;
-	}
-	return 0;
-}
-
 #ifdef CONFIG_SMP
 static void *cpu_data;
 /**
@@ -196,8 +149,6 @@ setup_per_cpu_areas(void)
 void __init
 find_memory (void)
 {
-	unsigned long bootmap_size;
-
 	reserve_memory();
 
 	/* first find highest page frame number */
@@ -205,21 +156,12 @@ find_memory (void)
 	max_low_pfn = 0;
 	efi_memmap_walk(find_max_min_low_pfn, NULL);
 	max_pfn = max_low_pfn;
-	/* how many bytes to cover all the pages */
-	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
-
-	/* look for a location to hold the bootmap */
-	bootmap_start = ~0UL;
-	efi_memmap_walk(find_bootmap_location, &bootmap_size);
-	if (bootmap_start == ~0UL)
-		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
 
-	bootmap_size = init_bootmem_node(NODE_DATA(0),
-			(bootmap_start >> PAGE_SHIFT), 0, max_pfn);
-
-	/* Free all available memory, then mark bootmem-map as being in use. */
-	efi_memmap_walk(filter_rsvd_memory, free_bootmem);
-	reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+	efi_memmap_walk(filter_memory, register_active_ranges);
+#else
+	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
+#endif
 
 	find_initrd();
 
@@ -244,11 +186,9 @@ paging_init (void)
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	efi_memmap_walk(filter_memory, register_active_ranges);
 	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
 	if (max_gap < LARGE_GAP) {
 		vmem_map = (struct page *) 0;
-		free_area_init_nodes(max_zone_pfns);
 	} else {
 		unsigned long map_size;
 
@@ -266,13 +206,10 @@ paging_init (void)
 	 */
 	NODE_DATA(0)->node_mem_map = vmem_map +
 		find_min_pfn_with_active_regions();
-	free_area_init_nodes(max_zone_pfns);
 
 		printk("Virtual mem_map starts at 0x%p\n", mem_map);
 	}
-#else /* !CONFIG_VIRTUAL_MEM_MAP */
-	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
-	free_area_init_nodes(max_zone_pfns);
 #endif /* !CONFIG_VIRTUAL_MEM_MAP */
+	free_area_init_nodes(max_zone_pfns);
 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
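
Besides the memblock switch, the two paging_init() hunks carry the "reduce code duplication" patch: free_area_init_nodes() used to be called on three separate paths (no vmem_map, vmem_map, and !CONFIG_VIRTUAL_MEM_MAP) and is now called once after the #ifdef. A toy illustration of the shape of that refactor — names and branches here are illustrative, not the kernel's:

/* Hoist a call that every branch performed out of the branches. */
#include <stdio.h>

static void common_tail(void) { puts("free_area_init_nodes()"); }

/* before: each branch ended with the same call */
static void before(int virtual_mem_map, int large_gap)
{
	if (virtual_mem_map) {
		if (!large_gap) {
			puts("no vmem_map");
			common_tail();
		} else {
			puts("set up vmem_map");
			common_tail();
		}
	} else {
		puts("register plain memory");
		common_tail();
	}
}

/* after: branches only do what differs; the tail is shared */
static void after(int virtual_mem_map, int large_gap)
{
	if (virtual_mem_map) {
		if (!large_gap)
			puts("no vmem_map");
		else
			puts("set up vmem_map");
	} else {
		puts("register plain memory");
	}
	common_tail();
}

int main(void)
{
	before(1, 0);
	after(1, 0);
	return 0;
}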
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 7d9bd20319ff..1928d5719e41 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -20,6 +20,7 @@
 #include <linux/nmi.h>
 #include <linux/swap.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/acpi.h>
 #include <linux/efi.h>
 #include <linux/nodemask.h>
@@ -38,9 +39,6 @@ struct early_node_data {
 	struct ia64_node_data *node_data;
 	unsigned long pernode_addr;
 	unsigned long pernode_size;
-#ifdef CONFIG_ZONE_DMA32
-	unsigned long num_dma_physpages;
-#endif
 	unsigned long min_pfn;
 	unsigned long max_pfn;
 };
@@ -60,33 +58,31 @@ pg_data_t *pgdat_list[MAX_NUMNODES];
 	(((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
 
 /**
- * build_node_maps - callback to setup bootmem structs for each node
+ * build_node_maps - callback to setup mem_data structs for each node
  * @start: physical start of range
  * @len: length of range
  * @node: node where this range resides
  *
- * We allocate a struct bootmem_data for each piece of memory that we wish to
+ * Detect extents of each piece of memory that we wish to
  * treat as a virtually contiguous block (i.e. each node). Each such block
  * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
  * if necessary. Any non-existent pages will simply be part of the virtual
- * memmap. We also update min_low_pfn and max_low_pfn here as we receive
- * memory ranges from the caller.
+ * memmap.
  */
 static int __init build_node_maps(unsigned long start, unsigned long len,
 				  int node)
 {
 	unsigned long spfn, epfn, end = start + len;
-	struct bootmem_data *bdp = &bootmem_node_data[node];
 
 	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
 	spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;
 
-	if (!bdp->node_low_pfn) {
-		bdp->node_min_pfn = spfn;
-		bdp->node_low_pfn = epfn;
+	if (!mem_data[node].min_pfn) {
+		mem_data[node].min_pfn = spfn;
+		mem_data[node].max_pfn = epfn;
 	} else {
-		bdp->node_min_pfn = min(spfn, bdp->node_min_pfn);
-		bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
+		mem_data[node].min_pfn = min(spfn, mem_data[node].min_pfn);
+		mem_data[node].max_pfn = max(epfn, mem_data[node].max_pfn);
 	}
 
 	return 0;
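
This hunk is the core of the "use mem_data to detect nodes' minimal and maximal PFNs" patch: instead of writing node extents into bootmem's bootmem_data, build_node_maps() records granule-aligned first/last PFNs directly in mem_data[]. A self-contained sketch of that bookkeeping — PAGE_SHIFT 14 (16K pages) and a 16M granule are an assumed ia64 configuration, and the kernel's `!min_pfn` test is modelled by checking both fields:

#include <stdio.h>

#define PAGE_SHIFT	14
#define GRANULE_SIZE	(1UL << 24)		/* stands in for IA64_GRANULE_SIZE */

#define GRANULEROUNDDOWN(a)	((a) & ~(GRANULE_SIZE - 1))
#define GRANULEROUNDUP(a)	(((a) + GRANULE_SIZE - 1) & ~(GRANULE_SIZE - 1))

struct node_extents {
	unsigned long min_pfn;
	unsigned long max_pfn;
};

/* Fold one physical range into a node's extents, granule-aligned, the way
 * build_node_maps() now updates mem_data[node]. */
static void fold_range(struct node_extents *n, unsigned long start, unsigned long len)
{
	unsigned long spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;
	unsigned long epfn = GRANULEROUNDUP(start + len) >> PAGE_SHIFT;

	if (!n->min_pfn && !n->max_pfn) {	/* first range seen */
		n->min_pfn = spfn;
		n->max_pfn = epfn;
	} else {
		if (spfn < n->min_pfn)
			n->min_pfn = spfn;
		if (epfn > n->max_pfn)
			n->max_pfn = epfn;
	}
}

int main(void)
{
	struct node_extents node = { 0, 0 };

	fold_range(&node, 0x4000000UL, 0x2000000UL);	/* 64M..96M */
	fold_range(&node, 0x1000000UL, 0x1000000UL);	/* 16M..32M, extends min */
	printf("node spans PFNs [%lu, %lu)\n", node.min_pfn, node.max_pfn);
	return 0;
}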
@@ -269,7 +265,6 @@ static void __init fill_pernode(int node, unsigned long pernode,
 {
 	void *cpu_data;
 	int cpus = early_nr_cpus_node(node);
-	struct bootmem_data *bdp = &bootmem_node_data[node];
 
 	mem_data[node].pernode_addr = pernode;
 	mem_data[node].pernode_size = pernodesize;
@@ -284,8 +279,6 @@ static void __init fill_pernode(int node, unsigned long pernode,
 
 	mem_data[node].node_data = __va(pernode);
 	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
-
-	pgdat_list[node]->bdata = bdp;
 	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
 	cpu_data = per_cpu_node_setup(cpu_data, node);
@@ -325,20 +318,16 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
 				     int node)
 {
 	unsigned long spfn, epfn;
-	unsigned long pernodesize = 0, pernode, pages, mapsize;
-	struct bootmem_data *bdp = &bootmem_node_data[node];
+	unsigned long pernodesize = 0, pernode;
 
 	spfn = start >> PAGE_SHIFT;
 	epfn = (start + len) >> PAGE_SHIFT;
 
-	pages = bdp->node_low_pfn - bdp->node_min_pfn;
-	mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
-
 	/*
 	 * Make sure this memory falls within this node's usable memory
 	 * since we may have thrown some away in build_maps().
 	 */
-	if (spfn < bdp->node_min_pfn || epfn > bdp->node_low_pfn)
+	if (spfn < mem_data[node].min_pfn || epfn > mem_data[node].max_pfn)
 		return 0;
 
 	/* Don't setup this node's local space twice... */
@@ -353,32 +342,13 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
 	pernode = NODEDATA_ALIGN(start, node);
 
 	/* Is this range big enough for what we want to store here? */
-	if (start + len > (pernode + pernodesize + mapsize))
+	if (start + len > (pernode + pernodesize))
 		fill_pernode(node, pernode, pernodesize);
 
 	return 0;
 }
 
 /**
- * free_node_bootmem - free bootmem allocator memory for use
- * @start: physical start of range
- * @len: length of range
- * @node: node where this range resides
- *
- * Simply calls the bootmem allocator to free the specified ranged from
- * the given pg_data_t's bdata struct. After this function has been called
- * for all the entries in the EFI memory map, the bootmem allocator will
- * be ready to service allocation requests.
- */
-static int __init free_node_bootmem(unsigned long start, unsigned long len,
-				    int node)
-{
-	free_bootmem_node(pgdat_list[node], start, len);
-
-	return 0;
-}
-
-/**
  * reserve_pernode_space - reserve memory for per-node space
  *
  * Reserve the space used by the bootmem maps & per-node space in the boot
@@ -387,28 +357,17 @@ static int __init free_node_bootmem(unsigned long start, unsigned long len,
  */
 static void __init reserve_pernode_space(void)
 {
-	unsigned long base, size, pages;
-	struct bootmem_data *bdp;
+	unsigned long base, size;
 	int node;
 
 	for_each_online_node(node) {
-		pg_data_t *pdp = pgdat_list[node];
-
 		if (node_isset(node, memory_less_mask))
 			continue;
 
-		bdp = pdp->bdata;
-
-		/* First the bootmem_map itself */
-		pages = bdp->node_low_pfn - bdp->node_min_pfn;
-		size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
-		base = __pa(bdp->node_bootmem_map);
-		reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
-
 		/* Now the per-node space */
 		size = mem_data[node].pernode_size;
 		base = __pa(mem_data[node].pernode_addr);
-		reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
+		memblock_reserve(base, size);
 	}
 }
 
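
The deleted half of reserve_pernode_space() existed only to reserve each node's bootmem bitmap, sized at one bit per page and rounded up to whole pages; with memblock there is no bitmap, so only the per-node data itself needs reserving. For scale, a quick back-of-the-envelope program — a PAGE_SHIFT of 14 (16K pages) and a 1G node are assumed values — shows what the old bitmap cost:

#include <stdio.h>

#define PAGE_SHIFT	14
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* mirrors bootmem_bootmap_pages(): bytes = pages/8, rounded up to pages */
static unsigned long bootmap_pages(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;

	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long node_pages = (1UL << 30) >> PAGE_SHIFT;	/* a 1G node */
	unsigned long map = bootmap_pages(node_pages);

	printf("bitmap for %lu pages: %lu page(s), %lu bytes\n",
	       node_pages, map, map << PAGE_SHIFT);
	return 0;
}

The bitmap itself is small; what the conversion really removes is the obligation to find, reserve, and free it per node in the order bootmem expects — the ordering constraint visible in the find_memory() hunk below.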
@@ -528,6 +487,7 @@ void __init find_memory(void)
 	int node;
 
 	reserve_memory();
+	efi_memmap_walk(filter_memory, register_active_ranges);
 
 	if (num_online_nodes() == 0) {
 		printk(KERN_ERR "node info missing!\n");
@@ -544,38 +504,8 @@ void __init find_memory(void)
 	efi_memmap_walk(find_max_min_low_pfn, NULL);
 
 	for_each_online_node(node)
-		if (bootmem_node_data[node].node_low_pfn) {
+		if (mem_data[node].min_pfn)
 			node_clear(node, memory_less_mask);
-			mem_data[node].min_pfn = ~0UL;
-		}
-
-	efi_memmap_walk(filter_memory, register_active_ranges);
-
-	/*
-	 * Initialize the boot memory maps in reverse order since that's
-	 * what the bootmem allocator expects
-	 */
-	for (node = MAX_NUMNODES - 1; node >= 0; node--) {
-		unsigned long pernode, pernodesize, map;
-		struct bootmem_data *bdp;
-
-		if (!node_online(node))
-			continue;
-		else if (node_isset(node, memory_less_mask))
-			continue;
-
-		bdp = &bootmem_node_data[node];
-		pernode = mem_data[node].pernode_addr;
-		pernodesize = mem_data[node].pernode_size;
-		map = pernode + pernodesize;
-
-		init_bootmem_node(pgdat_list[node],
-				  map>>PAGE_SHIFT,
-				  bdp->node_min_pfn,
-				  bdp->node_low_pfn);
-	}
-
-	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);
 
 	reserve_pernode_space();
 	memory_less_nodes();
@@ -655,36 +585,6 @@ void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
 }
 
 /**
- * count_node_pages - callback to build per-node memory info structures
- * @start: physical start of range
- * @len: length of range
- * @node: node where this range resides
- *
- * Each node has it's own number of physical pages, DMAable pages, start, and
- * end page frame number. This routine will be called by call_pernode_memory()
- * for each piece of usable memory and will setup these values for each node.
- * Very similar to build_maps().
- */
-static __init int count_node_pages(unsigned long start, unsigned long len, int node)
-{
-	unsigned long end = start + len;
-
-#ifdef CONFIG_ZONE_DMA32
-	if (start <= __pa(MAX_DMA_ADDRESS))
-		mem_data[node].num_dma_physpages +=
-			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
-#endif
-	start = GRANULEROUNDDOWN(start);
-	end = GRANULEROUNDUP(end);
-	mem_data[node].max_pfn = max(mem_data[node].max_pfn,
-				     end >> PAGE_SHIFT);
-	mem_data[node].min_pfn = min(mem_data[node].min_pfn,
-				     start >> PAGE_SHIFT);
-
-	return 0;
-}
-
-/**
  * paging_init - setup page tables
  *
  * paging_init() sets up the page tables for each node of the system and frees
@@ -700,8 +600,6 @@ void __init paging_init(void)
 
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
-	efi_memmap_walk(filter_rsvd_memory, count_node_pages);
-
 	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 	sparse_init();
 