summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMike Rapoport <rppt@linux.vnet.ibm.com>2018-07-23 01:56:58 -0400
committerTony Luck <tony.luck@intel.com>2018-07-23 14:32:36 -0400
commitf62800992e5917f2bed143dbcdce2501bc6933a9 (patch)
tree93de6d99759cc4268ffb542e7fd284b691cc38a8
parentfb63fbee423afc6fa6f982d31c6894bb2da8f7ef (diff)
ia64: switch to NO_BOOTMEM
Since ia64 already uses memblock to register available physical memory, it is only required to move the calls to register_active_ranges() that wrap memblock_add_node() earlier, and to replace bootmem memory reservations with memblock_reserve(). Of course, all the code that finds the place to put the bootmem bitmap is removed.

Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
-rw-r--r--arch/ia64/Kconfig1
-rw-r--r--arch/ia64/kernel/setup.c11
-rw-r--r--arch/ia64/mm/contig.c71
-rw-r--r--arch/ia64/mm/discontig.c74
4 files changed, 22 insertions, 135 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ff861420b8f5..107b1389a450 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -31,6 +31,7 @@ config IA64
31 select HAVE_ARCH_TRACEHOOK 31 select HAVE_ARCH_TRACEHOOK
32 select HAVE_MEMBLOCK 32 select HAVE_MEMBLOCK
33 select HAVE_MEMBLOCK_NODE_MAP 33 select HAVE_MEMBLOCK_NODE_MAP
34 select NO_BOOTMEM
34 select HAVE_VIRT_CPU_ACCOUNTING 35 select HAVE_VIRT_CPU_ACCOUNTING
35 select ARCH_HAS_DMA_MARK_CLEAN 36 select ARCH_HAS_DMA_MARK_CLEAN
36 select ARCH_HAS_SG_CHAIN 37 select ARCH_HAS_SG_CHAIN
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ad43cbf70628..0e6c2d9fb498 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -32,6 +32,7 @@
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/cpu.h> 33#include <linux/cpu.h>
34#include <linux/kernel.h> 34#include <linux/kernel.h>
35#include <linux/memblock.h>
35#include <linux/reboot.h> 36#include <linux/reboot.h>
36#include <linux/sched/mm.h> 37#include <linux/sched/mm.h>
37#include <linux/sched/clock.h> 38#include <linux/sched/clock.h>
@@ -383,8 +384,16 @@ reserve_memory (void)
383 384
384 sort_regions(rsvd_region, num_rsvd_regions); 385 sort_regions(rsvd_region, num_rsvd_regions);
385 num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions); 386 num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);
386}
387 387
388 /* reserve all regions except the end of memory marker with memblock */
389 for (n = 0; n < num_rsvd_regions - 1; n++) {
390 struct rsvd_region *region = &rsvd_region[n];
391 phys_addr_t addr = __pa(region->start);
392 phys_addr_t size = region->end - region->start;
393
394 memblock_reserve(addr, size);
395 }
396}
388 397
389/** 398/**
390 * find_initrd - get initrd parameters from the boot parameter structure 399 * find_initrd - get initrd parameters from the boot parameter structure
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 1835144268ec..e2e40bbd391c 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -34,53 +34,6 @@ static unsigned long max_gap;
34/* physical address where the bootmem map is located */ 34/* physical address where the bootmem map is located */
35unsigned long bootmap_start; 35unsigned long bootmap_start;
36 36
37/**
38 * find_bootmap_location - callback to find a memory area for the bootmap
39 * @start: start of region
40 * @end: end of region
41 * @arg: unused callback data
42 *
43 * Find a place to put the bootmap and return its starting address in
44 * bootmap_start. This address must be page-aligned.
45 */
46static int __init
47find_bootmap_location (u64 start, u64 end, void *arg)
48{
49 u64 needed = *(unsigned long *)arg;
50 u64 range_start, range_end, free_start;
51 int i;
52
53#if IGNORE_PFN0
54 if (start == PAGE_OFFSET) {
55 start += PAGE_SIZE;
56 if (start >= end)
57 return 0;
58 }
59#endif
60
61 free_start = PAGE_OFFSET;
62
63 for (i = 0; i < num_rsvd_regions; i++) {
64 range_start = max(start, free_start);
65 range_end = min(end, rsvd_region[i].start & PAGE_MASK);
66
67 free_start = PAGE_ALIGN(rsvd_region[i].end);
68
69 if (range_end <= range_start)
70 continue; /* skip over empty range */
71
72 if (range_end - range_start >= needed) {
73 bootmap_start = __pa(range_start);
74 return -1; /* done */
75 }
76
77 /* nothing more available in this segment */
78 if (range_end == end)
79 return 0;
80 }
81 return 0;
82}
83
84#ifdef CONFIG_SMP 37#ifdef CONFIG_SMP
85static void *cpu_data; 38static void *cpu_data;
86/** 39/**
@@ -196,8 +149,6 @@ setup_per_cpu_areas(void)
196void __init 149void __init
197find_memory (void) 150find_memory (void)
198{ 151{
199 unsigned long bootmap_size;
200
201 reserve_memory(); 152 reserve_memory();
202 153
203 /* first find highest page frame number */ 154 /* first find highest page frame number */
@@ -205,21 +156,12 @@ find_memory (void)
205 max_low_pfn = 0; 156 max_low_pfn = 0;
206 efi_memmap_walk(find_max_min_low_pfn, NULL); 157 efi_memmap_walk(find_max_min_low_pfn, NULL);
207 max_pfn = max_low_pfn; 158 max_pfn = max_low_pfn;
208 /* how many bytes to cover all the pages */
209 bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
210 159
211 /* look for a location to hold the bootmap */ 160#ifdef CONFIG_VIRTUAL_MEM_MAP
212 bootmap_start = ~0UL; 161 efi_memmap_walk(filter_memory, register_active_ranges);
213 efi_memmap_walk(find_bootmap_location, &bootmap_size); 162#else
214 if (bootmap_start == ~0UL) 163 memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
215 panic("Cannot find %ld bytes for bootmap\n", bootmap_size); 164#endif
216
217 bootmap_size = init_bootmem_node(NODE_DATA(0),
218 (bootmap_start >> PAGE_SHIFT), 0, max_pfn);
219
220 /* Free all available memory, then mark bootmem-map as being in use. */
221 efi_memmap_walk(filter_rsvd_memory, free_bootmem);
222 reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);
223 165
224 find_initrd(); 166 find_initrd();
225 167
@@ -244,7 +186,6 @@ paging_init (void)
244 max_zone_pfns[ZONE_NORMAL] = max_low_pfn; 186 max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
245 187
246#ifdef CONFIG_VIRTUAL_MEM_MAP 188#ifdef CONFIG_VIRTUAL_MEM_MAP
247 efi_memmap_walk(filter_memory, register_active_ranges);
248 efi_memmap_walk(find_largest_hole, (u64 *)&max_gap); 189 efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
249 if (max_gap < LARGE_GAP) { 190 if (max_gap < LARGE_GAP) {
250 vmem_map = (struct page *) 0; 191 vmem_map = (struct page *) 0;
@@ -268,8 +209,6 @@ paging_init (void)
268 209
269 printk("Virtual mem_map starts at 0x%p\n", mem_map); 210 printk("Virtual mem_map starts at 0x%p\n", mem_map);
270 } 211 }
271#else /* !CONFIG_VIRTUAL_MEM_MAP */
272 memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
273#endif /* !CONFIG_VIRTUAL_MEM_MAP */ 212#endif /* !CONFIG_VIRTUAL_MEM_MAP */
274 free_area_init_nodes(max_zone_pfns); 213 free_area_init_nodes(max_zone_pfns);
275 zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); 214 zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 8e99d8e2455e..1928d5719e41 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -20,6 +20,7 @@
20#include <linux/nmi.h> 20#include <linux/nmi.h>
21#include <linux/swap.h> 21#include <linux/swap.h>
22#include <linux/bootmem.h> 22#include <linux/bootmem.h>
23#include <linux/memblock.h>
23#include <linux/acpi.h> 24#include <linux/acpi.h>
24#include <linux/efi.h> 25#include <linux/efi.h>
25#include <linux/nodemask.h> 26#include <linux/nodemask.h>
@@ -264,7 +265,6 @@ static void __init fill_pernode(int node, unsigned long pernode,
264{ 265{
265 void *cpu_data; 266 void *cpu_data;
266 int cpus = early_nr_cpus_node(node); 267 int cpus = early_nr_cpus_node(node);
267 struct bootmem_data *bdp = &bootmem_node_data[node];
268 268
269 mem_data[node].pernode_addr = pernode; 269 mem_data[node].pernode_addr = pernode;
270 mem_data[node].pernode_size = pernodesize; 270 mem_data[node].pernode_size = pernodesize;
@@ -279,8 +279,6 @@ static void __init fill_pernode(int node, unsigned long pernode,
279 279
280 mem_data[node].node_data = __va(pernode); 280 mem_data[node].node_data = __va(pernode);
281 pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); 281 pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
282
283 pgdat_list[node]->bdata = bdp;
284 pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); 282 pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
285 283
286 cpu_data = per_cpu_node_setup(cpu_data, node); 284 cpu_data = per_cpu_node_setup(cpu_data, node);
@@ -320,14 +318,11 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
320 int node) 318 int node)
321{ 319{
322 unsigned long spfn, epfn; 320 unsigned long spfn, epfn;
323 unsigned long pernodesize = 0, pernode, pages, mapsize; 321 unsigned long pernodesize = 0, pernode;
324 322
325 spfn = start >> PAGE_SHIFT; 323 spfn = start >> PAGE_SHIFT;
326 epfn = (start + len) >> PAGE_SHIFT; 324 epfn = (start + len) >> PAGE_SHIFT;
327 325
328 pages = mem_data[node].max_pfn - mem_data[node].min_pfn;
329 mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
330
331 /* 326 /*
332 * Make sure this memory falls within this node's usable memory 327 * Make sure this memory falls within this node's usable memory
333 * since we may have thrown some away in build_maps(). 328 * since we may have thrown some away in build_maps().
@@ -347,32 +342,13 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
347 pernode = NODEDATA_ALIGN(start, node); 342 pernode = NODEDATA_ALIGN(start, node);
348 343
349 /* Is this range big enough for what we want to store here? */ 344 /* Is this range big enough for what we want to store here? */
350 if (start + len > (pernode + pernodesize + mapsize)) 345 if (start + len > (pernode + pernodesize))
351 fill_pernode(node, pernode, pernodesize); 346 fill_pernode(node, pernode, pernodesize);
352 347
353 return 0; 348 return 0;
354} 349}
355 350
356/** 351/**
357 * free_node_bootmem - free bootmem allocator memory for use
358 * @start: physical start of range
359 * @len: length of range
360 * @node: node where this range resides
361 *
362 * Simply calls the bootmem allocator to free the specified ranged from
363 * the given pg_data_t's bdata struct. After this function has been called
364 * for all the entries in the EFI memory map, the bootmem allocator will
365 * be ready to service allocation requests.
366 */
367static int __init free_node_bootmem(unsigned long start, unsigned long len,
368 int node)
369{
370 free_bootmem_node(pgdat_list[node], start, len);
371
372 return 0;
373}
374
375/**
376 * reserve_pernode_space - reserve memory for per-node space 352 * reserve_pernode_space - reserve memory for per-node space
377 * 353 *
378 * Reserve the space used by the bootmem maps & per-node space in the boot 354 * Reserve the space used by the bootmem maps & per-node space in the boot
@@ -381,28 +357,17 @@ static int __init free_node_bootmem(unsigned long start, unsigned long len,
381 */ 357 */
382static void __init reserve_pernode_space(void) 358static void __init reserve_pernode_space(void)
383{ 359{
384 unsigned long base, size, pages; 360 unsigned long base, size;
385 struct bootmem_data *bdp;
386 int node; 361 int node;
387 362
388 for_each_online_node(node) { 363 for_each_online_node(node) {
389 pg_data_t *pdp = pgdat_list[node];
390
391 if (node_isset(node, memory_less_mask)) 364 if (node_isset(node, memory_less_mask))
392 continue; 365 continue;
393 366
394 bdp = pdp->bdata;
395
396 /* First the bootmem_map itself */
397 pages = mem_data[node].max_pfn - mem_data[node].min_pfn;
398 size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
399 base = __pa(bdp->node_bootmem_map);
400 reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
401
402 /* Now the per-node space */ 367 /* Now the per-node space */
403 size = mem_data[node].pernode_size; 368 size = mem_data[node].pernode_size;
404 base = __pa(mem_data[node].pernode_addr); 369 base = __pa(mem_data[node].pernode_addr);
405 reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT); 370 memblock_reserve(base, size);
406 } 371 }
407} 372}
408 373
@@ -522,6 +487,7 @@ void __init find_memory(void)
522 int node; 487 int node;
523 488
524 reserve_memory(); 489 reserve_memory();
490 efi_memmap_walk(filter_memory, register_active_ranges);
525 491
526 if (num_online_nodes() == 0) { 492 if (num_online_nodes() == 0) {
527 printk(KERN_ERR "node info missing!\n"); 493 printk(KERN_ERR "node info missing!\n");
@@ -541,34 +507,6 @@ void __init find_memory(void)
541 if (mem_data[node].min_pfn) 507 if (mem_data[node].min_pfn)
542 node_clear(node, memory_less_mask); 508 node_clear(node, memory_less_mask);
543 509
544 efi_memmap_walk(filter_memory, register_active_ranges);
545
546 /*
547 * Initialize the boot memory maps in reverse order since that's
548 * what the bootmem allocator expects
549 */
550 for (node = MAX_NUMNODES - 1; node >= 0; node--) {
551 unsigned long pernode, pernodesize, map;
552 struct bootmem_data *bdp;
553
554 if (!node_online(node))
555 continue;
556 else if (node_isset(node, memory_less_mask))
557 continue;
558
559 bdp = &bootmem_node_data[node];
560 pernode = mem_data[node].pernode_addr;
561 pernodesize = mem_data[node].pernode_size;
562 map = pernode + pernodesize;
563
564 init_bootmem_node(pgdat_list[node],
565 map>>PAGE_SHIFT,
566 mem_data[node].min_pfn,
567 mem_data[node].max_pfn);
568 }
569
570 efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);
571
572 reserve_pernode_space(); 510 reserve_pernode_space();
573 memory_less_nodes(); 511 memory_less_nodes();
574 initialize_pernode_data(); 512 initialize_pernode_data();