Diffstat (limited to 'arch/arm/mm/init.c')
-rw-r--r--	arch/arm/mm/init.c	72
1 file changed, 28 insertions(+), 44 deletions(-)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 13ce33e096b5..5958ac05181e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -90,24 +90,21 @@ __tagtable(ATAG_INITRD2, parse_tag_initrd2);
  * initialization functions, as well as show_mem() for the skipping
  * of holes in the memory map. It is populated by arm_add_memory().
  */
-struct meminfo meminfo;
-
 void show_mem(unsigned int filter)
 {
 	int free = 0, total = 0, reserved = 0;
-	int shared = 0, cached = 0, slab = 0, i;
-	struct meminfo * mi = &meminfo;
+	int shared = 0, cached = 0, slab = 0;
+	struct memblock_region *reg;
 
 	printk("Mem-info:\n");
 	show_free_areas(filter);
 
-	for_each_bank (i, mi) {
-		struct membank *bank = &mi->bank[i];
+	for_each_memblock (memory, reg) {
 		unsigned int pfn1, pfn2;
 		struct page *page, *end;
 
-		pfn1 = bank_pfn_start(bank);
-		pfn2 = bank_pfn_end(bank);
+		pfn1 = memblock_region_memory_base_pfn(reg);
+		pfn2 = memblock_region_memory_end_pfn(reg);
 
 		page = pfn_to_page(pfn1);
 		end = pfn_to_page(pfn2 - 1) + 1;
@@ -124,8 +121,9 @@ void show_mem(unsigned int filter)
 				free++;
 			else
 				shared += page_count(page) - 1;
-			page++;
-		} while (page < end);
+			pfn1++;
+			page = pfn_to_page(pfn1);
+		} while (pfn1 < pfn2);
 	}
 
 	printk("%d pages of RAM\n", total);
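
Note on the loop change above: with memblock regions the walk advances by page frame number and re-derives the struct page on each step, because the mem_map backing a region need not be virtually contiguous (e.g. with SPARSEMEM), so a bare page++ is only safe within one section. A minimal, illustrative sketch of the same pattern (not part of the patch):

	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long pfn = memblock_region_memory_base_pfn(reg);
		unsigned long end_pfn = memblock_region_memory_end_pfn(reg);

		while (pfn < end_pfn) {
			struct page *page = pfn_to_page(pfn);	/* recompute; never page++ across sections */
			/* ... examine PageReserved(page), page_count(page), ... */
			pfn++;
		}
	}
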
@@ -139,16 +137,9 @@ void show_mem(unsigned int filter)
 static void __init find_limits(unsigned long *min, unsigned long *max_low,
 			       unsigned long *max_high)
 {
-	struct meminfo *mi = &meminfo;
-	int i;
-
-	/* This assumes the meminfo array is properly sorted */
-	*min = bank_pfn_start(&mi->bank[0]);
-	for_each_bank (i, mi)
-		if (mi->bank[i].highmem)
-			break;
-	*max_low = bank_pfn_end(&mi->bank[i - 1]);
-	*max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
+	*max_low = PFN_DOWN(memblock_get_current_limit());
+	*min = PFN_UP(memblock_start_of_DRAM());
+	*max_high = PFN_DOWN(memblock_end_of_DRAM());
 }
 
 #ifdef CONFIG_ZONE_DMA
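
The rewritten find_limits() reads its bounds straight from memblock: memblock_start_of_DRAM() and memblock_end_of_DRAM() give the byte range of all registered memory, memblock_get_current_limit() gives the lowmem ceiling, and PFN_UP()/PFN_DOWN() convert those byte addresses to page frame numbers, rounding up and down respectively. For reference, the conversions are roughly equivalent to the following sketch (assuming the usual include/linux/pfn.h definitions):

	#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)	/* first pfn at or above address x */
	#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)			/* pfn containing address x */
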
@@ -283,14 +274,8 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
 	return phys;
 }
 
-void __init arm_memblock_init(struct meminfo *mi,
-			      const struct machine_desc *mdesc)
+void __init arm_memblock_init(const struct machine_desc *mdesc)
 {
-	int i;
-
-	for (i = 0; i < mi->nr_banks; i++)
-		memblock_add(mi->bank[i].start, mi->bank[i].size);
-
 	/* Register the kernel text, kernel data and initrd with memblock. */
 #ifdef CONFIG_XIP_KERNEL
 	memblock_reserve(__pa(_sdata), _end - _sdata);
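
With the meminfo parameter gone, arm_memblock_init() no longer feeds RAM regions into memblock itself; the regions are expected to already be registered (e.g. by the ATAG/DT parsing path calling memblock_add()) by the time it runs, leaving it to reserve the kernel image, initrd and machine-specific areas. A hypothetical caller shape, for illustration only (the setup.c side is not part of this diff, and the identifiers below are placeholders):

	memblock_add(region_start, region_size);	/* one call per RAM region, done earlier */
	arm_memblock_init(mdesc);			/* reserve kernel text/data, initrd, ... */
	memblock_dump_all();				/* optional debug dump of the final layout */
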
@@ -422,54 +407,53 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 /*
  * The mem_map array can get very big. Free the unused area of the memory map.
  */
-static void __init free_unused_memmap(struct meminfo *mi)
+static void __init free_unused_memmap(void)
 {
-	unsigned long bank_start, prev_bank_end = 0;
-	unsigned int i;
+	unsigned long start, prev_end = 0;
+	struct memblock_region *reg;
 
 	/*
 	 * This relies on each bank being in address order.
 	 * The banks are sorted previously in bootmem_init().
 	 */
-	for_each_bank(i, mi) {
-		struct membank *bank = &mi->bank[i];
-
-		bank_start = bank_pfn_start(bank);
+	for_each_memblock(memory, reg) {
+		start = memblock_region_memory_base_pfn(reg);
 
 #ifdef CONFIG_SPARSEMEM
 		/*
 		 * Take care not to free memmap entries that don't exist
 		 * due to SPARSEMEM sections which aren't present.
 		 */
-		bank_start = min(bank_start,
-				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
+		start = min(start,
+			    ALIGN(prev_end, PAGES_PER_SECTION));
 #else
 		/*
 		 * Align down here since the VM subsystem insists that the
 		 * memmap entries are valid from the bank start aligned to
 		 * MAX_ORDER_NR_PAGES.
 		 */
-		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
+		start = round_down(start, MAX_ORDER_NR_PAGES);
 #endif
 		/*
 		 * If we had a previous bank, and there is a space
 		 * between the current bank and the previous, free it.
 		 */
-		if (prev_bank_end && prev_bank_end < bank_start)
-			free_memmap(prev_bank_end, bank_start);
+		if (prev_end && prev_end < start)
+			free_memmap(prev_end, start);
 
 		/*
 		 * Align up here since the VM subsystem insists that the
 		 * memmap entries are valid from the bank end aligned to
 		 * MAX_ORDER_NR_PAGES.
 		 */
-		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
+		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
+				 MAX_ORDER_NR_PAGES);
 	}
 
 #ifdef CONFIG_SPARSEMEM
-	if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
-		free_memmap(prev_bank_end,
-			    ALIGN(prev_bank_end, PAGES_PER_SECTION));
+	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
+		free_memmap(prev_end,
+			    ALIGN(prev_end, PAGES_PER_SECTION));
 #endif
 }
 
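
In free_unused_memmap() the start of each region is rounded down, and the end of the previous region rounded up, to MAX_ORDER_NR_PAGES before a hole is freed, so the buddy allocator never sees a pageblock whose memmap is only partially present. A rough worked example with illustrative pfns (MAX_ORDER_NR_PAGES is typically 1024 pages, i.e. 4 MiB with 4 KiB pages):

	/* illustrative values: one region ends at pfn 0x8200, the next starts at 0x10000 */
	prev_end = ALIGN(0x8200, 1024);			/* -> 0x8400 */
	start = round_down(0x10000, 1024);		/* -> 0x10000, already aligned */
	if (prev_end && prev_end < start)
		free_memmap(prev_end, start);		/* frees memmap entries for pfns 0x8400..0xffff */
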
@@ -545,7 +529,7 @@ void __init mem_init(void)
 	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
 	/* this will put all unused low memory onto the freelists */
-	free_unused_memmap(&meminfo);
+	free_unused_memmap();
 	free_all_bootmem();
 
 #ifdef CONFIG_SA1111