Diffstat (limited to 'arch/arm/mm/init.c')
-rw-r--r--	arch/arm/mm/init.c	266
1 files changed, 193 insertions, 73 deletions
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7185b00650fe..c19571c40a21 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -15,11 +15,14 @@
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
+#include <linux/of_fdt.h>
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
+#include <linux/sort.h>
 
 #include <asm/mach-types.h>
+#include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
@@ -70,6 +73,14 @@ static int __init parse_tag_initrd2(const struct tag *tag)
 
 __tagtable(ATAG_INITRD2, parse_tag_initrd2);
 
+#ifdef CONFIG_OF_FLATTREE
+void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
+{
+	phys_initrd_start = start;
+	phys_initrd_size = end - start;
+}
+#endif /* CONFIG_OF_FLATTREE */
+
 /*
  * This keeps memory configuration data used by a couple memory
  * initialization functions, as well as show_mem() for the skipping
@@ -77,14 +88,14 @@ __tagtable(ATAG_INITRD2, parse_tag_initrd2);
  */
 struct meminfo meminfo;
 
-void show_mem(void)
+void show_mem(unsigned int filter)
 {
 	int free = 0, total = 0, reserved = 0;
 	int shared = 0, cached = 0, slab = 0, i;
 	struct meminfo * mi = &meminfo;
 
 	printk("Mem-info:\n");
-	show_free_areas();
+	show_free_areas(filter);
 
 	for_each_bank (i, mi) {
 		struct membank *bank = &mi->bank[i];
@@ -121,9 +132,10 @@ void show_mem(void)
 	printk("%d pages swap cached\n", cached);
 }
 
-static void __init find_limits(struct meminfo *mi,
-	unsigned long *min, unsigned long *max_low, unsigned long *max_high)
+static void __init find_limits(unsigned long *min, unsigned long *max_low,
+	unsigned long *max_high)
 {
+	struct meminfo *mi = &meminfo;
 	int i;
 
 	*min = -1UL;
@@ -147,13 +159,13 @@ static void __init find_limits(struct meminfo *mi,
 	}
 }
 
-static void __init arm_bootmem_init(struct meminfo *mi,
-	unsigned long start_pfn, unsigned long end_pfn)
+static void __init arm_bootmem_init(unsigned long start_pfn,
+	unsigned long end_pfn)
 {
+	struct memblock_region *reg;
 	unsigned int boot_pages;
 	phys_addr_t bitmap;
 	pg_data_t *pgdat;
-	int i;
 
 	/*
 	 * Allocate the bootmem bitmap page. This must be in a region
@@ -171,30 +183,53 @@ static void __init arm_bootmem_init(struct meminfo *mi,
 	pgdat = NODE_DATA(0);
 	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
 
-	for_each_bank(i, mi) {
-		struct membank *bank = &mi->bank[i];
-		if (!bank->highmem)
-			free_bootmem(bank_phys_start(bank), bank_phys_size(bank));
+	/* Free the lowmem regions from memblock into bootmem. */
+	for_each_memblock(memory, reg) {
+		unsigned long start = memblock_region_memory_base_pfn(reg);
+		unsigned long end = memblock_region_memory_end_pfn(reg);
+
+		if (end >= end_pfn)
+			end = end_pfn;
+		if (start >= end)
+			break;
+
+		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
 	}
 
-	/*
-	 * Reserve the memblock reserved regions in bootmem.
-	 */
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		phys_addr_t start = memblock_start_pfn(&memblock.reserved, i);
-		if (start >= start_pfn &&
-		    memblock_end_pfn(&memblock.reserved, i) <= end_pfn)
-			reserve_bootmem_node(pgdat, __pfn_to_phys(start),
-				memblock_size_bytes(&memblock.reserved, i),
-				BOOTMEM_DEFAULT);
+	/* Reserve the lowmem memblock reserved regions in bootmem. */
+	for_each_memblock(reserved, reg) {
+		unsigned long start = memblock_region_reserved_base_pfn(reg);
+		unsigned long end = memblock_region_reserved_end_pfn(reg);
+
+		if (end >= end_pfn)
+			end = end_pfn;
+		if (start >= end)
+			break;
+
+		reserve_bootmem(__pfn_to_phys(start),
+				(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
 	}
 }
 
-static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
-	unsigned long max_low, unsigned long max_high)
+#ifdef CONFIG_ZONE_DMA
+static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
+	unsigned long dma_size)
+{
+	if (size[0] <= dma_size)
+		return;
+
+	size[ZONE_NORMAL] = size[0] - dma_size;
+	size[ZONE_DMA] = dma_size;
+	hole[ZONE_NORMAL] = hole[0];
+	hole[ZONE_DMA] = 0;
+}
+#endif
+
+static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
+	unsigned long max_high)
 {
 	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-	int i;
+	struct memblock_region *reg;
 
 	/*
 	 * initialise the zones.
@@ -216,72 +251,97 @@ static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
 	 *  holes = node_size - sum(bank_sizes)
 	 */
 	memcpy(zhole_size, zone_size, sizeof(zhole_size));
-	for_each_bank(i, mi) {
-		int idx = 0;
+	for_each_memblock(memory, reg) {
+		unsigned long start = memblock_region_memory_base_pfn(reg);
+		unsigned long end = memblock_region_memory_end_pfn(reg);
+
+		if (start < max_low) {
+			unsigned long low_end = min(end, max_low);
+			zhole_size[0] -= low_end - start;
+		}
 #ifdef CONFIG_HIGHMEM
-		if (mi->bank[i].highmem)
-			idx = ZONE_HIGHMEM;
+		if (end > max_low) {
+			unsigned long high_start = max(start, max_low);
+			zhole_size[ZONE_HIGHMEM] -= end - high_start;
+		}
 #endif
-		zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
 	}
 
+#ifdef ARM_DMA_ZONE_SIZE
+#ifndef CONFIG_ZONE_DMA
+#error ARM_DMA_ZONE_SIZE set but no DMA zone to limit allocations
+#endif
+
 	/*
 	 * Adjust the sizes according to any special requirements for
 	 * this machine type.
 	 */
-	arch_adjust_zones(zone_size, zhole_size);
+	arm_adjust_dma_zone(zone_size, zhole_size,
+		ARM_DMA_ZONE_SIZE >> PAGE_SHIFT);
+#endif
 
 	free_area_init_node(0, zone_size, min, zhole_size);
 }
 
-#ifndef CONFIG_SPARSEMEM
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
-	struct memblock_region *mem = &memblock.memory;
-	unsigned int left = 0, right = mem->cnt;
-
-	do {
-		unsigned int mid = (right + left) / 2;
-
-		if (pfn < memblock_start_pfn(mem, mid))
-			right = mid;
-		else if (pfn >= memblock_end_pfn(mem, mid))
-			left = mid + 1;
-		else
-			return 1;
-	} while (left < right);
-	return 0;
+	return memblock_is_memory(pfn << PAGE_SHIFT);
 }
 EXPORT_SYMBOL(pfn_valid);
+#endif
 
+#ifndef CONFIG_SPARSEMEM
 static void arm_memory_present(void)
 {
 }
 #else
 static void arm_memory_present(void)
 {
-	int i;
-	for (i = 0; i < memblock.memory.cnt; i++)
-		memory_present(0, memblock_start_pfn(&memblock.memory, i),
-			       memblock_end_pfn(&memblock.memory, i));
+	struct memblock_region *reg;
+
+	for_each_memblock(memory, reg)
+		memory_present(0, memblock_region_memory_base_pfn(reg),
+			       memblock_region_memory_end_pfn(reg));
 }
 #endif
 
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+	const struct membank *a = _a, *b = _b;
+	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
 void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 {
 	int i;
 
+	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
 	memblock_init();
 	for (i = 0; i < mi->nr_banks; i++)
 		memblock_add(mi->bank[i].start, mi->bank[i].size);
 
 	/* Register the kernel text, kernel data and initrd with memblock. */
 #ifdef CONFIG_XIP_KERNEL
-	memblock_reserve(__pa(_data), _end - _data);
+	memblock_reserve(__pa(_sdata), _end - _sdata);
 #else
 	memblock_reserve(__pa(_stext), _end - _stext);
 #endif
 #ifdef CONFIG_BLK_DEV_INITRD
+	if (phys_initrd_size &&
+	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
+		pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
+		       phys_initrd_start, phys_initrd_size);
+		phys_initrd_start = phys_initrd_size = 0;
+	}
+	if (phys_initrd_size &&
+	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
+		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
+		       phys_initrd_start, phys_initrd_size);
+		phys_initrd_start = phys_initrd_size = 0;
+	}
 	if (phys_initrd_size) {
 		memblock_reserve(phys_initrd_start, phys_initrd_size);
 
@@ -292,6 +352,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 #endif
 
 	arm_mm_memblock_reserve();
+	arm_dt_memblock_reserve();
 
 	/* reserve any platform specific memblock areas */
 	if (mdesc->reserve)
@@ -303,14 +364,13 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 
 void __init bootmem_init(void)
 {
-	struct meminfo *mi = &meminfo;
 	unsigned long min, max_low, max_high;
 
 	max_low = max_high = 0;
 
-	find_limits(mi, &min, &max_low, &max_high);
+	find_limits(&min, &max_low, &max_high);
 
-	arm_bootmem_init(mi, min, max_low);
+	arm_bootmem_init(min, max_low);
 
 	/*
 	 * Sparsemem tries to allocate bootmem in memory_present(),
@@ -328,9 +388,9 @@ void __init bootmem_init(void)
 	 * the sparse mem_map arrays initialized by sparse_init()
 	 * for memmap_init_zone(), otherwise all PFNs are invalid.
 	 */
-	arm_bootmem_free(mi, min, max_low, max_high);
+	arm_bootmem_free(min, max_low, max_high);
 
-	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
+	high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;
 
 	/*
 	 * This doesn't seem to be used by the Linux memory manager any
@@ -372,14 +432,14 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 	 * Convert start_pfn/end_pfn to a struct page pointer.
 	 */
 	start_pg = pfn_to_page(start_pfn - 1) + 1;
-	end_pg = pfn_to_page(end_pfn);
+	end_pg = pfn_to_page(end_pfn - 1) + 1;
 
 	/*
 	 * Convert to physical addresses, and
 	 * round start upwards and end downwards.
 	 */
-	pg = PAGE_ALIGN(__pa(start_pg));
-	pgend = __pa(end_pg) & PAGE_MASK;
+	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
+	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
 
 	/*
 	 * If there are free pages between these,
@@ -406,6 +466,14 @@ static void __init free_unused_memmap(struct meminfo *mi)
 
 		bank_start = bank_pfn_start(bank);
 
+#ifdef CONFIG_SPARSEMEM
+		/*
+		 * Take care not to free memmap entries that don't exist
+		 * due to SPARSEMEM sections which aren't present.
+		 */
+		bank_start = min(bank_start,
+				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#endif
 		/*
 		 * If we had a previous bank, and there is a space
 		 * between the current bank and the previous, free it.
@@ -420,6 +488,62 @@ static void __init free_unused_memmap(struct meminfo *mi)
 		 */
 		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
 	}
+
+#ifdef CONFIG_SPARSEMEM
+	if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
+		free_memmap(prev_bank_end,
+			    ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#endif
+}
+
+static void __init free_highpages(void)
+{
+#ifdef CONFIG_HIGHMEM
+	unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
+	struct memblock_region *mem, *res;
+
+	/* set highmem page free */
+	for_each_memblock(memory, mem) {
+		unsigned long start = memblock_region_memory_base_pfn(mem);
+		unsigned long end = memblock_region_memory_end_pfn(mem);
+
+		/* Ignore complete lowmem entries */
+		if (end <= max_low)
+			continue;
+
+		/* Truncate partial highmem entries */
+		if (start < max_low)
+			start = max_low;
+
+		/* Find and exclude any reserved regions */
+		for_each_memblock(reserved, res) {
+			unsigned long res_start, res_end;
+
+			res_start = memblock_region_reserved_base_pfn(res);
+			res_end = memblock_region_reserved_end_pfn(res);
+
+			if (res_end < start)
+				continue;
+			if (res_start < start)
+				res_start = start;
+			if (res_start > end)
+				res_start = end;
+			if (res_end > end)
+				res_end = end;
+			if (res_start != start)
+				totalhigh_pages += free_area(start, res_start,
+							     NULL);
+			start = res_end;
+			if (start == end)
+				break;
+		}
+
+		/* And now free anything which remains */
+		if (start < end)
+			totalhigh_pages += free_area(start, end, NULL);
+	}
+	totalram_pages += totalhigh_pages;
+#endif
 }
 
 /*
@@ -430,6 +554,7 @@ static void __init free_unused_memmap(struct meminfo *mi)
 void __init mem_init(void)
 {
 	unsigned long reserved_pages, free_pages;
+	struct memblock_region *reg;
 	int i;
 #ifdef CONFIG_HAVE_TCM
 	/* These pointers are filled in on TCM detection */
@@ -450,16 +575,7 @@ void __init mem_init(void)
 			    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
 #endif
 
-#ifdef CONFIG_HIGHMEM
-	/* set highmem page free */
-	for_each_bank (i, &meminfo) {
-		unsigned long start = bank_pfn_start(&meminfo.bank[i]);
-		unsigned long end = bank_pfn_end(&meminfo.bank[i]);
-		if (start >= max_low_pfn + PHYS_PFN_OFFSET)
-			totalhigh_pages += free_area(start, end, NULL);
-	}
-	totalram_pages += totalhigh_pages;
-#endif
+	free_highpages();
 
 	reserved_pages = free_pages = 0;
 
@@ -489,9 +605,11 @@ void __init mem_init(void)
 	 */
 	printk(KERN_INFO "Memory:");
 	num_physpages = 0;
-	for (i = 0; i < meminfo.nr_banks; i++) {
-		num_physpages += bank_pfn_size(&meminfo.bank[i]);
-		printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
+	for_each_memblock(memory, reg) {
+		unsigned long pages = memblock_region_memory_end_pfn(reg) -
+			memblock_region_memory_base_pfn(reg);
+		num_physpages += pages;
+		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
 	}
 	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
 
@@ -523,7 +641,8 @@ void __init mem_init(void)
523 " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" 641 " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
524 " .init : 0x%p" " - 0x%p" " (%4d kB)\n" 642 " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
525 " .text : 0x%p" " - 0x%p" " (%4d kB)\n" 643 " .text : 0x%p" " - 0x%p" " (%4d kB)\n"
526 " .data : 0x%p" " - 0x%p" " (%4d kB)\n", 644 " .data : 0x%p" " - 0x%p" " (%4d kB)\n"
645 " .bss : 0x%p" " - 0x%p" " (%4d kB)\n",
527 646
528 MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + 647 MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
529 (PAGE_SIZE)), 648 (PAGE_SIZE)),
@@ -545,7 +664,8 @@ void __init mem_init(void)
 
 			MLK_ROUNDUP(__init_begin, __init_end),
 			MLK_ROUNDUP(_text, _etext),
-			MLK_ROUNDUP(_data, _edata));
+			MLK_ROUNDUP(_sdata, _edata),
+			MLK_ROUNDUP(__bss_start, __bss_stop));
 
 #undef MLK
 #undef MLM