Diffstat (limited to 'arch/arm/mm/mm-armv.c')
-rw-r--r--  arch/arm/mm/mm-armv.c | 172
1 file changed, 51 insertions(+), 121 deletions(-)
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index d125a3dc061c..61bc2fa0511e 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -1,7 +1,7 @@
 /*
  * linux/arch/arm/mm/mm-armv.c
  *
- * Copyright (C) 1998-2002 Russell King
+ * Copyright (C) 1998-2005 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -305,16 +305,6 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg
 	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
 }
 
-/*
- * Clear any PGD mapping.  On a two-level page table system,
- * the clearance is done by the middle-level functions (pmd)
- * rather than the top-level (pgd) functions.
- */
-static inline void clear_mapping(unsigned long virt)
-{
-	pmd_clear(pmd_off_k(virt));
-}
-
 struct mem_types {
 	unsigned int	prot_pte;
 	unsigned int	prot_l1;
@@ -373,7 +363,7 @@ static struct mem_types mem_types[] __initdata = {
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
-static void __init build_mem_type_table(void)
+void __init build_mem_type_table(void)
 {
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
@@ -483,25 +473,25 @@ static void __init build_mem_type_table(void)
  * offsets, and we take full advantage of sections and
  * supersections.
  */
-static void __init create_mapping(struct map_desc *md)
+void __init create_mapping(struct map_desc *md)
 {
 	unsigned long virt, length;
 	int prot_sect, prot_l1, domain;
 	pgprot_t prot_pte;
-	long off;
+	unsigned long off = (u32)__pfn_to_phys(md->pfn);
 
 	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
 		printk(KERN_WARNING "BUG: not creating mapping for "
-		       "0x%08lx at 0x%08lx in user region\n",
-		       md->physical, md->virtual);
+		       "0x%016llx at 0x%08lx in user region\n",
+		       __pfn_to_phys((u64)md->pfn), md->virtual);
 		return;
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
 	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
-		printk(KERN_WARNING "BUG: mapping for 0x%08lx at 0x%08lx "
+		printk(KERN_WARNING "BUG: mapping for 0x%016llx at 0x%08lx "
 		       "overlaps vmalloc space\n",
-		       md->physical, md->virtual);
+		       __pfn_to_phys((u64)md->pfn), md->virtual);
 	}
 
 	domain    = mem_types[md->type].domain;
@@ -509,15 +499,40 @@ static void __init create_mapping(struct map_desc *md)
 	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
 	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);
 
+	/*
+	 * Catch 36-bit addresses
+	 */
+	if (md->pfn >= 0x100000) {
+		if (domain) {
+			printk(KERN_ERR "MM: invalid domain in supersection "
+			       "mapping for 0x%016llx at 0x%08lx\n",
+			       __pfn_to_phys((u64)md->pfn), md->virtual);
+			return;
+		}
+		if ((md->virtual | md->length | __pfn_to_phys(md->pfn))
+		    & ~SUPERSECTION_MASK) {
+			printk(KERN_ERR "MM: cannot create mapping for "
+			       "0x%016llx at 0x%08lx invalid alignment\n",
+			       __pfn_to_phys((u64)md->pfn), md->virtual);
+			return;
+		}
+
+		/*
+		 * Shift bits [35:32] of address into bits [23:20] of PMD
+		 * (See ARMv6 spec).
+		 */
+		off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
+	}
+
 	virt   = md->virtual;
-	off    = md->physical - virt;
+	off   -= virt;
 	length = md->length;
 
 	if (mem_types[md->type].prot_l1 == 0 &&
 	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
 		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
 		       "be mapped using pages, ignoring.\n",
-		       __pfn_to_phys(md->pfn), md->virtual);
+		       __pfn_to_phys(md->pfn), md->virtual);
 		return;
 	}
 
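[Editor's note: the bits [35:32] to bits [23:20] encoding added above is easy to check in isolation. Below is a minimal userspace sketch, not kernel code; the pfn value is made up, and PAGE_SHIFT is assumed to be 12 as on ARM.]

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumed, as on ARM */

/* Mirrors the off computation in create_mapping(): the low 32 bits of
 * the physical address, with physical bits [35:32] folded into bits
 * [23:20], where the ARMv6 supersection descriptor carries them. */
static uint32_t supersection_off(uint64_t pfn)
{
	uint32_t off = (uint32_t)(pfn << PAGE_SHIFT);

	if (pfn >= 0x100000)	/* physical address at or above 4GB */
		off |= ((pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20;

	return off;
}

int main(void)
{
	/* pfn 0x180000 -> phys 0x1_8000_0000: the low 32 bits are
	 * 0x80000000 and bits [35:32] = 0x1 land in bits [23:20],
	 * so this prints off = 0x80100000. */
	printf("off = 0x%08x\n", supersection_off(0x180000));
	return 0;
}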
@@ -535,13 +550,22 @@ static void __init create_mapping(struct map_desc *md)
 	 * of the actual domain assignments in use.
 	 */
 	if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
-		/* Align to supersection boundary */
-		while ((virt & ~SUPERSECTION_MASK || (virt + off) &
-			~SUPERSECTION_MASK) && length >= (PGDIR_SIZE / 2)) {
-			alloc_init_section(virt, virt + off, prot_sect);
-
-			virt += (PGDIR_SIZE / 2);
-			length -= (PGDIR_SIZE / 2);
+		/*
+		 * Align to supersection boundary if !high pages.
+		 * High pages have already been checked for proper
+		 * alignment above and they will fail the SUPERSECTION_MASK
+		 * check because of the way the address is encoded into
+		 * offset.
+		 */
+		if (md->pfn <= 0x100000) {
+			while ((virt & ~SUPERSECTION_MASK ||
+				(virt + off) & ~SUPERSECTION_MASK) &&
+				length >= (PGDIR_SIZE / 2)) {
+				alloc_init_section(virt, virt + off, prot_sect);
+
+				virt += (PGDIR_SIZE / 2);
+				length -= (PGDIR_SIZE / 2);
+			}
 		}
 
 		while (length >= SUPERSECTION_SIZE) {
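[Editor's note: the md->pfn guard matters because, for a 36-bit mapping, the alignment check earlier in create_mapping() already forces md->virtual, md->length and the low physical bits onto supersection boundaries; after off -= virt, the only bits left under ~SUPERSECTION_MASK are the encoded [35:32] bits, so the while condition would never go false and the region would be eaten as 1MB sections with the encoded bits corrupting the section base. A small sketch of that condition, assuming 16MB supersections (mask covering the low 24 bits) and made-up addresses.]

#include <stdio.h>
#include <stdint.h>

#define SUPERSECTION_SHIFT	24	/* assumed: 16MB supersections */
#define SUPERSECTION_MASK	(~((1u << SUPERSECTION_SHIFT) - 1))

int main(void)
{
	uint32_t virt = 0xd0000000;		/* hypothetical, 16MB-aligned */
	uint32_t off  = 0x80100000 - virt;	/* low phys bits + encoded [35:32] bits */

	/* The alignment loop's condition: with bits [23:20] encoded into
	 * off it can never clear, hence the md->pfn guard above. */
	if ((virt & ~SUPERSECTION_MASK) || ((virt + off) & ~SUPERSECTION_MASK))
		printf("would loop: encoded bits = 0x%08x\n",
		       (virt + off) & ~SUPERSECTION_MASK);
	return 0;
}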
@@ -601,100 +625,6 @@ void setup_mm_for_reboot(char mode)
 	}
 }
 
-extern void _stext, _etext;
-
-/*
- * Setup initial mappings.  We use the page we allocated for zero page to hold
- * the mappings, which will get overwritten by the vectors in traps_init().
- * The mappings must be in virtual address order.
- */
-void __init memtable_init(struct meminfo *mi)
-{
-	struct map_desc *init_maps, *p, *q;
-	unsigned long address = 0;
-	int i;
-
-	build_mem_type_table();
-
-	init_maps = p = alloc_bootmem_low_pages(PAGE_SIZE);
-
-#ifdef CONFIG_XIP_KERNEL
-	p->physical   = CONFIG_XIP_PHYS_ADDR & PMD_MASK;
-	p->virtual    = (unsigned long)&_stext & PMD_MASK;
-	p->length     = ((unsigned long)&_etext - p->virtual + ~PMD_MASK) & PMD_MASK;
-	p->type       = MT_ROM;
-	p ++;
-#endif
-
-	for (i = 0; i < mi->nr_banks; i++) {
-		if (mi->bank[i].size == 0)
-			continue;
-
-		p->physical   = mi->bank[i].start;
-		p->virtual    = __phys_to_virt(p->physical);
-		p->length     = mi->bank[i].size;
-		p->type       = MT_MEMORY;
-		p ++;
-	}
-
-#ifdef FLUSH_BASE
-	p->physical   = FLUSH_BASE_PHYS;
-	p->virtual    = FLUSH_BASE;
-	p->length     = PGDIR_SIZE;
-	p->type       = MT_CACHECLEAN;
-	p ++;
-#endif
-
-#ifdef FLUSH_BASE_MINICACHE
-	p->physical   = FLUSH_BASE_PHYS + PGDIR_SIZE;
-	p->virtual    = FLUSH_BASE_MINICACHE;
-	p->length     = PGDIR_SIZE;
-	p->type       = MT_MINICLEAN;
-	p ++;
-#endif
-
-	/*
-	 * Go through the initial mappings, but clear out any
-	 * pgdir entries that are not in the description.
-	 */
-	q = init_maps;
-	do {
-		if (address < q->virtual || q == p) {
-			clear_mapping(address);
-			address += PGDIR_SIZE;
-		} else {
-			create_mapping(q);
-
-			address = q->virtual + q->length;
-			address = (address + PGDIR_SIZE - 1) & PGDIR_MASK;
-
-			q ++;
-		}
-	} while (address != 0);
-
-	/*
-	 * Create a mapping for the machine vectors at the high-vectors
-	 * location (0xffff0000).  If we aren't using high-vectors, also
-	 * create a mapping at the low-vectors virtual address.
-	 */
-	init_maps->physical   = virt_to_phys(init_maps);
-	init_maps->virtual    = 0xffff0000;
-	init_maps->length     = PAGE_SIZE;
-	init_maps->type       = MT_HIGH_VECTORS;
-	create_mapping(init_maps);
-
-	if (!vectors_high()) {
-		init_maps->virtual = 0;
-		init_maps->type = MT_LOW_VECTORS;
-		create_mapping(init_maps);
-	}
-
-	flush_cache_all();
-	local_flush_tlb_all();
-
-	top_pmd = pmd_off_k(0xffff0000);
-}
-
 /*
  * Create the architecture specific mappings
  */
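[Editor's note: with the static qualifiers dropped, build_mem_type_table() and create_mapping() become callable from outside this file, and struct map_desc now describes the physical side by pfn, so a 36-bit address survives the 32-bit fields. A hypothetical caller sketch follows; the addresses are made up, and __phys_to_pfn() is assumed to be the inverse of the __pfn_to_phys() used above.]

	/* Map a device region that lives above 4GB physical; assumes an
	 * early init context such as a machine's map_io hook. */
	struct map_desc map = {
		.virtual = 0xd0000000,			  /* example virtual address */
		.pfn	 = __phys_to_pfn(0x180000000ULL), /* 36-bit physical address */
		.length	 = SUPERSECTION_SIZE,
		.type	 = MT_DEVICE,
	};

	create_mapping(&map);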