Diffstat (limited to 'arch/arm/mm/mm-armv.c')
-rw-r--r--    arch/arm/mm/mm-armv.c    187
1 file changed, 52 insertions(+), 135 deletions(-)
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index d125a3dc061c..1221fdde1769 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -1,7 +1,7 @@
 /*
  * linux/arch/arm/mm/mm-armv.c
  *
- * Copyright (C) 1998-2002 Russell King
+ * Copyright (C) 1998-2005 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -180,11 +180,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 
        if (!vectors_high()) {
                /*
-                * This lock is here just to satisfy pmd_alloc and pte_lock
-                */
-               spin_lock(&mm->page_table_lock);
-
-               /*
                 * On ARM, first page must always be allocated since it
                 * contains the machine vectors.
                 */
@@ -201,23 +196,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
                set_pte(new_pte, *init_pte);
                pte_unmap_nested(init_pte);
                pte_unmap(new_pte);
-
-               spin_unlock(&mm->page_table_lock);
        }
 
        return new_pgd;
 
 no_pte:
-       spin_unlock(&mm->page_table_lock);
        pmd_free(new_pmd);
-       free_pages((unsigned long)new_pgd, 2);
-       return NULL;
-
 no_pmd:
-       spin_unlock(&mm->page_table_lock);
        free_pages((unsigned long)new_pgd, 2);
-       return NULL;
-
 no_pgd:
        return NULL;
 }
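With the page_table_lock calls gone, the error unwind in get_pgd_slow() collapses into a fall-through chain: each label releases one more level of the allocation and execution simply continues into the next label, so the duplicated free_pages()/return pairs disappear. A minimal standalone sketch of the idiom (hypothetical resources, not the kernel allocators):

    #include <stdlib.h>

    /* Fall-through unwind: each label frees one more resource, in
     * reverse order of allocation, and control drops into the next
     * label -- the same shape as the no_pte/no_pmd/no_pgd chain. */
    static int setup(void)
    {
            void *a, *b, *c;

            a = malloc(16);
            if (!a)
                    goto no_a;
            b = malloc(16);
            if (!b)
                    goto no_b;
            c = malloc(16);
            if (!c)
                    goto no_c;
            return 0;       /* caller keeps a, b and c */

    no_c:
            free(b);        /* c failed: release b ... */
    no_b:
            free(a);        /* ... then a ... */
    no_a:
            return -1;      /* ... and report the failure once */
    }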
@@ -243,6 +229,7 @@ void free_pgd_slow(pgd_t *pgd)
        pte = pmd_page(*pmd);
        pmd_clear(pmd);
        dec_page_state(nr_page_table_pages);
+       pte_lock_deinit(pte);
        pte_free(pte);
        pmd_free(pmd);
 free:
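The new pte_lock_deinit() call comes from the split page-table-lock work: once each page-table page carries its own lock state inside its struct page, that state has to be torn down before the page is handed back to the allocator. A rough standalone sketch of the ordering rule only (types and names invented for illustration; the kernel's actual pte_lock_deinit() is defined in linux/mm.h):

    #include <stdlib.h>

    /* Illustrative stand-in types: the point is purely the ordering,
     * lock teardown strictly before the page is freed. */
    struct ptpage {
            int ptl_initialized;    /* stands in for the per-page ptl */
            void *mem;
    };

    static void ptpage_lock_deinit(struct ptpage *p)
    {
            p->ptl_initialized = 0;         /* ~ pte_lock_deinit(pte) */
    }

    static void ptpage_free(struct ptpage *p)
    {
            ptpage_lock_deinit(p);          /* teardown first ...      */
            free(p->mem);                   /* ... then ~ pte_free(pte) */
            free(p);
    }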
@@ -305,16 +292,6 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg
        set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
 }
 
-/*
- * Clear any PGD mapping. On a two-level page table system,
- * the clearance is done by the middle-level functions (pmd)
- * rather than the top-level (pgd) functions.
- */
-static inline void clear_mapping(unsigned long virt)
-{
-       pmd_clear(pmd_off_k(virt));
-}
-
 struct mem_types {
        unsigned int    prot_pte;
        unsigned int    prot_l1;
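The deleted clear_mapping() helper was a one-liner because, on ARM's two-level tables, the top level is folded and first-level entries are driven through the pmd_* functions, exactly as its comment says. Its only caller was memtable_init(), which this patch also deletes (last hunk below), so the wrapper goes with it. For reference, a simplified sketch of what ARM's pmd_clear() amounts to at this level (cache maintenance omitted; assumes the era's paired 1MB first-level entries):

    #include <stdint.h>

    typedef struct { uint32_t pmd; } pmd_t;  /* one hardware 1st-level entry */

    /* ARM groups first-level entries in pairs per "pgd" slot, so
     * clearing one kernel mapping zeroes both words of the pair. */
    static void pmd_clear_sketch(pmd_t pmdp[2])
    {
            pmdp[0].pmd = 0;
            pmdp[1].pmd = 0;
            /* the real kernel also cleans the entry to memory here */
    }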
@@ -373,7 +350,7 @@ static struct mem_types mem_types[] __initdata = {
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
-static void __init build_mem_type_table(void)
+void __init build_mem_type_table(void)
 {
        struct cachepolicy *cp;
        unsigned int cr = get_cr();
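Dropping static here (and from create_mapping() in the next hunk) means these functions are now called from outside mm-armv.c, so the new caller needs prototypes visible in a shared header. A sketch of the declarations such a header would carry (header placement is illustrative; __init is stubbed so the fragment stands alone):

    #define __init  /* stub for the kernel's init-section attribute */

    struct map_desc;

    void __init build_mem_type_table(void);
    void __init create_mapping(struct map_desc *md);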
@@ -483,25 +460,25 @@ static void __init build_mem_type_table(void)
  * offsets, and we take full advantage of sections and
  * supersections.
  */
-static void __init create_mapping(struct map_desc *md)
+void __init create_mapping(struct map_desc *md)
 {
        unsigned long virt, length;
        int prot_sect, prot_l1, domain;
        pgprot_t prot_pte;
-       long off;
+       unsigned long off = (u32)__pfn_to_phys(md->pfn);
 
        if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
                printk(KERN_WARNING "BUG: not creating mapping for "
-                      "0x%08lx at 0x%08lx in user region\n",
-                      md->physical, md->virtual);
+                      "0x%016llx at 0x%08lx in user region\n",
+                      __pfn_to_phys((u64)md->pfn), md->virtual);
                return;
        }
 
        if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
            md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
-               printk(KERN_WARNING "BUG: mapping for 0x%08lx at 0x%08lx "
+               printk(KERN_WARNING "BUG: mapping for 0x%016llx at 0x%08lx "
                       "overlaps vmalloc space\n",
-                      md->physical, md->virtual);
+                      __pfn_to_phys((u64)md->pfn), md->virtual);
        }
 
        domain    = mem_types[md->type].domain;
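Replacing md->physical with md->pfn is what makes >4GB physical addresses representable: a page frame number shifted up by PAGE_SHIFT can name 2^(32+12) bytes of physical space while the field itself stays 32 bits wide, which is also why the printk formats change to 0x%016llx with a (u64) cast. A quick standalone check of the arithmetic, assuming 4K pages:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    /* same shape as the kernel's __pfn_to_phys() */
    #define __pfn_to_phys(pfn) ((uint64_t)(pfn) << PAGE_SHIFT)

    int main(void)
    {
            uint32_t pfn = 0x123456;              /* fits in 32 bits */
            uint64_t phys = __pfn_to_phys(pfn);   /* 0x123456000: above 4GB */

            printf("pfn 0x%x -> phys 0x%09llx\n",
                   pfn, (unsigned long long)phys);
            return 0;
    }

Note that the u64 is only needed at print time; everywhere else the pfn travels in a plain unsigned long.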
@@ -509,15 +486,40 @@ static void __init create_mapping(struct map_desc *md)
        prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
        prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);
 
+       /*
+        * Catch 36-bit addresses
+        */
+       if(md->pfn >= 0x100000) {
+               if(domain) {
+                       printk(KERN_ERR "MM: invalid domain in supersection "
+                               "mapping for 0x%016llx at 0x%08lx\n",
+                               __pfn_to_phys((u64)md->pfn), md->virtual);
+                       return;
+               }
+               if((md->virtual | md->length | __pfn_to_phys(md->pfn))
+                       & ~SUPERSECTION_MASK) {
+                       printk(KERN_ERR "MM: cannot create mapping for "
+                               "0x%016llx at 0x%08lx invalid alignment\n",
+                               __pfn_to_phys((u64)md->pfn), md->virtual);
+                       return;
+               }
+
+               /*
+                * Shift bits [35:32] of address into bits [23:20] of PMD
+                * (See ARMv6 spec).
+                */
+               off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
+       }
+
        virt   = md->virtual;
-       off    = md->physical - virt;
+       off   -= virt;
        length = md->length;
 
        if (mem_types[md->type].prot_l1 == 0 &&
            (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
                printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
                       "be mapped using pages, ignoring.\n",
-                      md->physical, md->virtual);
+                      __pfn_to_phys(md->pfn), md->virtual);
                return;
        }
 
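The off |= ... line is the heart of the 36-bit support. An ARMv6 supersection descriptor carries physical address bits [35:32] in bits [23:20] of the first-level entry, and since a pfn is the physical address already shifted down by PAGE_SHIFT, `md->pfn >> (32 - PAGE_SHIFT)` is `pfn >> 20`, i.e. exactly those four high bits; `& 0xF` keeps them and `<< 20` parks them at PMD bits [23:20]. A standalone check of the bit arithmetic:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* Extract physical bits [35:32] from a pfn and park them at
     * PMD bits [23:20], as the hunk above does for supersections. */
    static uint32_t supersection_high_bits(uint32_t pfn)
    {
            return ((pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20;
    }

    int main(void)
    {
            /* phys 0x2_0000_0000 -> pfn 0x200000, bits [35:32] = 0x2 */
            assert(supersection_high_bits(0x200000) == (0x2u << 20));
            /* anything below 4GB contributes nothing */
            assert(supersection_high_bits(0x000ABC00) == 0);
            return 0;
    }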
@@ -535,13 +537,22 @@ static void __init create_mapping(struct map_desc *md)
         * of the actual domain assignments in use.
         */
        if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
-               /* Align to supersection boundary */
-               while ((virt & ~SUPERSECTION_MASK || (virt + off) &
-                       ~SUPERSECTION_MASK) && length >= (PGDIR_SIZE / 2)) {
-                       alloc_init_section(virt, virt + off, prot_sect);
-
-                       virt += (PGDIR_SIZE / 2);
-                       length -= (PGDIR_SIZE / 2);
+               /*
+                * Align to supersection boundary if !high pages.
+                * High pages have already been checked for proper
+                * alignment above and they will fail the SUPERSECTION_MASK
+                * check because of the way the address is encoded into
+                * offset.
+                */
+               if (md->pfn <= 0x100000) {
+                       while ((virt & ~SUPERSECTION_MASK ||
+                               (virt + off) & ~SUPERSECTION_MASK) &&
+                               length >= (PGDIR_SIZE / 2)) {
+                               alloc_init_section(virt, virt + off, prot_sect);
+
+                               virt += (PGDIR_SIZE / 2);
+                               length -= (PGDIR_SIZE / 2);
+                       }
                }
 
                while (length >= SUPERSECTION_SIZE) {
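The loop steps by PGDIR_SIZE / 2 because ARM's Linux "pgd" entries are 2MB pairs of 1MB hardware sections, while SUPERSECTION_SIZE is 16MB; a mapping can therefore need up to 15 one-megabyte sections before virt and virt + off reach a common supersection boundary. A standalone run of the alignment arithmetic (off taken as zero for simplicity):

    #include <stdio.h>

    #define PGDIR_SIZE        (1UL << 21)   /* 2MB: ARM pairs its pgd entries */
    #define SUPERSECTION_SIZE (1UL << 24)   /* 16MB */
    #define SUPERSECTION_MASK (~(SUPERSECTION_SIZE - 1))

    int main(void)
    {
            unsigned long virt = 0xc0300000;    /* 3MB past a 16MB boundary */
            unsigned long sections = 0;

            /* mirror of the alignment loop above, with off == 0 */
            while (virt & ~SUPERSECTION_MASK) {
                    virt += PGDIR_SIZE / 2;     /* one 1MB section at a time */
                    sections++;
            }
            printf("%lu sections emitted, virt now 0x%08lx\n",
                   sections, virt);             /* 13 sections, 0xc1000000 */
            return 0;
    }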
@@ -601,100 +612,6 @@ void setup_mm_for_reboot(char mode)
        }
 }
 
-extern void _stext, _etext;
-
-/*
- * Setup initial mappings. We use the page we allocated for zero page to hold
- * the mappings, which will get overwritten by the vectors in traps_init().
- * The mappings must be in virtual address order.
- */
-void __init memtable_init(struct meminfo *mi)
-{
-       struct map_desc *init_maps, *p, *q;
-       unsigned long address = 0;
-       int i;
-
-       build_mem_type_table();
-
-       init_maps = p = alloc_bootmem_low_pages(PAGE_SIZE);
-
-#ifdef CONFIG_XIP_KERNEL
-       p->physical   = CONFIG_XIP_PHYS_ADDR & PMD_MASK;
-       p->virtual    = (unsigned long)&_stext & PMD_MASK;
-       p->length     = ((unsigned long)&_etext - p->virtual + ~PMD_MASK) & PMD_MASK;
-       p->type       = MT_ROM;
-       p ++;
-#endif
-
-       for (i = 0; i < mi->nr_banks; i++) {
-               if (mi->bank[i].size == 0)
-                       continue;
-
-               p->physical   = mi->bank[i].start;
-               p->virtual    = __phys_to_virt(p->physical);
-               p->length     = mi->bank[i].size;
-               p->type       = MT_MEMORY;
-               p ++;
-       }
-
-#ifdef FLUSH_BASE
-       p->physical   = FLUSH_BASE_PHYS;
-       p->virtual    = FLUSH_BASE;
-       p->length     = PGDIR_SIZE;
-       p->type       = MT_CACHECLEAN;
-       p ++;
-#endif
-
-#ifdef FLUSH_BASE_MINICACHE
-       p->physical   = FLUSH_BASE_PHYS + PGDIR_SIZE;
-       p->virtual    = FLUSH_BASE_MINICACHE;
-       p->length     = PGDIR_SIZE;
-       p->type       = MT_MINICLEAN;
-       p ++;
-#endif
-
-       /*
-        * Go through the initial mappings, but clear out any
-        * pgdir entries that are not in the description.
-        */
-       q = init_maps;
-       do {
-               if (address < q->virtual || q == p) {
-                       clear_mapping(address);
-                       address += PGDIR_SIZE;
-               } else {
-                       create_mapping(q);
-
-                       address = q->virtual + q->length;
-                       address = (address + PGDIR_SIZE - 1) & PGDIR_MASK;
-
-                       q ++;
-               }
-       } while (address != 0);
-
-       /*
-        * Create a mapping for the machine vectors at the high-vectors
-        * location (0xffff0000). If we aren't using high-vectors, also
-        * create a mapping at the low-vectors virtual address.
-        */
-       init_maps->physical   = virt_to_phys(init_maps);
-       init_maps->virtual    = 0xffff0000;
-       init_maps->length     = PAGE_SIZE;
-       init_maps->type       = MT_HIGH_VECTORS;
-       create_mapping(init_maps);
-
-       if (!vectors_high()) {
-               init_maps->virtual = 0;
-               init_maps->type = MT_LOW_VECTORS;
-               create_mapping(init_maps);
-       }
-
-       flush_cache_all();
-       local_flush_tlb_all();
-
-       top_pmd = pmd_off_k(0xffff0000);
-}
-
 /*
  * Create the architecture specific mappings
  */
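The removed memtable_init() accounts for the bulk of the diffstat's 135 deletions; with build_mem_type_table() and create_mapping() now exported, its job presumably moves into the arch's paging-init path (this hunk alone does not show the destination). The part worth keeping in mind is the do/while walk: it guarantees that every first-level slot of the 4GB space is either covered by a listed mapping or explicitly cleared, and it terminates by letting the 32-bit address wrap back to zero. A condensed standalone sketch of that invariant (types simplified, mapping calls stubbed with counters):

    #include <stdint.h>
    #include <stdio.h>

    #define PGDIR_SIZE (UINT32_C(1) << 21)
    #define PGDIR_MASK (~(PGDIR_SIZE - 1))

    struct map { uint32_t virtual_addr, length; };

    /* Visit every 2MB slot of a 32-bit space: slots not covered by the
     * (sorted) map list get cleared, covered ranges get mapped.  The
     * loop ends when 'address' wraps to 0, as in the deleted code. */
    static void walk(const struct map *q, const struct map *end,
                     uint32_t *cleared, uint32_t *mapped)
    {
            uint32_t address = 0;

            do {
                    if (q == end || address < q->virtual_addr) {
                            (*cleared)++;           /* ~ clear_mapping(address) */
                            address += PGDIR_SIZE;
                    } else {
                            (*mapped)++;            /* ~ create_mapping(q) */
                            address = q->virtual_addr + q->length;
                            address = (address + PGDIR_SIZE - 1) & PGDIR_MASK;
                            q++;
                    }
            } while (address != 0);                 /* 32-bit wraparound */
    }

    int main(void)
    {
            static const struct map maps[] = {
                    { 0xc0000000u, 0x08000000u },   /* e.g. 128MB at 3GB */
            };
            uint32_t cleared = 0, mapped = 0;

            walk(maps, maps + 1, &cleared, &mapped);
            printf("%u slots cleared, %u ranges mapped\n", cleared, mapped);
            return 0;
    }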