Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c	97
1 file changed, 82 insertions(+), 15 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index dc8c550e6cbd..94c5a0c94f5e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
 #include <linux/fs.h>
+#include <linux/vmalloc.h>
 
 #include <asm/cputype.h>
 #include <asm/sections.h>
@@ -150,6 +151,7 @@ static int __init early_nowrite(char *__unused)
 }
 early_param("nowb", early_nowrite);
 
+#ifndef CONFIG_ARM_LPAE
 static int __init early_ecc(char *p)
 {
         if (memcmp(p, "on", 2) == 0)
@@ -159,6 +161,7 @@ static int __init early_ecc(char *p)
         return 0;
 }
 early_param("ecc", early_ecc);
+#endif
 
 static int __init noalign_setup(char *__unused)
 {
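Background for the new guard around early_ecc(), as a sketch (the bit name is from the classic short-descriptor layout, not part of this patch):

        /*
         * "ecc=on" makes build_mem_type_table() fold an ECC-enable bit
         * (the P bit of a classic 1st-level descriptor) into the section
         * protections.  The LPAE long-descriptor format has no such bit,
         * so the early_param is compiled out entirely for LPAE builds.
         */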
@@ -228,10 +231,12 @@ static struct mem_type mem_types[] = {
                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
                 .domain = DOMAIN_KERNEL,
         },
+#ifndef CONFIG_ARM_LPAE
         [MT_MINICLEAN] = {
                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
                 .domain = DOMAIN_KERNEL,
         },
+#endif
         [MT_LOW_VECTORS] = {
                 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                 L_PTE_RDONLY,
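A hedged note on why this entry can vanish (the hardware details are background, not stated in the hunk):

        /*
         * MT_MINICLEAN maps the XScale mini-data cache clean area.  XScale
         * is an ARMv5 design limited to classic page tables, and
         * PMD_SECT_MINICACHE is a TEX/C/B encoding with no equivalent in
         * the LPAE long-descriptor format, so LPAE builds drop the entry.
         */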
@@ -429,6 +434,7 @@ static void __init build_mem_type_table(void)
          * ARMv6 and above have extended page tables.
          */
         if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
+#ifndef CONFIG_ARM_LPAE
                 /*
                  * Mark cache clean areas and XIP ROM read only
                  * from SVC mode and no access from userspace.
@@ -436,6 +442,7 @@ static void __init build_mem_type_table(void)
                 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+#endif
 
                 if (is_smp()) {
                         /*
@@ -474,6 +481,18 @@ static void __init build_mem_type_table(void)
                 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
         }
 
+#ifdef CONFIG_ARM_LPAE
+        /*
+         * Do not generate access flag faults for the kernel mappings.
+         */
+        for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+                mem_types[i].prot_pte |= PTE_EXT_AF;
+                mem_types[i].prot_sect |= PMD_SECT_AF;
+        }
+        kern_pgprot |= PTE_EXT_AF;
+        vecs_pgprot |= PTE_EXT_AF;
+#endif
+
         for (i = 0; i < 16; i++) {
                 unsigned long v = pgprot_val(protection_map[i]);
                 protection_map[i] = __pgprot(v | user_pgprot);
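Why presetting AF matters, with a minimal sketch (MT_MEMORY and the fault behaviour are assumptions from the LPAE descriptor format, not shown in this hunk):

        /*
         * On LPAE, a descriptor with the Access Flag clear raises an
         * access flag fault on first use, and the kernel cannot usefully
         * handle such faults on its own static mappings, so AF is folded
         * into every mem_type up front.  Entries written later inherit it:
         */
        pmd_t entry = __pmd(phys | mem_types[MT_MEMORY].prot_sect);
        /* prot_sect now carries PMD_SECT_AF, so no fault on first access */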
@@ -529,13 +548,18 @@ EXPORT_SYMBOL(phys_mem_access_prot);
 
 #define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 
-static void __init *early_alloc(unsigned long sz)
+static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
 {
-        void *ptr = __va(memblock_alloc(sz, sz));
+        void *ptr = __va(memblock_alloc(sz, align));
         memset(ptr, 0, sz);
         return ptr;
 }
 
+static void __init *early_alloc(unsigned long sz)
+{
+        return early_alloc_aligned(sz, sz);
+}
+
 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
 {
         if (pmd_none(*pmd)) {
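A usage sketch for the split (the pte example mirrors early_pte_alloc() in this file; the vm_struct one is the new caller added below in iotable_init()):

        /* page-table allocation: the size doubles as the alignment */
        pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);

        /* plain array: natural alignment is enough, no need to align to size */
        struct vm_struct *vm = early_alloc_aligned(sizeof(*vm) * nr,
                                                   __alignof__(*vm));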
@@ -572,8 +596,10 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
         if (((addr | end | phys) & ~SECTION_MASK) == 0) {
                 pmd_t *p = pmd;
 
+#ifndef CONFIG_ARM_LPAE
                 if (addr & SECTION_SIZE)
                         pmd++;
+#endif
 
                 do {
                         *pmd = __pmd(phys | type->prot_sect);
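The geometry behind the new #ifndef, sketched with the usual values for each format (assumed, not stated in the hunk):

        /*
         * classic: a Linux pmd_t spans 2MB but holds two 32-bit 1MB
         *          section entries, so an address with the 1MB bit set
         *          must step to the second word (pmd++).
         * LPAE:    SECTION_SIZE is 2MB and one 64-bit pmd_t maps exactly
         *          one section, so there is no second word to select.
         */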
@@ -603,6 +629,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
         } while (pud++, addr = next, addr != end);
 }
 
+#ifndef CONFIG_ARM_LPAE
 static void __init create_36bit_mapping(struct map_desc *md,
         const struct mem_type *type)
 {
@@ -662,6 +689,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
                 pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
         } while (addr != end);
 }
+#endif	/* !CONFIG_ARM_LPAE */
 
 /*
  * Create the page directory entries and any necessary
@@ -685,14 +713,16 @@ static void __init create_mapping(struct map_desc *md)
         }
 
         if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-            md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
+            md->virtual >= PAGE_OFFSET &&
+            (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
                 printk(KERN_WARNING "BUG: mapping for 0x%08llx"
-                       " at 0x%08lx overlaps vmalloc space\n",
+                       " at 0x%08lx out of vmalloc space\n",
                        (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
         }
 
         type = &mem_types[md->type];
 
+#ifndef CONFIG_ARM_LPAE
         /*
          * Catch 36-bit addresses
          */
@@ -700,6 +730,7 @@ static void __init create_mapping(struct map_desc *md)
                 create_36bit_mapping(md, type);
                 return;
         }
+#endif
 
         addr = md->virtual & PAGE_MASK;
         phys = __pfn_to_phys(md->pfn);
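For reference, the classic-only path the new #ifndef brackets is the supersection case; the surrounding guard (context, not part of this hunk) looks like:

        if (md->pfn >= 0x100000) {	/* physical address at or above 4GB */
                create_36bit_mapping(md, type);	/* ARMv6/XSC3 supersections */
                return;
        }

LPAE descriptors carry wide output addresses directly, so mappings above 4GB no longer need the supersection special case.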
@@ -729,18 +760,33 @@ static void __init create_mapping(struct map_desc *md)
  */
 void __init iotable_init(struct map_desc *io_desc, int nr)
 {
-        int i;
+        struct map_desc *md;
+        struct vm_struct *vm;
+
+        if (!nr)
+                return;
 
-        for (i = 0; i < nr; i++)
-                create_mapping(io_desc + i);
+        vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+
+        for (md = io_desc; nr; md++, nr--) {
+                create_mapping(md);
+                vm->addr = (void *)(md->virtual & PAGE_MASK);
+                vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
+                vm->phys_addr = __pfn_to_phys(md->pfn);
+                vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+                vm->flags |= VM_ARM_MTYPE(md->type);
+                vm->caller = iotable_init;
+                vm_area_add_early(vm++);
+        }
 }
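What the rewrite buys: each static mapping is now also registered with the vmalloc allocator via vm_area_add_early(), so generic code sees it and later ioremap() calls can find and reuse it. A hypothetical board file would feed it descriptors like this (names and addresses invented for illustration):

        static struct map_desc foo_io_desc[] __initdata = {
                {
                        .virtual = 0xf8000000,	/* must sit inside the vmalloc region now */
                        .pfn     = __phys_to_pfn(0x10000000),
                        .length  = SZ_1M,
                        .type    = MT_DEVICE,
                },
        };

        static void __init foo_map_io(void)
        {
                iotable_init(foo_io_desc, ARRAY_SIZE(foo_io_desc));
        }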
 
-static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
+static void * __initdata vmalloc_min =
+        (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
 /*
  * vmalloc=size forces the vmalloc area to be exactly 'size'
  * bytes. This can be used to increase (or decrease) the vmalloc
- * area - the default is 128m.
+ * area - the default is 240m.
  */
 static int __init early_vmalloc(char *arg)
 {
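The new default worked through, assuming typical values of VMALLOC_END = 0xff000000 and VMALLOC_OFFSET = 8MB (both platform dependent):

        vmalloc_min = 0xff000000 - (240 << 20) - 0x00800000
                    = 0xff000000 - 0x0f000000 - 0x00800000
                    = 0xef800000

The larger 240MB default leaves room for the static I/O mappings that now live inside the vmalloc region.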
@@ -775,6 +821,9 @@ void __init sanity_check_meminfo(void)
                 struct membank *bank = &meminfo.bank[j];
                 *bank = meminfo.bank[i];
 
+                if (bank->start > ULONG_MAX)
+                        highmem = 1;
+
 #ifdef CONFIG_HIGHMEM
                 if (__va(bank->start) >= vmalloc_min ||
                     __va(bank->start) < (void *)PAGE_OFFSET)
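A short note on the new test (the type width is an assumption from the LPAE configuration):

        /*
         * With LPAE, bank->start is a 64-bit phys_addr_t and may sit above
         * the 32-bit boundary.  Such a bank can never be part of the
         * linear map, so it is flagged highmem before any __va() on its
         * start address is attempted.
         */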
@@ -786,7 +835,7 @@ void __init sanity_check_meminfo(void)
                  * Split those memory banks which are partially overlapping
                  * the vmalloc area greatly simplifying things later.
                  */
-                if (__va(bank->start) < vmalloc_min &&
+                if (!highmem && __va(bank->start) < vmalloc_min &&
                     bank->size > vmalloc_min - __va(bank->start)) {
                         if (meminfo.nr_banks >= NR_BANKS) {
                                 printk(KERN_CRIT "NR_BANKS too low, "
@@ -807,6 +856,17 @@ void __init sanity_check_meminfo(void)
                 bank->highmem = highmem;
 
                 /*
+                 * Highmem banks not allowed with !CONFIG_HIGHMEM.
+                 */
+                if (highmem) {
+                        printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
+                                "(!CONFIG_HIGHMEM).\n",
+                                (unsigned long long)bank->start,
+                                (unsigned long long)bank->start + bank->size - 1);
+                        continue;
+                }
+
+                /*
                  * Check whether this memory bank would entirely overlap
                  * the vmalloc area.
                  */
@@ -860,6 +920,7 @@ void __init sanity_check_meminfo(void)
         }
 #endif
         meminfo.nr_banks = j;
+        high_memory = __va(lowmem_limit - 1) + 1;
         memblock_set_current_limit(lowmem_limit);
 }
 
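On the form of the new assignment, a hedged reading:

        /*
         * lowmem_limit is a physical address one past the end of lowmem
         * and, with LPAE, may not fit the 32-bit range that __va() maps;
         * translating the last valid byte and adding 1 back keeps the
         * conversion inside the linear map.
         */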
@@ -890,14 +951,20 @@ static inline void prepare_page_table(void)
 
         /*
          * Clear out all the kernel space mappings, except for the first
-         * memory bank, up to the end of the vmalloc region.
+         * memory bank, up to the vmalloc region.
          */
         for (addr = __phys_to_virt(end);
-             addr < VMALLOC_END; addr += PMD_SIZE)
+             addr < VMALLOC_START; addr += PMD_SIZE)
                 pmd_clear(pmd_off_k(addr));
 }
 
+#ifdef CONFIG_ARM_LPAE
+/* the first page is reserved for pgd */
+#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
+				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
+#else
 #define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
+#endif
 
 /*
  * Reserve the special regions of memory
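Size check for the LPAE case, assuming the usual 4K-page geometry (PTRS_PER_PGD = 4, PTRS_PER_PMD = 512, sizeof(pmd_t) = 8):

        PAGE_SIZE + 4 * 512 * 8 = 4096 + 16384 = 20KB

that is, one reserved page for the 4-entry top-level pgd plus 16KB of pmd tables, versus the classic 2048 * 8 = 16KB pgd.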
@@ -920,8 +987,8 @@ void __init arm_mm_memblock_reserve(void)
 }
 
 /*
- * Set up device the mappings. Since we clear out the page tables for all
- * mappings above VMALLOC_END, we will remove any debug device mappings.
+ * Set up the device mappings. Since we clear out the page tables for all
+ * mappings above VMALLOC_START, we will remove any debug device mappings.
  * This means you have to be careful how you debug this function, or any
  * called function. This means you can't use any function or debugging
  * method which may touch any device, otherwise the kernel _will_ crash.
@@ -936,7 +1003,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
          */
         vectors_page = early_alloc(PAGE_SIZE);
 
-        for (addr = VMALLOC_END; addr; addr += PMD_SIZE)
+        for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
                 pmd_clear(pmd_off_k(addr));
 
         /*
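One subtlety in the rewritten loop, noted as a comment:

        /*
         * The condition is "addr" itself: the walk starts at VMALLOC_START
         * and terminates only when the unsigned address wraps past the top
         * of the 32-bit space to 0, so every PMD up to and including the
         * last one gets cleared.
         */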