author		Catalin Marinas <catalin.marinas@arm.com>	2011-11-22 12:30:29 -0500
committer	Catalin Marinas <catalin.marinas@arm.com>	2011-12-08 05:30:39 -0500
commit		1b6ba46b7efa31055eb993a6f2c6bbcb8b35b001 (patch)
tree		b04e3b1fd23ba81a643f64cba113551d127111a0 /arch/arm/mm/mmu.c
parent		da02877987e6e173ebba137d4e1e155e1f1151cd (diff)
ARM: LPAE: MMU setup for the 3-level page table format
This patch adds the MMU initialisation for the LPAE page table format. The swapper_pg_dir size with LPAE is 5 pages rather than 4. A new proc-v7-3level.S file contains the TTB initialisation, context switch and PTE setting code for LPAE. The TTBRx split is based on PAGE_OFFSET, with TTBR1 used for the kernel mappings. The 36-bit mappings (supersections) and a few other memory types in mmu.c are conditionally compiled.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
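An aside on the TTBRx split mentioned above: with LPAE, TTBR1 is selected when the top TTBCR.T1SZ bits of a virtual address are all ones, so the boundary can be placed exactly on PAGE_OFFSET. The userspace sketch below is not code from this patch (the real TTBR programming lives in the new proc-v7-3level.S) and assumes the common 3G/1G split for PAGE_OFFSET.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: with LPAE, TTBR1 is used for VAs in
 * [2^32 - 2^(32 - T1SZ), 2^32), i.e. when the top T1SZ address bits
 * are all ones.  PAGE_OFFSET below assumes the common 3G/1G split.
 */
#define PAGE_OFFSET	0xc0000000u

int main(void)
{
	unsigned int t1sz;

	/* find the T1SZ that puts the TTBR0/TTBR1 boundary on PAGE_OFFSET */
	for (t1sz = 1; t1sz < 8; t1sz++) {
		uint64_t base = (1ULL << 32) - (1ULL << (32 - t1sz));
		if (base == PAGE_OFFSET) {
			printf("T1SZ = %u, TTBR1 covers 0x%08llx upwards\n",
			       t1sz, (unsigned long long)base);
			break;
		}
	}
	return 0;
}

For the 3G/1G split this prints T1SZ = 2, so TTBR1 translates everything from 0xc0000000 up, which is exactly the kernel mapping region.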
Diffstat (limited to 'arch/arm/mm/mmu.c')
 arch/arm/mm/mmu.c | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index dc8c550e6cbd..1935311e17fc 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -150,6 +150,7 @@ static int __init early_nowrite(char *__unused)
 }
 early_param("nowb", early_nowrite);
 
+#ifndef CONFIG_ARM_LPAE
 static int __init early_ecc(char *p)
 {
 	if (memcmp(p, "on", 2) == 0)
@@ -159,6 +160,7 @@ static int __init early_ecc(char *p)
 	return 0;
 }
 early_param("ecc", early_ecc);
+#endif
 
 static int __init noalign_setup(char *__unused)
 {
@@ -228,10 +230,12 @@ static struct mem_type mem_types[] = {
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
+#ifndef CONFIG_ARM_LPAE
 	[MT_MINICLEAN] = {
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
 		.domain    = DOMAIN_KERNEL,
 	},
+#endif
 	[MT_LOW_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_RDONLY,
@@ -429,6 +433,7 @@ static void __init build_mem_type_table(void)
 	 * ARMv6 and above have extended page tables.
 	 */
 	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
+#ifndef CONFIG_ARM_LPAE
 		/*
 		 * Mark cache clean areas and XIP ROM read only
 		 * from SVC mode and no access from userspace.
@@ -436,6 +441,7 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+#endif
 
 		if (is_smp()) {
 			/*
@@ -474,6 +480,18 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
 	}
 
+#ifdef CONFIG_ARM_LPAE
+	/*
+	 * Do not generate access flag faults for the kernel mappings.
+	 */
+	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+		mem_types[i].prot_pte |= PTE_EXT_AF;
+		mem_types[i].prot_sect |= PMD_SECT_AF;
+	}
+	kern_pgprot |= PTE_EXT_AF;
+	vecs_pgprot |= PTE_EXT_AF;
+#endif
+
 	for (i = 0; i < 16; i++) {
 		unsigned long v = pgprot_val(protection_map[i]);
 		protection_map[i] = __pgprot(v | user_pgprot);
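For context on the hunk above: ARMv7 never updates the access flag in hardware, so a translation with AF == 0 takes an Access Flag fault on first use; pre-setting AF in every kernel mem_type avoids those faults for kernel mappings. The toy below is not kernel code: it assumes AF is bit 10 of an LPAE descriptor (what PMD_SECT_AF and PTE_EXT_AF encode) and uses a made-up descriptor value.

#include <stdint.h>
#include <stdio.h>

/* assumption: AF is bit 10 of an LPAE long-descriptor entry */
#define LPAE_AF		(UINT64_C(1) << 10)

static int would_fault(uint64_t desc)
{
	return !(desc & LPAE_AF);	/* AF clear -> fault on first access */
}

int main(void)
{
	uint64_t desc = 0x0000000080000201ULL;	/* hypothetical section entry */

	printf("before: %s\n", would_fault(desc) ? "AF fault" : "ok");
	desc |= LPAE_AF;	/* pre-set AF, as build_mem_type_table() now does */
	printf("after:  %s\n", would_fault(desc) ? "AF fault" : "ok");
	return 0;
}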
@@ -572,8 +590,10 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
 	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
 		pmd_t *p = pmd;
 
+#ifndef CONFIG_ARM_LPAE
 		if (addr & SECTION_SIZE)
 			pmd++;
+#endif
 
 		do {
 			*pmd = __pmd(phys | type->prot_sect);
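Why the new #ifndef in the hunk above: in the classic 2-level format a Linux pmd covers 2MB backed by a pair of 1MB hardware sections, and bit 20 of the address selects the second entry of the pair; with LPAE a pmd entry natively maps one 2MB block, so the adjustment disappears. A toy sketch, assuming SECTION_SHIFT values of 20 (classic) and 21 (LPAE) as in the respective headers:

#include <stdint.h>
#include <stdio.h>

/* assumed values matching the 2-level and 3-level ARM headers */
#define CLASSIC_SECTION_SHIFT	20	/* 1MB hw sections, two per Linux pmd */
#define LPAE_SECTION_SHIFT	21	/* one native 2MB block per pmd entry */

int main(void)
{
	uint32_t addr = 0xc0100000;	/* hypothetical section-aligned VA */

	/* classic: bit 20 picks the second 1MB entry of the pair,
	 * hence "if (addr & SECTION_SIZE) pmd++;" above */
	printf("classic: %s hardware entry of the pair\n",
	       (addr & (UINT32_C(1) << CLASSIC_SECTION_SHIFT)) ? "second" : "first");

	/* LPAE: the pmd entry itself maps the whole section */
	printf("lpae: one %uMB block, no adjustment\n",
	       (UINT32_C(1) << LPAE_SECTION_SHIFT) >> 20);
	return 0;
}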
@@ -603,6 +623,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 	} while (pud++, addr = next, addr != end);
 }
 
+#ifndef CONFIG_ARM_LPAE
 static void __init create_36bit_mapping(struct map_desc *md,
 					const struct mem_type *type)
 {
@@ -662,6 +683,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
 		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
 	} while (addr != end);
 }
+#endif	/* !CONFIG_ARM_LPAE */
 
 /*
  * Create the page directory entries and any necessary
@@ -693,6 +715,7 @@ static void __init create_mapping(struct map_desc *md)
 
 	type = &mem_types[md->type];
 
+#ifndef CONFIG_ARM_LPAE
 	/*
 	 * Catch 36-bit addresses
 	 */
@@ -700,6 +723,7 @@ static void __init create_mapping(struct map_desc *md)
 		create_36bit_mapping(md, type);
 		return;
 	}
+#endif
 
 	addr = md->virtual & PAGE_MASK;
 	phys = __pfn_to_phys(md->pfn);
@@ -897,7 +921,13 @@ static inline void prepare_page_table(void)
 		pmd_clear(pmd_off_k(addr));
 }
 
+#ifdef CONFIG_ARM_LPAE
+/* the first page is reserved for pgd */
+#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
+				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
+#else
 #define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
+#endif
 
 /*
  * Reserve the special regions of memory
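A quick check of the new macro's arithmetic, assuming the 3-level header values PTRS_PER_PGD = 4 and PTRS_PER_PMD = 512 with 8-byte descriptors; those constants come from the LPAE headers, not from this patch.

#include <stdio.h>

/* assumed LPAE constants; not defined in this patch */
#define PAGE_SIZE	4096UL
#define PTRS_PER_PGD	4UL
#define PTRS_PER_PMD	512UL
#define SIZEOF_PMD_T	8UL	/* 64-bit long-descriptor entries */

int main(void)
{
	unsigned long size = PAGE_SIZE + PTRS_PER_PGD * PTRS_PER_PMD * SIZEOF_PMD_T;

	/* 4096 + 4 * 512 * 8 = 20480 bytes = 5 pages: one page reserved
	 * for the 4-entry pgd plus four pages of pmd tables, matching
	 * the commit message's "5 pages rather than 4". */
	printf("SWAPPER_PG_DIR_SIZE = %lu bytes (%lu pages)\n",
	       size, size / PAGE_SIZE);
	return 0;
}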