Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c | 34 ++++++++++++++++++++++------------
1 file changed, 22 insertions(+), 12 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 2c7cf2f9c837..e5dad60b558b 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -288,6 +288,11 @@ static struct mem_type mem_types[] = {
                         PMD_SECT_UNCACHED | PMD_SECT_XN,
                 .domain    = DOMAIN_KERNEL,
         },
+        [MT_MEMORY_DMA_READY] = {
+                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+                .prot_l1   = PMD_TYPE_TABLE,
+                .domain    = DOMAIN_KERNEL,
+        },
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
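
Note on the hunk above: the new MT_MEMORY_DMA_READY type deliberately defines no .prot_sect, so ranges of this type can never be mapped with section (large-block) entries and always end up as individual page table entries whose attributes can later be changed per page. The remaining hunks propagate the shared/kernel protection bits into its .prot_pte and guard every place that assumes .prot_sect is non-zero. As a rough sketch of how the page-level path consumes such a type (loosely following the alloc_init_pte()/early_pte_alloc() helpers already present in this file; the function name below is illustrative, not part of this patch):

/*
 * Sketch: build page mappings for a mem_type that defines no prot_sect.
 * Loosely follows alloc_init_pte() in arch/arm/mm/mmu.c; simplified.
 */
static void __init sketch_map_pages(pmd_t *pmd, unsigned long addr,
                                    unsigned long end, unsigned long pfn,
                                    const struct mem_type *type)
{
        /* prot_l1 describes the L1 (PMD) entry, prot_pte each individual PTE */
        pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);

        do {
                set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
}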
@@ -429,6 +434,7 @@ static void __init build_mem_type_table(void)
         if (arch_is_coherent() && cpu_is_xsc3()) {
                 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
                 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+                mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
                 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
                 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
         }
@@ -460,6 +466,7 @@ static void __init build_mem_type_table(void)
                 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
                 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
                 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+                mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
                 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
                 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
         }
@@ -489,7 +496,8 @@ static void __init build_mem_type_table(void)
          */
         for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                 mem_types[i].prot_pte |= PTE_EXT_AF;
-                mem_types[i].prot_sect |= PMD_SECT_AF;
+                if (mem_types[i].prot_sect)
+                        mem_types[i].prot_sect |= PMD_SECT_AF;
         }
         kern_pgprot |= PTE_EXT_AF;
         vecs_pgprot |= PTE_EXT_AF;
@@ -511,6 +519,7 @@ static void __init build_mem_type_table(void)
         mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
         mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
         mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+        mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
         mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
         mem_types[MT_ROM].prot_sect |= cp->pmd;
 
@@ -595,7 +604,7 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
          * L1 entries, whereas PGDs refer to a group of L1 entries making
          * up one logical pointer to an L2 table.
          */
-        if (((addr | end | phys) & ~SECTION_MASK) == 0) {
+        if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
                 pmd_t *p = pmd;
 
 #ifndef CONFIG_ARM_LPAE
@@ -813,7 +822,7 @@ static int __init early_vmalloc(char *arg)
 }
 early_param("vmalloc", early_vmalloc);
 
-static phys_addr_t lowmem_limit __initdata = 0;
+phys_addr_t arm_lowmem_limit __initdata = 0;
 
 void __init sanity_check_meminfo(void)
 {
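
Note on the hunk above: lowmem_limit loses its static qualifier and becomes the global arm_lowmem_limit, so code outside this file (notably the DMA/CMA remapping added by this series) can clamp physical ranges against the lowmem boundary. The matching declaration would presumably live in the shared ARM mm header, roughly:

/* Sketch of the matching declaration (presumably in arch/arm/mm/mm.h). */
extern phys_addr_t arm_lowmem_limit;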
@@ -896,8 +905,8 @@ void __init sanity_check_meminfo(void)
                         bank->size = newsize;
                 }
 #endif
-                if (!bank->highmem && bank->start + bank->size > lowmem_limit)
-                        lowmem_limit = bank->start + bank->size;
+                if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
+                        arm_lowmem_limit = bank->start + bank->size;
 
                 j++;
         }
@@ -922,8 +931,8 @@ void __init sanity_check_meminfo(void)
         }
 #endif
         meminfo.nr_banks = j;
-        high_memory = __va(lowmem_limit - 1) + 1;
-        memblock_set_current_limit(lowmem_limit);
+        high_memory = __va(arm_lowmem_limit - 1) + 1;
+        memblock_set_current_limit(arm_lowmem_limit);
 }
 
 static inline void prepare_page_table(void)
@@ -948,8 +957,8 @@ static inline void prepare_page_table(void)
          * Find the end of the first block of lowmem.
          */
         end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
-        if (end >= lowmem_limit)
-                end = lowmem_limit;
+        if (end >= arm_lowmem_limit)
+                end = arm_lowmem_limit;
 
         /*
          * Clear out all the kernel space mappings, except for the first
@@ -1092,8 +1101,8 @@ static void __init map_lowmem(void)
                 phys_addr_t end = start + reg->size;
                 struct map_desc map;
 
-                if (end > lowmem_limit)
-                        end = lowmem_limit;
+                if (end > arm_lowmem_limit)
+                        end = arm_lowmem_limit;
                 if (start >= end)
                         break;
 
@@ -1114,11 +1123,12 @@ void __init paging_init(struct machine_desc *mdesc)
 {
         void *zero_page;
 
-        memblock_set_current_limit(lowmem_limit);
+        memblock_set_current_limit(arm_lowmem_limit);
 
         build_mem_type_table();
         prepare_page_table();
         map_lowmem();
+        dma_contiguous_remap();
         devicemaps_init(mdesc);
         kmap_init();
 
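
Note on the hunk above: dma_contiguous_remap() is not defined in this file; it belongs to the DMA/CMA side of this series (arch/arm/mm/dma-mapping.c). A minimal sketch of what such a remap step could look like, assuming it walks the contiguous regions reserved earlier at boot, drops their existing section mappings and rebuilds them as page mappings of type MT_MEMORY_DMA_READY; the function name and its base/size parameters are illustrative, while arm_lowmem_limit, iotable_init() and MT_MEMORY_DMA_READY come from this patch and the surrounding code:

/*
 * Sketch: remap one reserved contiguous (CMA) region with page mappings
 * of type MT_MEMORY_DMA_READY so its attributes can later be changed
 * on a per-page basis.
 */
static void __init sketch_remap_cma_region(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t start = base;
        phys_addr_t end = base + size;
        unsigned long addr;
        struct map_desc map;

        if (end > arm_lowmem_limit)        /* only lowmem is mapped here */
                end = arm_lowmem_limit;
        if (start >= end)
                return;

        map.pfn = __phys_to_pfn(start);
        map.virtual = __phys_to_virt(start);
        map.length = end - start;
        map.type = MT_MEMORY_DMA_READY;

        /* Drop the existing section mappings covering the region... */
        for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
             addr += PMD_SIZE)
                pmd_clear(pmd_off_k(addr));

        /*
         * ...then recreate the mapping; because MT_MEMORY_DMA_READY has no
         * prot_sect, the type->prot_sect check added above forces the
         * page-level path rather than section entries.
         */
        iotable_init(&map, 1);
}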