author     Marek Szyprowski <m.szyprowski@samsung.com>   2011-12-29 07:09:51 -0500
committer  Marek Szyprowski <m.szyprowski@samsung.com>   2012-05-21 09:09:38 -0400
commit     c79095092834a18ae74cfc08def1a5a101dc106c (patch)
tree       c6cd81c38b92dcdb269288ab9a125bc13f4bb339 /arch/arm/mm/mmu.c
parent     0a2b9a6ea93650b8a00f9fd5ee8fdd25671e2df6 (diff)
ARM: integrate CMA with DMA-mapping subsystem
This patch adds support for CMA to the dma-mapping subsystem for the ARM architecture. By default a global CMA area is used, but specific devices are allowed to have their private memory areas if required (they can be created with the dma_declare_contiguous() function during board initialisation).

Contiguous memory areas reserved for DMA are remapped with 2-level page tables on boot. Once a buffer is requested, a low memory kernel mapping is updated to match the requested memory access type. GFP_ATOMIC allocations are performed from a special pool which is created early during boot. This way remapping page attributes is not needed at allocation time.

CMA has been enabled unconditionally for ARMv6+ systems.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
CC: Michal Nazarewicz <mina86@mina86.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Tested-by: Rob Clark <rob.clark@linaro.org>
Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Tested-by: Robert Nelson <robertcnelson@gmail.com>
Tested-by: Barry Song <Baohua.Song@csr.com>
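For context, a board file would reserve such a device-private area from its early reserve hook, before paging_init() runs. The sketch below is illustrative only and is not part of this patch: the platform device, the size, and the exact (dev, size, base, limit) form of dma_declare_contiguous() are assumptions based on the dma-contiguous patches in this series.

/* Hypothetical board code -- not part of this patch. */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-contiguous.h>

static struct platform_device example_camera_device = {
	.name = "example-camera",	/* made-up device for illustration */
	.id   = -1,
};

/* Called from the machine's .reserve hook, before paging_init(). */
static void __init example_board_reserve(void)
{
	/*
	 * Reserve a private 16 MiB CMA area for the camera; base/limit of 0
	 * leave the placement up to the allocator. Assumed signature:
	 * dma_declare_contiguous(dev, size, base, limit).
	 */
	if (dma_declare_contiguous(&example_camera_device.dev,
				   16 * 1024 * 1024, 0, 0))
		pr_warn("example: unable to reserve CMA area for camera\n");
}

Devices without such a private area fall back to the global CMA region mentioned above.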
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--   arch/arm/mm/mmu.c   31
1 files changed, 20 insertions, 11 deletions
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index aa78de8bfdd3..e5dad60b558b 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -288,6 +288,11 @@ static struct mem_type mem_types[] = {
 				PMD_SECT_UNCACHED | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_DMA_READY] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.domain    = DOMAIN_KERNEL,
+	},
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
@@ -429,6 +434,7 @@ static void __init build_mem_type_table(void)
 	if (arch_is_coherent() && cpu_is_xsc3()) {
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 	}
@@ -460,6 +466,7 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 	}
@@ -512,6 +519,7 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
@@ -596,7 +604,7 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
 	 * L1 entries, whereas PGDs refer to a group of L1 entries making
 	 * up one logical pointer to an L2 table.
 	 */
-	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
+	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
 		pmd_t *p = pmd;
 
 #ifndef CONFIG_ARM_LPAE
@@ -814,7 +822,7 @@ static int __init early_vmalloc(char *arg)
 }
 early_param("vmalloc", early_vmalloc);
 
-static phys_addr_t lowmem_limit __initdata = 0;
+phys_addr_t arm_lowmem_limit __initdata = 0;
 
 void __init sanity_check_meminfo(void)
 {
@@ -897,8 +905,8 @@ void __init sanity_check_meminfo(void)
 			bank->size = newsize;
 		}
 #endif
-		if (!bank->highmem && bank->start + bank->size > lowmem_limit)
-			lowmem_limit = bank->start + bank->size;
+		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
+			arm_lowmem_limit = bank->start + bank->size;
 
 		j++;
 	}
@@ -923,8 +931,8 @@ void __init sanity_check_meminfo(void)
 	}
 #endif
 	meminfo.nr_banks = j;
-	high_memory = __va(lowmem_limit - 1) + 1;
-	memblock_set_current_limit(lowmem_limit);
+	high_memory = __va(arm_lowmem_limit - 1) + 1;
+	memblock_set_current_limit(arm_lowmem_limit);
 }
 
 static inline void prepare_page_table(void)
@@ -949,8 +957,8 @@ static inline void prepare_page_table(void)
 	 * Find the end of the first block of lowmem.
 	 */
 	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
-	if (end >= lowmem_limit)
-		end = lowmem_limit;
+	if (end >= arm_lowmem_limit)
+		end = arm_lowmem_limit;
 
 	/*
 	 * Clear out all the kernel space mappings, except for the first
@@ -1093,8 +1101,8 @@ static void __init map_lowmem(void)
 		phys_addr_t end = start + reg->size;
 		struct map_desc map;
 
-		if (end > lowmem_limit)
-			end = lowmem_limit;
+		if (end > arm_lowmem_limit)
+			end = arm_lowmem_limit;
 		if (start >= end)
 			break;
 
@@ -1115,11 +1123,12 @@ void __init paging_init(struct machine_desc *mdesc)
 {
 	void *zero_page;
 
-	memblock_set_current_limit(lowmem_limit);
+	memblock_set_current_limit(arm_lowmem_limit);
 
 	build_mem_type_table();
 	prepare_page_table();
 	map_lowmem();
+	dma_contiguous_remap();
 	devicemaps_init(mdesc);
 	kmap_init();
 
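The dma_contiguous_remap() call added to paging_init() above lives in arch/arm/mm/dma-mapping.c and is not part of this file's diff. Roughly, it rebuilds the lowmem mapping of each reserved CMA area using the new MT_MEMORY_DMA_READY type, which has no prot_sect and therefore forces 2-level (page-table) mappings whose per-page attributes can be changed later. The following is a simplified sketch of that remap step, not the real helper; it assumes the mm-internal pmd_off_k() and the arm_lowmem_limit declaration from arch/arm/mm/mm.h, and the names and exact loop details are illustrative.

/* Simplified sketch only -- the real helper in dma-mapping.c may differ. */
#include <linux/init.h>
#include <asm/mach/map.h>	/* struct map_desc, iotable_init() */
#include <asm/memory.h>
#include <asm/pgtable.h>

#include "mm.h"			/* pmd_off_k(), arm_lowmem_limit (assumed) */

static void __init example_remap_cma_region(phys_addr_t start, phys_addr_t size)
{
	phys_addr_t end = start + size;
	struct map_desc map;
	unsigned long addr;

	if (end > arm_lowmem_limit)
		end = arm_lowmem_limit;
	if (start >= end)
		return;

	map.pfn     = __phys_to_pfn(start);
	map.virtual = __phys_to_virt(start);
	map.length  = end - start;
	map.type    = MT_MEMORY_DMA_READY;	/* pte-only type added above */

	/* Drop the section mappings map_lowmem() created for this range... */
	for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
	     addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/* ...and recreate the mapping through 2-level page tables. */
	iotable_init(&map, 1);
}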