author		Kwangwoo Lee <kwangwoo.lee@sk.com>	2016-08-01 20:50:50 -0400
committer	Will Deacon <will.deacon@arm.com>	2016-08-22 05:00:48 -0400
commit		d34fdb7081394cbf93fa6571d990086356f4ea9d (patch)
tree		c0c324d47c405bfe0cab6be1146f0a36278ecf8a /arch/arm64/mm
parent		421dd6fa6709ebee4f888ed89da5c103c77caee1 (diff)
arm64: mm: convert __dma_* routines to use start, size
The __dma_* routines have been converted to take a start address and a size instead of start and end addresses. The patch was originally written to add __clean_dcache_area_poc(), which the pmem driver will use in arch_wb_cache_pmem() to clean the dcache to the PoC (Point of Coherency).

The functionality of __clean_dcache_area_poc() is equivalent to __dma_clean_range(); the only difference is that __dma_clean_range() takes an end address, while __clean_dcache_area_poc() takes a size. Thus, once the __dma_* routines take start and size instead of start and end, __clean_dcache_area_poc() can be implemented as a simple fallthrough into __dma_clean_area().

As a consequence of using start and size, the __dma_* routines have also been renamed according to the following terminology:

    area:  takes a start and a size
    range: takes a start and an end

Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Kwangwoo Lee <kwangwoo.lee@sk.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
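The caller-visible effect is easiest to see in C. Below is a minimal sketch of the two conventions, using hypothetical stand-ins for the assembly routines in cache.S; the clean_range()/clean_area() names and the 64-byte line size are illustrative assumptions, not kernel API:

#include <stddef.h>
#include <stdint.h>

/* range convention: takes a start and an end address */
static void clean_range(uintptr_t start, uintptr_t end)
{
	/* walk [start, end) one cache line at a time (op elided) */
	for (uintptr_t p = start; p < end; p += 64 /* assumed line size */)
		;	/* maintenance op on the line at p, e.g. dc cvac */
}

/*
 * area convention: takes a start and a size; it derives the end and
 * falls through, mirroring the `add x1, x1, x0` prologue this patch
 * puts in front of the range-based bodies below.
 */
static void clean_area(uintptr_t start, size_t size)
{
	clean_range(start, start + size);
}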
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--	arch/arm64/mm/cache.S		82
-rw-r--r--	arch/arm64/mm/dma-mapping.c	 6
2 files changed, 42 insertions(+), 46 deletions(-)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 07d7352d7c38..58b5a906ff78 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -105,19 +105,20 @@ ENTRY(__clean_dcache_area_pou)
 ENDPROC(__clean_dcache_area_pou)
 
 /*
- *	__inval_cache_range(start, end)
- *	- start   - start address of region
- *	- end     - end address of region
+ *	__dma_inv_area(start, size)
+ *	- start   - virtual start address of region
+ *	- size    - size in question
  */
-ENTRY(__inval_cache_range)
+__dma_inv_area:
+	add	x1, x1, x0
 	/* FALLTHROUGH */
 
 /*
- *	__dma_inv_range(start, end)
- *	- start   - virtual start address of region
- *	- end     - virtual end address of region
+ *	__inval_cache_range(start, end)
+ *	- start   - start address of region
+ *	- end     - end address of region
  */
-__dma_inv_range:
+ENTRY(__inval_cache_range)
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	tst	x1, x3				// end cache line aligned?
@@ -136,46 +137,43 @@ __dma_inv_range:
 	dsb	sy
 	ret
 ENDPIPROC(__inval_cache_range)
-ENDPROC(__dma_inv_range)
+ENDPROC(__dma_inv_area)
+
+/*
+ *	__clean_dcache_area_poc(kaddr, size)
+ *
+ *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *	are cleaned to the PoC.
+ *
+ *	- kaddr   - kernel address
+ *	- size    - size in question
+ */
+ENTRY(__clean_dcache_area_poc)
+	/* FALLTHROUGH */
 
 /*
- *	__dma_clean_range(start, end)
+ *	__dma_clean_area(start, size)
  *	- start   - virtual start address of region
- *	- end     - virtual end address of region
+ *	- size    - size in question
  */
-__dma_clean_range:
-	dcache_line_size x2, x3
-	sub	x3, x2, #1
-	bic	x0, x0, x3
-1:
-alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
-	dc	cvac, x0
-alternative_else
-	dc	civac, x0
-alternative_endif
-	add	x0, x0, x2
-	cmp	x0, x1
-	b.lo	1b
-	dsb	sy
+__dma_clean_area:
+	dcache_by_line_op cvac, sy, x0, x1, x2, x3
 	ret
-ENDPROC(__dma_clean_range)
+ENDPIPROC(__clean_dcache_area_poc)
+ENDPROC(__dma_clean_area)
 
 /*
- *	__dma_flush_range(start, end)
+ *	__dma_flush_area(start, size)
+ *
+ *	clean & invalidate D / U line
+ *
  *	- start   - virtual start address of region
- *	- end     - virtual end address of region
+ *	- size    - size in question
  */
-ENTRY(__dma_flush_range)
-	dcache_line_size x2, x3
-	sub	x3, x2, #1
-	bic	x0, x0, x3
-1:	dc	civac, x0			// clean & invalidate D / U line
-	add	x0, x0, x2
-	cmp	x0, x1
-	b.lo	1b
-	dsb	sy
+ENTRY(__dma_flush_area)
+	dcache_by_line_op civac, sy, x0, x1, x2, x3
 	ret
-ENDPIPROC(__dma_flush_range)
+ENDPIPROC(__dma_flush_area)
 
 /*
  *	__dma_map_area(start, size, dir)
@@ -184,10 +182,9 @@ ENDPIPROC(__dma_flush_range)
  *	- dir	- DMA direction
  */
 ENTRY(__dma_map_area)
-	add	x1, x1, x0
 	cmp	w2, #DMA_FROM_DEVICE
-	b.eq	__dma_inv_range
-	b	__dma_clean_range
+	b.eq	__dma_inv_area
+	b	__dma_clean_area
 ENDPIPROC(__dma_map_area)
 
 /*
@@ -197,8 +194,7 @@ ENDPIPROC(__dma_map_area)
  *	- dir	- DMA direction
  */
 ENTRY(__dma_unmap_area)
-	add	x1, x1, x0
 	cmp	w2, #DMA_TO_DEVICE
-	b.ne	__dma_inv_range
+	b.ne	__dma_inv_area
 	ret
 ENDPIPROC(__dma_unmap_area)
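Both rewritten bodies above replace their open-coded loops with the dcache_by_line_op assembler macro from arch/arm64/include/asm/assembler.h. A rough C rendition of the loop structure such a macro emits is sketched below; this is an illustration of the technique, not the exact macro:

#include <stddef.h>
#include <stdint.h>

/*
 * Rough C equivalent of a by-line maintenance loop: `op` stands in for
 * the dc operation (cvac for clean, civac for clean+invalidate) and
 * `line` for the probed cache line size.
 */
static void by_line_op(void (*op)(uintptr_t), uintptr_t kaddr,
		       size_t size, size_t line)
{
	uintptr_t end = kaddr + size;		/* (start, size) -> end */

	kaddr &= ~(uintptr_t)(line - 1);	/* align down to a line */
	do {
		op(kaddr);			/* e.g. dc cvac, kaddr */
		kaddr += line;
	} while (kaddr < end);
	/* the assembly version ends with a dsb to complete the ops */
}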
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index c4284c432ae8..f3953decb171 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -168,7 +168,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 		return ptr;
 
 	/* remove any dirty cache lines on the kernel alias */
-	__dma_flush_range(ptr, ptr + size);
+	__dma_flush_area(ptr, size);
 
 	/* create a coherent mapping */
 	page = virt_to_page(ptr);
@@ -387,7 +387,7 @@ static int __init atomic_pool_init(void)
 		void *page_addr = page_address(page);
 
 		memset(page_addr, 0, atomic_pool_size);
-		__dma_flush_range(page_addr, page_addr + atomic_pool_size);
+		__dma_flush_area(page_addr, atomic_pool_size);
 
 		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
 		if (!atomic_pool)
@@ -548,7 +548,7 @@ fs_initcall(dma_debug_do_init);
 /* Thankfully, all cache ops are by VA so we can ignore phys here */
 static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
 {
-	__dma_flush_range(virt, virt + PAGE_SIZE);
+	__dma_flush_area(virt, PAGE_SIZE);
 }
 
 static void *__iommu_alloc_attrs(struct device *dev, size_t size,
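For the pmem use case that motivated the series, the new __clean_dcache_area_poc() would be consumed roughly as below. This is a minimal sketch under the (start, size) convention introduced here; the actual arch_wb_cache_pmem() implementation landed in a later series and may differ:

#include <stddef.h>

/* cleans [addr, addr + size) to the PoC; defined in cache.S above */
extern void __clean_dcache_area_poc(void *addr, size_t size);

/*
 * Hypothetical wrapper: write cached data back so that persistent
 * memory, which observes the Point of Coherency, sees the latest
 * values before they are considered durable.
 */
static void arch_wb_cache_pmem(void *addr, size_t size)
{
	__clean_dcache_area_poc(addr, size);
}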