aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm64/mm
diff options
context:
space:
mode:
authorCatalin Marinas <catalin.marinas@arm.com>2014-04-01 13:32:55 -0400
committerCatalin Marinas <catalin.marinas@arm.com>2014-04-08 06:45:08 -0400
commitebf81a938dade3b450eb11c57fa744cfac4b523f (patch)
treefb7c10fd49f9ee3a71a7d5c0620d3a6745a082ea /arch/arm64/mm
parentd253b4406df69fa7a74231769d6f6ad80dc33063 (diff)
arm64: Fix DMA range invalidation for cache line unaligned buffers
If the buffer needing cache invalidation for inbound DMA does not start or end on a cache line aligned address, we need to use the non-destructive clean&invalidate operation. This issue was introduced by commit 7363590d2c46 (arm64: Implement coherent DMA API based on swiotlb). Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> Reported-by: Jon Medhurst (Tixy) <tixy@linaro.org>
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--arch/arm64/mm/cache.S15
1 files changed, 11 insertions, 4 deletions
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index e803a62e0e45..fda756875fa6 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -183,12 +183,19 @@ ENTRY(__inval_cache_range)
 __dma_inv_range:
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
-	bic	x0, x0, x3
+	tst	x1, x3				// end cache line aligned?
 	bic	x1, x1, x3
-1:	dc	ivac, x0			// invalidate D / U line
-	add	x0, x0, x2
+	b.eq	1f
+	dc	civac, x1			// clean & invalidate D / U line
+1:	tst	x0, x3				// start cache line aligned?
+	bic	x0, x0, x3
+	b.eq	2f
+	dc	civac, x0			// clean & invalidate D / U line
+	b	3f
+2:	dc	ivac, x0			// invalidate D / U line
+3:	add	x0, x0, x2
 	cmp	x0, x1
-	b.lo	1b
+	b.lo	2b
 	dsb	sy
 	ret
 ENDPROC(__inval_cache_range)