path: root/include/asm-arm/dma-mapping.h
author    Lennert Buytenhek <buytenh@wantstofly.org>  2006-04-01 18:07:39 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>  2006-04-01 18:07:39 -0500
commit    23759dc6430428897a36c4d493f611eca55c9481 (patch)
tree      c62050927599b36ed223753c35fd737e3c0c6762 /include/asm-arm/dma-mapping.h
parent    d3f4c571b6e596f9d39c596426269006a309d3b8 (diff)
[ARM] 3439/2: xsc3: add I/O coherency support
Patch from Lennert Buytenhek

This patch adds support for the I/O-coherent cache available on the
xsc3. The approach is to provide a simple API, arch_is_coherent(), to
determine whether the chipset supports coherency, and then to set the
appropriate system memory PTE and PMD bits. In addition, we check this
API in the dma_alloc_coherent() and dma_map_single() paths. A generic
version exists that compiles out all the coherency-related code, which
is not needed on the majority of ARM systems.

Note that we do not check for coherency in dma_alloc_writecombine(),
as that still requires a special PTE setting. We also don't touch
dma_mmap_coherent(), as that is a special ARM-only API that is by
definition only used on non-coherent systems.

Signed-off-by: Deepak Saxena <dsaxena@plexity.net>
Signed-off-by: Lennert Buytenhek <buytenh@wantstofly.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
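[Editor's note] The compile-out mechanism the message refers to can be
pictured as a constant-zero fallback macro: when a platform does not
declare itself coherent, arch_is_coherent() evaluates to 0 at compile
time and the compiler discards every guarded consistent_sync() call
site. A minimal sketch of that pattern follows; the exact header and
override site are not part of this diff, so treat the placement as
illustrative.

/*
 * Sketch of the generic fallback pattern described in the commit
 * message.  Which header this lives in is not shown by this diff;
 * the pattern is what matters.
 */
#ifndef arch_is_coherent
#define arch_is_coherent()	0	/* default: not I/O coherent */
#endif

/*
 * A coherent xsc3-based platform would instead provide, before the
 * fallback is seen:
 *
 *	#define arch_is_coherent()	1
 *
 * which makes each "if (!arch_is_coherent())" branch statically
 * dead, so the consistent_sync() calls compile away entirely.
 */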
Diffstat (limited to 'include/asm-arm/dma-mapping.h')
-rw-r--r--  include/asm-arm/dma-mapping.h | 22 +++++++++++++++-------
1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index e3e8541ee63..63ca7412a46 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -47,7 +47,7 @@ static inline int dma_get_cache_alignment(void)
 
 static inline int dma_is_consistent(dma_addr_t handle)
 {
-	return 0;
+	return !!arch_is_coherent();
 }
 
 /*
@@ -145,7 +145,9 @@ static inline dma_addr_t
 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	       enum dma_data_direction dir)
 {
-	consistent_sync(cpu_addr, size, dir);
+	if (!arch_is_coherent())
+		consistent_sync(cpu_addr, size, dir);
+
 	return virt_to_dma(dev, (unsigned long)cpu_addr);
 }
 #else
@@ -255,7 +257,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 		sg->dma_address = page_to_dma(dev, sg->page) + sg->offset;
 		virt = page_address(sg->page) + sg->offset;
-		consistent_sync(virt, sg->length, dir);
+
+		if (!arch_is_coherent())
+			consistent_sync(virt, sg->length, dir);
 	}
 
 	return nents;
@@ -310,14 +314,16 @@ static inline void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
 			enum dma_data_direction dir)
 {
-	consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+	if (!arch_is_coherent())
+		consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
 }
 
 static inline void
 dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
 			   enum dma_data_direction dir)
 {
-	consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+	if (!arch_is_coherent())
+		consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
 }
 #else
 extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
@@ -347,7 +353,8 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 
 	for (i = 0; i < nents; i++, sg++) {
 		char *virt = page_address(sg->page) + sg->offset;
-		consistent_sync(virt, sg->length, dir);
+		if (!arch_is_coherent())
+			consistent_sync(virt, sg->length, dir);
 	}
 }
 
@@ -359,7 +366,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 
 	for (i = 0; i < nents; i++, sg++) {
 		char *virt = page_address(sg->page) + sg->offset;
-		consistent_sync(virt, sg->length, dir);
+		if (!arch_is_coherent())
+			consistent_sync(virt, sg->length, dir);
 	}
 }
 #else
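
[Editor's note] Since dma_is_consistent() now returns
!!arch_is_coherent(), a driver can observe the platform's coherency at
run time. A minimal driver-side sketch of the new behaviour follows;
'dev', 'buf', and 'len' are hypothetical and not part of this patch.

/*
 * Illustrative sketch only: the patch itself changes just the
 * dma-mapping internals shown in the diff above.
 */
dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

if (dma_is_consistent(handle)) {
	/*
	 * Coherent platform (e.g. xsc3 with I/O coherency): the map
	 * call above reduced to address translation only, with no
	 * cache maintenance performed.
	 */
} else {
	/*
	 * Non-coherent platform: consistent_sync() performed the
	 * cache maintenance required for the DMA direction.
	 */
}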