author    Marek Szyprowski <m.szyprowski@samsung.com>    2012-06-06 08:50:56 -0400
committer Marek Szyprowski <m.szyprowski@samsung.com>    2012-07-30 06:25:47 -0400
commit    97ef952a20853fad72087a53fa556fbec45edd8f
tree      5aa3a3422ead9dc4933a5cf57169dbe659ba0ce7 /arch/arm/mm/dma-mapping.c
parent    bdf5e4871f1b41150236e2337837399109469e65
ARM: dma-mapping: add support for DMA_ATTR_SKIP_CPU_SYNC attribute
This patch adds support for the DMA_ATTR_SKIP_CPU_SYNC attribute to the
dma_(un)map_(single,page,sg) family of functions. It lets DMA mapping
clients create a mapping for a buffer on a given device without
performing CPU cache synchronization. Cache synchronization can be
skipped for buffers that are known to already be in the 'device' domain
(the CPU caches have already been synchronized, or only coherent
mappings of the buffer exist). For advanced users only; please use it
with care.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Kyungmin Park <kyungmin.park@samsung.com>
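For context (not part of the commit itself): a minimal sketch of how a client
might use the new attribute, written against the struct dma_attrs API of this
kernel generation. The 'dev', 'buf' and 'size' names below are illustrative
placeholders supplied by the caller, not anything introduced by this patch.

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

/*
 * Illustrative sketch only: map a buffer that the caller already knows
 * to be in the 'device' domain, skipping per-map CPU cache maintenance.
 */
static dma_addr_t map_presynced(struct device *dev, void *buf, size_t size)
{
	DEFINE_DMA_ATTRS(attrs);

	/* caller guarantees caches were already synchronized for 'buf' */
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	return dma_map_single_attrs(dev, buf, size, DMA_TO_DEVICE, &attrs);
}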
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--	arch/arm/mm/dma-mapping.c	20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 51278ede2028..b30ba0a1fca3 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -73,7 +73,7 @@ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	if (!arch_is_coherent())
+	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
@@ -96,7 +96,7 @@ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	if (!arch_is_coherent())
+	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
 				      handle & ~PAGE_MASK, size, dir);
 }
@@ -1207,7 +1207,7 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
  */
 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 			  size_t size, dma_addr_t *handle,
-			  enum dma_data_direction dir)
+			  enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	dma_addr_t iova, iova_base;
@@ -1226,7 +1226,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		phys_addr_t phys = page_to_phys(sg_page(s));
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
-		if (!arch_is_coherent())
+		if (!arch_is_coherent() &&
+		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
 		ret = iommu_map(mapping->domain, iova, phys, len, 0);
@@ -1273,7 +1274,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
 			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
-					   dir) < 0)
+					   dir, attrs) < 0)
 				goto bad_mapping;
 
 			dma->dma_address += offset;
@@ -1286,7 +1287,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		}
 		size += s->length;
 	}
-	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
+	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs) < 0)
 		goto bad_mapping;
 
 	dma->dma_address += offset;
@@ -1320,7 +1321,8 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 		if (sg_dma_len(s))
 			__iommu_remove_mapping(dev, sg_dma_address(s),
 					       sg_dma_len(s));
-		if (!arch_is_coherent())
+		if (!arch_is_coherent() &&
+		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 			__dma_page_dev_to_cpu(sg_page(s), s->offset,
 					      s->length, dir);
 	}
@@ -1382,7 +1384,7 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 	dma_addr_t dma_addr;
 	int ret, len = PAGE_ALIGN(size + offset);
 
-	if (!arch_is_coherent())
+	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 
 	dma_addr = __alloc_iova(mapping, len);
@@ -1421,7 +1423,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	if (!iova)
 		return;
 
-	if (!arch_is_coherent())
+	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_dev_to_cpu(page, offset, size, dir);
 
 	iommu_unmap(mapping->domain, iova, len);
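The same attribute also flows through the scatter-gather path patched above:
dma_map_sg_attrs() hands attrs down to arm_iommu_map_sg() and on to
__map_sg_chunk(), which can then skip __dma_page_cpu_to_dev() for each chunk.
A hedged sketch of that usage, with 'dev' and 'sgt' again as illustrative
placeholders:

/*
 * Illustrative sg variant (not part of this patch); returns the number
 * of mapped entries, or 0 on failure, as dma_map_sg_attrs() does.
 */
static int map_presynced_sg(struct device *dev, struct sg_table *sgt)
{
	DEFINE_DMA_ATTRS(attrs);

	/* buffers behind 'sgt' are already in the 'device' domain */
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	return dma_map_sg_attrs(dev, sgt->sgl, sgt->nents,
				DMA_BIDIRECTIONAL, &attrs);
}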