diff options
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r-- | arch/arm/mm/dma-mapping.c | 20 |
1 file changed, 11 insertions, 9 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 51278ede2028..b30ba0a1fca3 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -73,7 +73,7 @@ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, | |||
73 | unsigned long offset, size_t size, enum dma_data_direction dir, | 73 | unsigned long offset, size_t size, enum dma_data_direction dir, |
74 | struct dma_attrs *attrs) | 74 | struct dma_attrs *attrs) |
75 | { | 75 | { |
76 | if (!arch_is_coherent()) | 76 | if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) |
77 | __dma_page_cpu_to_dev(page, offset, size, dir); | 77 | __dma_page_cpu_to_dev(page, offset, size, dir); |
78 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; | 78 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; |
79 | } | 79 | } |
@@ -96,7 +96,7 @@ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle, | |||
96 | size_t size, enum dma_data_direction dir, | 96 | size_t size, enum dma_data_direction dir, |
97 | struct dma_attrs *attrs) | 97 | struct dma_attrs *attrs) |
98 | { | 98 | { |
99 | if (!arch_is_coherent()) | 99 | if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) |
100 | __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), | 100 | __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), |
101 | handle & ~PAGE_MASK, size, dir); | 101 | handle & ~PAGE_MASK, size, dir); |
102 | } | 102 | } |
@@ -1207,7 +1207,7 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, | |||
1207 | */ | 1207 | */ |
1208 | static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, | 1208 | static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, |
1209 | size_t size, dma_addr_t *handle, | 1209 | size_t size, dma_addr_t *handle, |
1210 | enum dma_data_direction dir) | 1210 | enum dma_data_direction dir, struct dma_attrs *attrs) |
1211 | { | 1211 | { |
1212 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | 1212 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; |
1213 | dma_addr_t iova, iova_base; | 1213 | dma_addr_t iova, iova_base; |
@@ -1226,7 +1226,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, | |||
1226 | phys_addr_t phys = page_to_phys(sg_page(s)); | 1226 | phys_addr_t phys = page_to_phys(sg_page(s)); |
1227 | unsigned int len = PAGE_ALIGN(s->offset + s->length); | 1227 | unsigned int len = PAGE_ALIGN(s->offset + s->length); |
1228 | 1228 | ||
1229 | if (!arch_is_coherent()) | 1229 | if (!arch_is_coherent() && |
1230 | !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | ||
1230 | __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); | 1231 | __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); |
1231 | 1232 | ||
1232 | ret = iommu_map(mapping->domain, iova, phys, len, 0); | 1233 | ret = iommu_map(mapping->domain, iova, phys, len, 0); |
@@ -1273,7 +1274,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
1273 | 1274 | ||
1274 | if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { | 1275 | if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { |
1275 | if (__map_sg_chunk(dev, start, size, &dma->dma_address, | 1276 | if (__map_sg_chunk(dev, start, size, &dma->dma_address, |
1276 | dir) < 0) | 1277 | dir, attrs) < 0) |
1277 | goto bad_mapping; | 1278 | goto bad_mapping; |
1278 | 1279 | ||
1279 | dma->dma_address += offset; | 1280 | dma->dma_address += offset; |
@@ -1286,7 +1287,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
1286 | } | 1287 | } |
1287 | size += s->length; | 1288 | size += s->length; |
1288 | } | 1289 | } |
1289 | if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0) | 1290 | if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs) < 0) |
1290 | goto bad_mapping; | 1291 | goto bad_mapping; |
1291 | 1292 | ||
1292 | dma->dma_address += offset; | 1293 | dma->dma_address += offset; |
@@ -1320,7 +1321,8 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
1320 | if (sg_dma_len(s)) | 1321 | if (sg_dma_len(s)) |
1321 | __iommu_remove_mapping(dev, sg_dma_address(s), | 1322 | __iommu_remove_mapping(dev, sg_dma_address(s), |
1322 | sg_dma_len(s)); | 1323 | sg_dma_len(s)); |
1323 | if (!arch_is_coherent()) | 1324 | if (!arch_is_coherent() && |
1325 | !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | ||
1324 | __dma_page_dev_to_cpu(sg_page(s), s->offset, | 1326 | __dma_page_dev_to_cpu(sg_page(s), s->offset, |
1325 | s->length, dir); | 1327 | s->length, dir); |
1326 | } | 1328 | } |
@@ -1382,7 +1384,7 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, | |||
1382 | dma_addr_t dma_addr; | 1384 | dma_addr_t dma_addr; |
1383 | int ret, len = PAGE_ALIGN(size + offset); | 1385 | int ret, len = PAGE_ALIGN(size + offset); |
1384 | 1386 | ||
1385 | if (!arch_is_coherent()) | 1387 | if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) |
1386 | __dma_page_cpu_to_dev(page, offset, size, dir); | 1388 | __dma_page_cpu_to_dev(page, offset, size, dir); |
1387 | 1389 | ||
1388 | dma_addr = __alloc_iova(mapping, len); | 1390 | dma_addr = __alloc_iova(mapping, len); |
@@ -1421,7 +1423,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, | |||
1421 | if (!iova) | 1423 | if (!iova) |
1422 | return; | 1424 | return; |
1423 | 1425 | ||
1424 | if (!arch_is_coherent()) | 1426 | if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) |
1425 | __dma_page_dev_to_cpu(page, offset, size, dir); | 1427 | __dma_page_dev_to_cpu(page, offset, size, dir); |
1426 | 1428 | ||
1427 | iommu_unmap(mapping->domain, iova, len); | 1429 | iommu_unmap(mapping->domain, iova, len); |