author		Andreas Herrmann <andreas.herrmann@calxeda.com>	2013-09-26 18:36:15 -0400
committer	Marek Szyprowski <m.szyprowski@samsung.com>	2013-10-02 07:23:11 -0400
commit		c9b24996d5da1bf7d2bebab5770dfcc7834c53b7 (patch)
tree		986b26dc765c0bfb16ace52b2a7e83e1c1a5a4a8 /arch/arm/mm
parent		15c03dd4859ab16f9212238f29dd315654aa94f6 (diff)
ARM: dma-mapping: Always pass proper prot flags to iommu_map()
... otherwise it is impossible for the low-level IOMMU driver to figure out which PTE flags should be used.

In __map_sg_chunk we can derive the flags from the dma_data_direction. In __iommu_create_mapping we should treat the memory like DMA_BIDIRECTIONAL and pass both IOMMU_READ and IOMMU_WRITE to iommu_map. __iommu_create_mapping is used during dma_alloc_coherent (via arm_iommu_alloc_attrs); since dma_alloc_coherent is responsible for allocation _and_ mapping, both read and write access to the mapped pages should be allowed.

Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Andreas Herrmann <andreas.herrmann@calxeda.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
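For context, a minimal caller-side sketch (a hypothetical driver snippet, not part of this patch) of how the direction a driver passes to the streaming DMA API is what now determines the prot flags reaching iommu_map():

/* Hypothetical driver code, for illustration only. The dma_data_direction
 * given here is what __map_sg_chunk()/arm_iommu_map_page() now translate
 * into IOMMU prot flags: DMA_TO_DEVICE -> IOMMU_READ, because the device
 * only reads the buffer.
 */
#include <linux/dma-mapping.h>

static int example_queue_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... program the device with 'dma', then unmap when done: */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}

Before this patch, the IOMMU mapping backing such a transfer was created with prot == 0, regardless of direction.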
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/dma-mapping.c	43
1 file changed, 28 insertions(+), 15 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f5e1a8471714..1272ed202dde 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1232,7 +1232,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 				break;
 
 		len = (j - i) << PAGE_SHIFT;
-		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		ret = iommu_map(mapping->domain, iova, phys, len,
+				IOMMU_READ|IOMMU_WRITE);
 		if (ret < 0)
 			goto fail;
 		iova += len;
@@ -1431,6 +1432,27 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 					 GFP_KERNEL);
 }
 
+static int __dma_direction_to_prot(enum dma_data_direction dir)
+{
+	int prot;
+
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+		prot = IOMMU_READ | IOMMU_WRITE;
+		break;
+	case DMA_TO_DEVICE:
+		prot = IOMMU_READ;
+		break;
+	case DMA_FROM_DEVICE:
+		prot = IOMMU_WRITE;
+		break;
+	default:
+		prot = 0;
+	}
+
+	return prot;
+}
+
 /*
  * Map a part of the scatter-gather list into contiguous io address space
  */
@@ -1444,6 +1466,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 	int ret = 0;
 	unsigned int count;
 	struct scatterlist *s;
+	int prot;
 
 	size = PAGE_ALIGN(size);
 	*handle = DMA_ERROR_CODE;
@@ -1460,7 +1483,9 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
-		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		prot = __dma_direction_to_prot(dir);
+
+		ret = iommu_map(mapping->domain, iova, phys, len, prot);
 		if (ret < 0)
 			goto fail;
 		count += len >> PAGE_SHIFT;
@@ -1665,19 +1690,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
 
-	switch (dir) {
-	case DMA_BIDIRECTIONAL:
-		prot = IOMMU_READ | IOMMU_WRITE;
-		break;
-	case DMA_TO_DEVICE:
-		prot = IOMMU_READ;
-		break;
-	case DMA_FROM_DEVICE:
-		prot = IOMMU_WRITE;
-		break;
-	default:
-		prot = 0;
-	}
+	prot = __dma_direction_to_prot(dir);
 
 	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
 	if (ret < 0)
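With prot now passed through, a low-level IOMMU driver can derive hardware PTE permission bits from it, which is exactly what the commit message says was impossible with prot == 0. A minimal sketch of such a translation (the EXAMPLE_* bits and helper are made up; no real driver is quoted here):

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/iommu.h>

#define EXAMPLE_PTE_VALID	BIT(0)	/* hypothetical hardware PTE bits */
#define EXAMPLE_PTE_READ	BIT(1)
#define EXAMPLE_PTE_WRITE	BIT(2)

/* Translate IOMMU API prot flags into hypothetical hardware PTE bits. */
static u64 example_prot_to_pte(phys_addr_t paddr, int prot)
{
	u64 pte = paddr | EXAMPLE_PTE_VALID;

	if (prot & IOMMU_READ)
		pte |= EXAMPLE_PTE_READ;
	if (prot & IOMMU_WRITE)
		pte |= EXAMPLE_PTE_WRITE;

	return pte;
}

With the old prot == 0, no permission bit would be set here and device accesses to the mapping would fault on hardware that enforces them.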