Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--	arch/arm/mm/dma-mapping.c	63
1 file changed, 63 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index a2302aba5df2..ab4f74536057 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2014,6 +2014,63 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	__free_iova(mapping, iova, len);
 }
 
+/**
+ * arm_iommu_map_resource - map a device resource for DMA
+ * @dev: valid struct device pointer
+ * @phys_addr: physical address of resource
+ * @size: size of resource to map
+ * @dir: DMA transfer direction
+ */
+static dma_addr_t arm_iommu_map_resource(struct device *dev,
+		phys_addr_t phys_addr, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+	dma_addr_t dma_addr;
+	int ret, prot;
+	phys_addr_t addr = phys_addr & PAGE_MASK;
+	unsigned int offset = phys_addr & ~PAGE_MASK;
+	size_t len = PAGE_ALIGN(size + offset);
+
+	dma_addr = __alloc_iova(mapping, len);
+	if (dma_addr == DMA_ERROR_CODE)
+		return dma_addr;
+
+	prot = __dma_direction_to_prot(dir) | IOMMU_MMIO;
+
+	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
+	if (ret < 0)
+		goto fail;
+
+	return dma_addr + offset;
+fail:
+	__free_iova(mapping, dma_addr, len);
+	return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_unmap_resource - unmap a device DMA resource
+ * @dev: valid struct device pointer
+ * @dma_handle: DMA address to resource
+ * @size: size of resource to unmap
+ * @dir: DMA transfer direction
+ */
+static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+	dma_addr_t iova = dma_handle & PAGE_MASK;
+	unsigned int offset = dma_handle & ~PAGE_MASK;
+	size_t len = PAGE_ALIGN(size + offset);
+
+	if (!iova)
+		return;
+
+	iommu_unmap(mapping->domain, iova, len);
+	__free_iova(mapping, iova, len);
+}
+
 static void arm_iommu_sync_single_for_cpu(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
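
The subtle part of the hunk above is the alignment arithmetic: the physical address is rounded down to a page boundary before being handed to iommu_map(), the sub-page offset is folded back into the returned handle, and the length is rounded up so the IOMMU always maps whole pages. A minimal standalone sketch of that arithmetic, using hypothetical values and assuming a 4 KiB page size (userspace C, not kernel code):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	uint64_t phys_addr = 0x4a034008;	/* hypothetical MMIO register address */
	size_t size = 0x100;			/* hypothetical resource size */

	uint64_t addr = phys_addr & PAGE_MASK;		/* 0x4a034000: page base mapped */
	unsigned int offset = phys_addr & ~PAGE_MASK;	/* 0x8: re-added to the IOVA */
	size_t len = PAGE_ALIGN(size + offset);		/* 0x1000: whole pages to map */

	printf("base 0x%llx len 0x%zx offset 0x%x\n",
	       (unsigned long long)addr, len, offset);
	return 0;
}

The same offset/len computation appears in arm_iommu_unmap_resource() below, so the unmap tears down exactly the pages the map created.
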
@@ -2057,6 +2114,9 @@ struct dma_map_ops iommu_ops = {
 	.unmap_sg		= arm_iommu_unmap_sg,
 	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
 	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
+
+	.map_resource		= arm_iommu_map_resource,
+	.unmap_resource		= arm_iommu_unmap_resource,
 };
 
 struct dma_map_ops iommu_coherent_ops = {
@@ -2070,6 +2130,9 @@ struct dma_map_ops iommu_coherent_ops = {
 
 	.map_sg		= arm_coherent_iommu_map_sg,
 	.unmap_sg	= arm_coherent_iommu_unmap_sg,
+
+	.map_resource		= arm_iommu_map_resource,
+	.unmap_resource		= arm_iommu_unmap_resource,
 };
 
 /**
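
With the two ops-table hunks above applied, a client driver never calls these functions directly: it goes through the generic DMA API, which dispatches to the device's dma_map_ops. A hedged sketch of such a caller, assuming a kernel where the generic dma_map_resource()/dma_unmap_resource() entry points exist (they were introduced alongside this backend); the function name, device, and register address are hypothetical:

#include <linux/dma-mapping.h>

/*
 * Map a peripheral FIFO register so a DMA engine behind the IOMMU can
 * write to it, then tear the mapping down. With iommu_ops (or
 * iommu_coherent_ops) installed for 'dev', dma_map_resource() below
 * lands in arm_iommu_map_resource() from this patch.
 */
static int example_map_fifo(struct device *dev, phys_addr_t fifo_phys)
{
	dma_addr_t dma;

	dma = dma_map_resource(dev, fifo_phys, sizeof(u32),
			       DMA_TO_DEVICE, 0);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... program the DMA engine with 'dma' as its destination ... */

	dma_unmap_resource(dev, dma, sizeof(u32), DMA_TO_DEVICE, 0);
	return 0;
}

Note the IOMMU_MMIO protection flag in arm_iommu_map_resource(): unlike the page-mapping paths, this path maps device registers, not RAM, so no CPU cache maintenance is performed on map or unmap.
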