| author | Vladimir Murzin <vladimir.murzin@arm.com> | 2017-07-20 06:19:59 -0400 |
|---|---|---|
| committer | Christoph Hellwig <hch@lst.de> | 2017-07-20 10:09:27 -0400 |
| commit | 878ec36765fa71ffe656bc003d25bb9f8bad28f9 (patch) | |
| tree | 179070aa4cc0a17c92b7f24a424a3772db5f325e | |
| parent | 43fc509c3efb5c973991ee24c449ab2a0d71dd1e (diff) | |
ARM: NOMMU: Wire-up default DMA interface
The way the default DMA pool is exposed has changed, and we now need to
use a dedicated interface to work with it. This patch switches the
alloc/release operations over to that interface. Since the default DMA
pool is no longer handled by generic code, we also have to implement our
own mmap operation.
Tested-by: Andras Szemzo <sza@esh.hu>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
| -rw-r--r-- | arch/arm/mm/dma-mapping-nommu.c | 45 |
1 file changed, 36 insertions(+), 9 deletions(-)
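For context, the sketch below (not part of the patch, using only hypothetical `example_*` names) shows how a driver on an ARM NOMMU system reaches these ops through the generic DMA API: `dma_alloc_coherent()` lands in `arm_nommu_dma_alloc()`, which after this change serves the request from the default DMA pool via `dma_alloc_from_global_coherent()`, while an allocation with `DMA_ATTR_NON_CONSISTENT` is handed to the dma_noop allocator instead.

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/*
 * Hypothetical helper, for illustration only: allocate a DMA buffer for a
 * NOMMU ARM device.  dma_alloc_coherent() dispatches to
 * arm_nommu_dma_ops.alloc (arm_nommu_dma_alloc), which now draws from the
 * default DMA pool via dma_alloc_from_global_coherent().
 */
static void *example_alloc_dma_buffer(struct device *dev, size_t size,
				      dma_addr_t *dma_handle)
{
	/* Coherent allocation: served from the default (global) DMA pool. */
	void *vaddr = dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);

	if (!vaddr)
		/*
		 * Optional fallback to a non-consistent buffer; this path goes
		 * straight to the dma_noop allocator, and the caller must then
		 * do explicit cache maintenance with
		 * dma_sync_single_for_cpu()/dma_sync_single_for_device().
		 */
		vaddr = dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL,
					DMA_ATTR_NON_CONSISTENT);

	return vaddr;
}
```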
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index 90ee354d803e..6db5fc26d154 100644
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -40,9 +40,21 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
 
 {
 	const struct dma_map_ops *ops = &dma_noop_ops;
+	void *ret;
 
 	/*
-	 * We are here because:
+	 * Try generic allocator first if we are advertised that
+	 * consistency is not required.
+	 */
+
+	if (attrs & DMA_ATTR_NON_CONSISTENT)
+		return ops->alloc(dev, size, dma_handle, gfp, attrs);
+
+	ret = dma_alloc_from_global_coherent(size, dma_handle);
+
+	/*
+	 * dma_alloc_from_global_coherent() may fail because:
+	 *
 	 * - no consistent DMA region has been defined, so we can't
 	 *   continue.
 	 * - there is no space left in consistent DMA region, so we
@@ -50,11 +62,8 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
 	 *   advertised that consistency is not required.
 	 */
 
-	if (attrs & DMA_ATTR_NON_CONSISTENT)
-		return ops->alloc(dev, size, dma_handle, gfp, attrs);
-
-	WARN_ON_ONCE(1);
-	return NULL;
+	WARN_ON_ONCE(ret == NULL);
+	return ret;
 }
 
 static void arm_nommu_dma_free(struct device *dev, size_t size,
@@ -63,14 +72,31 @@ static void arm_nommu_dma_free(struct device *dev, size_t size,
 {
 	const struct dma_map_ops *ops = &dma_noop_ops;
 
-	if (attrs & DMA_ATTR_NON_CONSISTENT)
+	if (attrs & DMA_ATTR_NON_CONSISTENT) {
 		ops->free(dev, size, cpu_addr, dma_addr, attrs);
-	else
-		WARN_ON_ONCE(1);
+	} else {
+		int ret = dma_release_from_global_coherent(get_order(size),
+							   cpu_addr);
+
+		WARN_ON_ONCE(ret == 0);
+	}
 
 	return;
 }
 
+static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			      unsigned long attrs)
+{
+	int ret;
+
+	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
+		return ret;
+
+	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+
 static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
 				  enum dma_data_direction dir)
 {
@@ -173,6 +199,7 @@ static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist
 const struct dma_map_ops arm_nommu_dma_ops = {
 	.alloc			= arm_nommu_dma_alloc,
 	.free			= arm_nommu_dma_free,
+	.mmap			= arm_nommu_dma_mmap,
 	.map_page		= arm_nommu_dma_map_page,
 	.unmap_page		= arm_nommu_dma_unmap_page,
 	.map_sg			= arm_nommu_dma_map_sg,
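As a hypothetical usage sketch (not part of the patch; the `example_*` identifiers are assumptions for illustration), the new `.mmap` hook is reached when a driver exports its coherent buffer to user space with `dma_mmap_coherent()`: that call now ends up in `arm_nommu_dma_mmap()`, which lets `dma_mmap_from_global_coherent()` handle buffers from the default pool and falls back to `dma_common_mmap()` otherwise.

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Assumed driver state, set up elsewhere (hypothetical). */
static struct device *example_dev;
static void *example_buf;		/* from dma_alloc_coherent() */
static dma_addr_t example_buf_dma;
#define EXAMPLE_BUF_SIZE	(64 * 1024)

/*
 * Hypothetical char-device mmap handler.  dma_mmap_coherent() invokes
 * arm_nommu_dma_ops.mmap, i.e. the arm_nommu_dma_mmap() added above, so
 * memory from the default DMA pool is mapped through
 * dma_mmap_from_global_coherent().
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	return dma_mmap_coherent(example_dev, vma, example_buf,
				 example_buf_dma, EXAMPLE_BUF_SIZE);
}
```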
