diff options
author | Marek Szyprowski <m.szyprowski@samsung.com> | 2012-06-13 04:01:15 -0400 |
---|---|---|
committer | Marek Szyprowski <m.szyprowski@samsung.com> | 2012-07-30 06:25:47 -0400 |
commit | dc2832e1e7db3f9ad465d2fe894bd69ef05d1e4b (patch) | |
tree | 67b74751c0d7ee64c6ab9702a56041ab0f3df092 /arch/arm | |
parent | d2b7428eb0caa7c66e34b6ac869a43915b294123 (diff) |
ARM: dma-mapping: add support for dma_get_sgtable()
This patch adds support for the dma_get_sgtable() function, which is required
to let drivers share the buffers allocated by the DMA-mapping subsystem.
The generic implementation based on virt_to_page() is not suitable for the ARM
dma-mapping subsystem.
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Kyungmin Park <kyungmin.park@samsung.com>
Diffstat (limited to 'arch/arm')
-rw-r--r-- | arch/arm/common/dmabounce.c | 1 | ||||
-rw-r--r-- | arch/arm/include/asm/dma-mapping.h | 3 | ||||
-rw-r--r-- | arch/arm/mm/dma-mapping.c | 31 |
3 files changed, 35 insertions, 0 deletions
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index aa07f5938f05..1143c4d5c567 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c | |||
@@ -452,6 +452,7 @@ static struct dma_map_ops dmabounce_ops = { | |||
452 | .alloc = arm_dma_alloc, | 452 | .alloc = arm_dma_alloc, |
453 | .free = arm_dma_free, | 453 | .free = arm_dma_free, |
454 | .mmap = arm_dma_mmap, | 454 | .mmap = arm_dma_mmap, |
455 | .get_sgtable = arm_dma_get_sgtable, | ||
455 | .map_page = dmabounce_map_page, | 456 | .map_page = dmabounce_map_page, |
456 | .unmap_page = dmabounce_unmap_page, | 457 | .unmap_page = dmabounce_unmap_page, |
457 | .sync_single_for_cpu = dmabounce_sync_for_cpu, | 458 | .sync_single_for_cpu = dmabounce_sync_for_cpu, |
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index a04803331144..2ae842df4551 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h | |||
@@ -261,6 +261,9 @@ extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, | |||
261 | enum dma_data_direction); | 261 | enum dma_data_direction); |
262 | extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int, | 262 | extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int, |
263 | enum dma_data_direction); | 263 | enum dma_data_direction); |
264 | extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
265 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | ||
266 | struct dma_attrs *attrs); | ||
264 | 267 | ||
265 | #endif /* __KERNEL__ */ | 268 | #endif /* __KERNEL__ */ |
266 | #endif | 269 | #endif |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 8e505b9df5c7..51278ede2028 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -125,6 +125,7 @@ struct dma_map_ops arm_dma_ops = { | |||
125 | .alloc = arm_dma_alloc, | 125 | .alloc = arm_dma_alloc, |
126 | .free = arm_dma_free, | 126 | .free = arm_dma_free, |
127 | .mmap = arm_dma_mmap, | 127 | .mmap = arm_dma_mmap, |
128 | .get_sgtable = arm_dma_get_sgtable, | ||
128 | .map_page = arm_dma_map_page, | 129 | .map_page = arm_dma_map_page, |
129 | .unmap_page = arm_dma_unmap_page, | 130 | .unmap_page = arm_dma_unmap_page, |
130 | .map_sg = arm_dma_map_sg, | 131 | .map_sg = arm_dma_map_sg, |
@@ -661,6 +662,21 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, | |||
661 | } | 662 | } |
662 | } | 663 | } |
663 | 664 | ||
665 | int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
666 | void *cpu_addr, dma_addr_t handle, size_t size, | ||
667 | struct dma_attrs *attrs) | ||
668 | { | ||
669 | struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); | ||
670 | int ret; | ||
671 | |||
672 | ret = sg_alloc_table(sgt, 1, GFP_KERNEL); | ||
673 | if (unlikely(ret)) | ||
674 | return ret; | ||
675 | |||
676 | sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); | ||
677 | return 0; | ||
678 | } | ||
679 | |||
664 | static void dma_cache_maint_page(struct page *page, unsigned long offset, | 680 | static void dma_cache_maint_page(struct page *page, unsigned long offset, |
665 | size_t size, enum dma_data_direction dir, | 681 | size_t size, enum dma_data_direction dir, |
666 | void (*op)(const void *, size_t, int)) | 682 | void (*op)(const void *, size_t, int)) |
@@ -1172,6 +1188,20 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, | |||
1172 | __iommu_free_buffer(dev, pages, size); | 1188 | __iommu_free_buffer(dev, pages, size); |
1173 | } | 1189 | } |
1174 | 1190 | ||
1191 | static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
1192 | void *cpu_addr, dma_addr_t dma_addr, | ||
1193 | size_t size, struct dma_attrs *attrs) | ||
1194 | { | ||
1195 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
1196 | struct page **pages = __iommu_get_pages(cpu_addr, attrs); | ||
1197 | |||
1198 | if (!pages) | ||
1199 | return -ENXIO; | ||
1200 | |||
1201 | return sg_alloc_table_from_pages(sgt, pages, count, 0, size, | ||
1202 | GFP_KERNEL); | ||
1203 | } | ||
1204 | |||
1175 | /* | 1205 | /* |
1176 | * Map a part of the scatter-gather list into contiguous io address space | 1206 | * Map a part of the scatter-gather list into contiguous io address space |
1177 | */ | 1207 | */ |
@@ -1431,6 +1461,7 @@ struct dma_map_ops iommu_ops = { | |||
1431 | .alloc = arm_iommu_alloc_attrs, | 1461 | .alloc = arm_iommu_alloc_attrs, |
1432 | .free = arm_iommu_free_attrs, | 1462 | .free = arm_iommu_free_attrs, |
1433 | .mmap = arm_iommu_mmap_attrs, | 1463 | .mmap = arm_iommu_mmap_attrs, |
1464 | .get_sgtable = arm_iommu_get_sgtable, | ||
1434 | 1465 | ||
1435 | .map_page = arm_iommu_map_page, | 1466 | .map_page = arm_iommu_map_page, |
1436 | .unmap_page = arm_iommu_unmap_page, | 1467 | .unmap_page = arm_iommu_unmap_page, |