author     Marek Szyprowski <m.szyprowski@samsung.com>   2012-06-14 07:03:04 -0400
committer  Marek Szyprowski <m.szyprowski@samsung.com>   2012-07-30 06:25:46 -0400
commit     64ccc9c033c6089b2d426dad3c56477ab066c999 (patch)
tree       ffaec86ca326dfc83b78ce4005bf46c3ad98ceb9
parent     9fa8af91f0679f2abbebe1382b937264f3a8b981 (diff)
common: dma-mapping: add support for generic dma_mmap_* calls
Commit 9adc5374 ('common: dma-mapping: introduce mmap method') added a
generic method for implementing the mmap user call to the dma_map_ops
structure. This patch converts the ARM and PowerPC architectures (the only
providers of the dma_mmap_coherent/dma_mmap_writecombine calls) to use this
generic dma_map_ops based call, and adds a generic, cross-architecture
definition of the dma_mmap_attrs, dma_mmap_coherent and dma_mmap_writecombine
functions.
A generic, virt_to_page-based fallback implementation of the mmap method is
provided for architectures that do not supply their own.
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Kyungmin Park <kyungmin.park@samsung.com>
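
[Editor's note: for illustration only, not part of this patch, a minimal sketch of how a
driver can use the now cross-architecture dma_mmap_coherent() call from its mmap file
operation. The my_dev structure and its fields are hypothetical, and the buffer is assumed
to have been obtained earlier from dma_alloc_coherent().]

    #include <linux/dma-mapping.h>
    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Hypothetical per-device state; not part of this patch. */
    struct my_dev {
            struct device *dev;       /* device the buffer was allocated for */
            void *vaddr;              /* CPU address from dma_alloc_coherent() */
            dma_addr_t dma_handle;    /* device address from dma_alloc_coherent() */
            size_t size;              /* size passed to dma_alloc_coherent() */
    };

    /* Would be installed as the .mmap member of the driver's file_operations. */
    static int my_driver_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct my_dev *md = file->private_data;

            /*
             * Dispatches to dma_map_ops->mmap when the architecture provides
             * one, or to the generic dma_common_mmap() fallback otherwise.
             */
            return dma_mmap_coherent(md->dev, vma, md->vaddr,
                                     md->dma_handle, md->size);
    }

[On architectures whose dma_map_ops does not implement the mmap method, the call falls
through to the new virt_to_page-based dma_common_mmap() fallback added in this patch.]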
-rw-r--r--  arch/arm/include/asm/dma-mapping.h        | 19
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h    |  8
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c           |  1
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c         |  1
-rw-r--r--  arch/powerpc/kernel/dma.c                 | 36
-rw-r--r--  arch/powerpc/kernel/vio.c                 |  1
-rw-r--r--  drivers/base/dma-mapping.c                | 31
-rw-r--r--  include/asm-generic/dma-coherent.h        |  1
-rw-r--r--  include/asm-generic/dma-mapping-common.h  | 37
9 files changed, 95 insertions(+), 40 deletions(-)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 80777d871422..a04803331144 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -186,17 +186,6 @@ extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        struct dma_attrs *attrs);
 
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-
-static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
-                                 void *cpu_addr, dma_addr_t dma_addr,
-                                 size_t size, struct dma_attrs *attrs)
-{
-        struct dma_map_ops *ops = get_dma_ops(dev);
-        BUG_ON(!ops);
-        return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
-
 static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
                                            dma_addr_t *dma_handle, gfp_t flag)
 {
@@ -213,14 +202,6 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
         return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
 }
 
-static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-                                        void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-        DEFINE_DMA_ATTRS(attrs);
-        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
-        return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
-}
-
 /*
  * This can be called during boot to increase the size of the consistent
  * DMA region above it's default value of 2MB. It must be called before the
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 62678e365ca0..78160874809a 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -27,7 +27,10 @@ extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 extern void dma_direct_free_coherent(struct device *dev, size_t size,
                                      void *vaddr, dma_addr_t dma_handle,
                                      struct dma_attrs *attrs);
-
+extern int dma_direct_mmap_coherent(struct device *dev,
+                                    struct vm_area_struct *vma,
+                                    void *cpu_addr, dma_addr_t handle,
+                                    size_t size, struct dma_attrs *attrs);
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
 /*
@@ -207,11 +210,8 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
-extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
-                void *, dma_addr_t, size_t);
 #define ARCH_HAS_DMA_MMAP_COHERENT
 
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                 enum dma_data_direction direction)
 {
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index bcfdcd22c766..2d7bb8ced136 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -109,6 +109,7 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
 struct dma_map_ops dma_iommu_ops = {
         .alloc = dma_iommu_alloc_coherent,
         .free = dma_iommu_free_coherent,
+        .mmap = dma_direct_mmap_coherent,
         .map_sg = dma_iommu_map_sg,
         .unmap_sg = dma_iommu_unmap_sg,
         .dma_supported = dma_iommu_dma_supported,
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 4ab88dafb235..46943651da23 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -49,6 +49,7 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
 struct dma_map_ops swiotlb_dma_ops = {
         .alloc = dma_direct_alloc_coherent,
         .free = dma_direct_free_coherent,
+        .mmap = dma_direct_mmap_coherent,
         .map_sg = swiotlb_map_sg_attrs,
         .unmap_sg = swiotlb_unmap_sg_attrs,
         .dma_supported = swiotlb_dma_supported,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index b1ec983dcec8..062bf20e6dd4 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -65,6 +65,24 @@ void dma_direct_free_coherent(struct device *dev, size_t size,
 #endif
 }
 
+int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+                             void *cpu_addr, dma_addr_t handle, size_t size,
+                             struct dma_attrs *attrs)
+{
+        unsigned long pfn;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+        pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+#else
+        pfn = page_to_pfn(virt_to_page(cpu_addr));
+#endif
+        return remap_pfn_range(vma, vma->vm_start,
+                               pfn + vma->vm_pgoff,
+                               vma->vm_end - vma->vm_start,
+                               vma->vm_page_prot);
+}
+
 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                              int nents, enum dma_data_direction direction,
                              struct dma_attrs *attrs)
@@ -154,6 +172,7 @@ static inline void dma_direct_sync_single(struct device *dev,
 struct dma_map_ops dma_direct_ops = {
         .alloc = dma_direct_alloc_coherent,
         .free = dma_direct_free_coherent,
+        .mmap = dma_direct_mmap_coherent,
         .map_sg = dma_direct_map_sg,
         .unmap_sg = dma_direct_unmap_sg,
         .dma_supported = dma_direct_dma_supported,
@@ -211,20 +230,3 @@ static int __init dma_init(void)
 }
 fs_initcall(dma_init);
 
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-                      void *cpu_addr, dma_addr_t handle, size_t size)
-{
-        unsigned long pfn;
-
-#ifdef CONFIG_NOT_COHERENT_CACHE
-        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-        pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
-#else
-        pfn = page_to_pfn(virt_to_page(cpu_addr));
-#endif
-        return remap_pfn_range(vma, vma->vm_start,
-                               pfn + vma->vm_pgoff,
-                               vma->vm_end - vma->vm_start,
-                               vma->vm_page_prot);
-}
-EXPORT_SYMBOL_GPL(dma_mmap_coherent);
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index cb87301ccd55..dda3d9ad1094 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -613,6 +613,7 @@ static u64 vio_dma_get_required_mask(struct device *dev)
 struct dma_map_ops vio_dma_mapping_ops = {
         .alloc = vio_dma_iommu_alloc_coherent,
         .free = vio_dma_iommu_free_coherent,
+        .mmap = dma_direct_mmap_coherent,
         .map_sg = vio_dma_iommu_map_sg,
         .unmap_sg = vio_dma_iommu_unmap_sg,
         .map_page = vio_dma_iommu_map_page,
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 6f3676f1559f..db5db02e885f 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -10,6 +10,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
+#include <asm-generic/dma-coherent.h>
 
 /*
  * Managed DMA API
@@ -218,3 +219,33 @@ void dmam_release_declared_memory(struct device *dev)
 EXPORT_SYMBOL(dmam_release_declared_memory);
 
 #endif
+
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ */
+int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                    void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+        int ret = -ENXIO;
+#ifdef CONFIG_MMU
+        unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+        unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
+        unsigned long off = vma->vm_pgoff;
+
+        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+                return ret;
+
+        if (off < count && user_count <= (count - off)) {
+                ret = remap_pfn_range(vma, vma->vm_start,
+                                      pfn + off,
+                                      user_count << PAGE_SHIFT,
+                                      vma->vm_page_prot);
+        }
+#endif  /* CONFIG_MMU */
+
+        return ret;
+}
+EXPORT_SYMBOL(dma_common_mmap);
diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
index abfb2682de7f..2be8a2dbc868 100644
--- a/include/asm-generic/dma-coherent.h
+++ b/include/asm-generic/dma-coherent.h
@@ -29,6 +29,7 @@ dma_mark_declared_memory_occupied(struct device *dev,
 #else
 #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
 #define dma_release_from_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
 #endif
 
 #endif
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 2e248d8924dc..9073aeb3bb1a 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -176,4 +176,41 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
 
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                           void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @handle: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+ * into user space. The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+static inline int
+dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+               dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+        struct dma_map_ops *ops = get_dma_ops(dev);
+        BUG_ON(!ops);
+        if (ops->mmap)
+                return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+                                        void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+        DEFINE_DMA_ATTRS(attrs);
+        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+        return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
+
 #endif