-rw-r--r--	arch/arc/mm/dma.c               |   2
-rw-r--r--	arch/arm/mm/dma-mapping-nommu.c |  45
-rw-r--r--	arch/arm/mm/dma-mapping.c       |   2
-rw-r--r--	arch/arm64/mm/dma-mapping.c     |   4
-rw-r--r--	arch/mips/mm/dma-default.c      |   2
-rw-r--r--	drivers/base/dma-coherent.c     | 164
-rw-r--r--	drivers/base/dma-mapping.c      |   2
-rw-r--r--	include/linux/dma-mapping.h     |  40
8 files changed, 180 insertions, 81 deletions
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 2a07e6ecafbd..71d3efff99d3 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -117,7 +117,7 @@ static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	if (off < count && user_count <= (count - off)) {
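The arc hunk above shows the calling pattern this rename preserves in every arch mmap hook (the arm, arm64, mips and drivers/base hunks below are the same one-line change): the hook first offers the vma to the per-device coherent pool, and only falls through to its generic remap path when dma_mmap_from_dev_coherent() returns 0. A minimal sketch of that shape, for orientation only; my_arch_dma_mmap is a made-up name and this code is not part of the patch:

static int my_arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			    void *cpu_addr, dma_addr_t dma_addr, size_t size,
			    unsigned long attrs)
{
	int ret = -ENXIO;

	/*
	 * Returns 1 when cpu_addr belongs to a per-device coherent pool;
	 * in that case *ret already holds the remap_pfn_range() result.
	 */
	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* Otherwise map the buffer through the arch's generic path. */
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}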
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index 90ee354d803e..6db5fc26d154 100644
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -40,9 +40,21 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
 
 {
 	const struct dma_map_ops *ops = &dma_noop_ops;
+	void *ret;
 
 	/*
-	 * We are here because:
+	 * Try generic allocator first if we are advertised that
+	 * consistency is not required.
+	 */
+
+	if (attrs & DMA_ATTR_NON_CONSISTENT)
+		return ops->alloc(dev, size, dma_handle, gfp, attrs);
+
+	ret = dma_alloc_from_global_coherent(size, dma_handle);
+
+	/*
+	 * dma_alloc_from_global_coherent() may fail because:
+	 *
 	 * - no consistent DMA region has been defined, so we can't
 	 *   continue.
 	 * - there is no space left in consistent DMA region, so we
@@ -50,11 +62,8 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
 	 * advertised that consistency is not required.
 	 */
 
-	if (attrs & DMA_ATTR_NON_CONSISTENT)
-		return ops->alloc(dev, size, dma_handle, gfp, attrs);
-
-	WARN_ON_ONCE(1);
-	return NULL;
+	WARN_ON_ONCE(ret == NULL);
+	return ret;
 }
 
 static void arm_nommu_dma_free(struct device *dev, size_t size,
@@ -63,14 +72,31 @@ static void arm_nommu_dma_free(struct device *dev, size_t size,
 {
 	const struct dma_map_ops *ops = &dma_noop_ops;
 
-	if (attrs & DMA_ATTR_NON_CONSISTENT)
+	if (attrs & DMA_ATTR_NON_CONSISTENT) {
 		ops->free(dev, size, cpu_addr, dma_addr, attrs);
-	else
-		WARN_ON_ONCE(1);
+	} else {
+		int ret = dma_release_from_global_coherent(get_order(size),
+							   cpu_addr);
+
+		WARN_ON_ONCE(ret == 0);
+	}
 
 	return;
 }
 
+static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			      unsigned long attrs)
+{
+	int ret;
+
+	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
+		return ret;
+
+	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+
 static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
 	enum dma_data_direction dir)
 {
@@ -173,6 +199,7 @@ static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist
 const struct dma_map_ops arm_nommu_dma_ops = {
 	.alloc			= arm_nommu_dma_alloc,
 	.free			= arm_nommu_dma_free,
+	.mmap			= arm_nommu_dma_mmap,
 	.map_page		= arm_nommu_dma_map_page,
 	.unmap_page		= arm_nommu_dma_unmap_page,
 	.map_sg			= arm_nommu_dma_map_sg,
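Taken together, the nommu hunks change the allocator's decision tree: DMA_ATTR_NON_CONSISTENT requests still go straight to dma_noop_ops, everything else is now served from (and released back to) the global coherent pool instead of hitting WARN_ON_ONCE(1) and failing, and the new .mmap hook lets userspace map those global-pool buffers. A hedged driver's-eye sketch of what this means on such a platform; example_probe and the buffer size are invented for illustration and are not part of the patch:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int example_probe(struct device *dev)
{
	dma_addr_t dma;
	void *buf;

	/*
	 * Plain coherent allocation: with arm_nommu_dma_ops this is now
	 * satisfied by dma_alloc_from_global_coherent().
	 */
	buf = dma_alloc_attrs(dev, SZ_4K, &dma, GFP_KERNEL, 0);
	if (!buf)
		return -ENOMEM;

	/* ... use the buffer ... */

	/* Goes back to the pool via dma_release_from_global_coherent(). */
	dma_free_attrs(dev, SZ_4K, buf, dma, 0);
	return 0;
}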
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e7380bafbfa6..fcf1473d6fed 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -851,7 +851,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long pfn = dma_to_pfn(dev, dma_addr);
 	unsigned long off = vma->vm_pgoff;
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index e90cd1db42a8..f27d4dd04384 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -329,7 +329,7 @@ static int __swiotlb_mmap(struct device *dev,
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
 					     is_device_dma_coherent(dev));
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	return __swiotlb_mmap_pfn(vma, pfn, size);
@@ -706,7 +706,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
 					     is_device_dma_coherent(dev));
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index e08598c70b3e..8e78251eccc2 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -232,7 +232,7 @@ static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	else
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	if (off < count && user_count <= (count - off)) {
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 2ae24c28e70c..1c152aed6b82 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de
 {
 	if (dev && dev->dma_mem)
 		return dev->dma_mem;
-	return dma_coherent_default_memory;
+	return NULL;
 }
 
 static inline dma_addr_t dma_get_device_base(struct device *dev,
@@ -165,34 +165,15 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 }
 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
 
-/**
- * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
- *
- * @dev:	device from which we allocate memory
- * @size:	size of requested memory area
- * @dma_handle:	This will be filled with the correct dma handle
- * @ret:	This pointer will be filled with the virtual address
- *		to allocated area.
- *
- * This function should be only called from per-arch dma_alloc_coherent()
- * to support allocation from per-device coherent memory pools.
- *
- * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
- */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
-				       dma_addr_t *dma_handle, void **ret)
+static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
+		ssize_t size, dma_addr_t *dma_handle)
 {
-	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
 	int order = get_order(size);
 	unsigned long flags;
 	int pageno;
 	int dma_memory_map;
+	void *ret;
 
-	if (!mem)
-		return 0;
-
-	*ret = NULL;
 	spin_lock_irqsave(&mem->spinlock, flags);
 
 	if (unlikely(size > (mem->size << PAGE_SHIFT)))
@@ -203,21 +184,50 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 		goto err;
 
 	/*
-	 * Memory was found in the per-device area.
+	 * Memory was found in the coherent area.
 	 */
-	*dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
-	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
+	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+	ret = mem->virt_base + (pageno << PAGE_SHIFT);
 	dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
 	spin_unlock_irqrestore(&mem->spinlock, flags);
 	if (dma_memory_map)
-		memset(*ret, 0, size);
+		memset(ret, 0, size);
 	else
-		memset_io(*ret, 0, size);
+		memset_io(ret, 0, size);
 
-	return 1;
+	return ret;
 
 err:
 	spin_unlock_irqrestore(&mem->spinlock, flags);
+	return NULL;
+}
+
+/**
+ * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
+ * @dev:	device from which we allocate memory
+ * @size:	size of requested memory area
+ * @dma_handle:	This will be filled with the correct dma handle
+ * @ret:	This pointer will be filled with the virtual address
+ *		to allocated area.
+ *
+ * This function should be only called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+		dma_addr_t *dma_handle, void **ret)
+{
+	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+	if (!mem)
+		return 0;
+
+	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+	if (*ret)
+		return 1;
+
 	/*
 	 * In the case where the allocation can not be satisfied from the
 	 * per-device area, try to fall back to generic memory if the
@@ -225,25 +235,20 @@ err:
 	 */
 	return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
-EXPORT_SYMBOL(dma_alloc_from_coherent);
+EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
 
-/**
- * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
- * @dev:	device from which the memory was allocated
- * @order:	the order of pages allocated
- * @vaddr:	virtual address of allocated pages
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, releases that memory.
- *
- * Returns 1 if we correctly released the memory, or 0 if
- * dma_release_coherent() should proceed with releasing memory from
- * generic pools.
- */
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
 {
-	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+	if (!dma_coherent_default_memory)
+		return NULL;
+
+	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
+			dma_handle);
+}
 
+static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
+				       int order, void *vaddr)
+{
 	if (mem && vaddr >= mem->virt_base && vaddr <
 	   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
 		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
@@ -256,28 +261,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dma_release_from_coherent);
 
 /**
- * dma_mmap_from_coherent() - try to mmap the memory allocated from
- * per-device coherent memory pool to userspace
+ * dma_release_from_dev_coherent() - free memory to device coherent memory pool
  * @dev:	device from which the memory was allocated
- * @vma:	vm_area for the userspace memory
- * @vaddr:	cpu address returned by dma_alloc_from_coherent
- * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
- * @ret:	result from remap_pfn_range()
+ * @order:	the order of pages allocated
+ * @vaddr:	virtual address of allocated pages
  *
  * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, maps that memory to the provided vma.
+ * coherent memory pool and if so, releases that memory.
  *
- * Returns 1 if we correctly mapped the memory, or 0 if the caller should
- * proceed with mapping memory from generic pools.
+ * Returns 1 if we correctly released the memory, or 0 if the caller should
+ * proceed with releasing memory from generic pools.
  */
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
-			   void *vaddr, size_t size, int *ret)
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
 {
 	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
 
+	return __dma_release_from_coherent(mem, order, vaddr);
+}
+EXPORT_SYMBOL(dma_release_from_dev_coherent);
+
+int dma_release_from_global_coherent(int order, void *vaddr)
+{
+	if (!dma_coherent_default_memory)
+		return 0;
+
+	return __dma_release_from_coherent(dma_coherent_default_memory, order,
+			vaddr);
+}
+
+static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
+{
 	if (mem && vaddr >= mem->virt_base && vaddr + size <=
 	   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
 		unsigned long off = vma->vm_pgoff;
@@ -296,7 +312,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dma_mmap_from_coherent);
+
+/**
+ * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
+ * @dev:	device from which the memory was allocated
+ * @vma:	vm_area for the userspace memory
+ * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
+ * @size:	size of the memory buffer allocated
+ * @ret:	result from remap_pfn_range()
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
+ */
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+			   void *vaddr, size_t size, int *ret)
+{
+	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
+}
+EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
+
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
+				   size_t size, int *ret)
+{
+	if (!dma_coherent_default_memory)
+		return 0;
+
+	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
+					vaddr, size, ret);
+}
 
 /*
  * Support for reserved memory regions defined in device tree
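The net effect of the dma-coherent.c hunks: dev_get_coherent_memory() no longer falls back silently to the default pool, the allocate/release/mmap bodies move into the __dma_alloc_from_coherent()/__dma_release_from_coherent()/__dma_mmap_from_coherent() helpers, and each helper gets a thin *_dev_coherent wrapper (per-device pool) plus a *_global_coherent wrapper (default pool). For a device to hit the *_dev_coherent path it still needs its own pool declared up front. A rough sketch of that setup, with made-up addresses and sizes; the dma_declare_coherent_memory() return convention changed in later kernels, so treat this as illustrative only, not part of the patch:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

#define EXAMPLE_POOL_PHYS	0x30000000UL	/* hypothetical carve-out */
#define EXAMPLE_POOL_SIZE	SZ_1M

static int example_declare_pool(struct device *dev)
{
	/*
	 * Attach a per-device coherent pool; afterwards
	 * dev_get_coherent_memory() returns dev->dma_mem rather than NULL,
	 * so dma_alloc_from_dev_coherent() serves this device's requests.
	 */
	if (!dma_declare_coherent_memory(dev, EXAMPLE_POOL_PHYS,
					 EXAMPLE_POOL_PHYS, EXAMPLE_POOL_SIZE,
					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE))
		return -ENOMEM;

	return 0;
}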
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 5096755d185e..b555ff9dd8fc 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -235,7 +235,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	if (off < count && user_count <= (count - off)) {
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 843ab866e0f4..03c0196a6f24 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -157,16 +157,40 @@ static inline int is_device_dma_capable(struct device *dev)
  * These three functions are only for dma allocator.
  * Don't use them in device drivers.
  */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
 				       dma_addr_t *dma_handle, void **ret);
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
 
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
 			    void *cpu_addr, size_t size, int *ret);
+
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
+int dma_release_from_global_coherent(int order, void *vaddr);
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
+				  size_t size, int *ret);
+
 #else
-#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
-#define dma_release_from_coherent(dev, order, vaddr) (0)
-#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
+#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
+#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
+
+static inline void *dma_alloc_from_global_coherent(ssize_t size,
+						   dma_addr_t *dma_handle)
+{
+	return NULL;
+}
+
+static inline int dma_release_from_global_coherent(int order, void *vaddr)
+{
+	return 0;
+}
+
+static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
+						void *cpu_addr, size_t size,
+						int *ret)
+{
+	return 0;
+}
 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
 
 #ifdef CONFIG_HAS_DMA
@@ -481,7 +505,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
 
 	BUG_ON(!ops);
 
-	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
+	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
 		return cpu_addr;
 
 	if (!arch_dma_alloc_attrs(&dev, &flag))
@@ -503,7 +527,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
 	BUG_ON(!ops);
 	WARN_ON(irqs_disabled());
 
-	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
 		return;
 
 	if (!ops->free || !cpu_addr)
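For callers of the renamed helpers (the dma_alloc_attrs()/dma_free_attrs() hunks above), the contract is unchanged apart from the names: a zero return means "no per-device pool involved, continue with the generic allocator", a non-zero return means the pool answered, and in the allocation case *ret may still be NULL when the pool is exhausted but flagged DMA_MEMORY_EXCLUSIVE. A short annotated restatement of that contract, paraphrasing the dma_alloc_attrs() flow; example_alloc_coherent and example_generic_alloc are placeholders, not kernel APIs:

static void *example_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp)
{
	void *cpu_addr;

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) {
		/*
		 * A per-device pool handled the request. cpu_addr is the
		 * buffer, or NULL if the pool is full and marked
		 * DMA_MEMORY_EXCLUSIVE (fallback forbidden).
		 */
		return cpu_addr;
	}

	/* No per-device pool (or fallback allowed): use the normal path. */
	return example_generic_alloc(dev, size, dma_handle, gfp); /* placeholder */
}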