 Documentation/driver-model/devres.txt |   1 -
 arch/x86/kernel/amd_gart_64.c         |  10 +-
 include/linux/dma-debug.h             |  11 +-
 include/linux/dma-mapping.h           | 340 +++++++++++++++++-----------------
 kernel/dma/coherent.c                 |   2 -
 kernel/dma/debug.c                    |  19 +-
 kernel/dma/mapping.c                  |  94 ----------
 kernel/dma/remap.c                    |  13 +-
 8 files changed, 215 insertions(+), 275 deletions(-)
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 841c99529d27..b277cafce71e 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -250,7 +250,6 @@ DMA
   dmaenginem_async_device_register()
   dmam_alloc_coherent()
   dmam_alloc_attrs()
-  dmam_declare_coherent_memory()
   dmam_free_coherent()
   dmam_pool_create()
   dmam_pool_destroy()
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index e0ff3ac8c127..2c0aa34af69c 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -256,7 +256,15 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	int npages;
 	int i;
 
-	if (dma_addr == DMA_MAPPING_ERROR ||
+	if (WARN_ON_ONCE(dma_addr == DMA_MAPPING_ERROR))
+		return;
+
+	/*
+	 * This driver will not always use a GART mapping, but might have
+	 * created a direct mapping instead. If that is the case there is
+	 * nothing to unmap here.
+	 */
+	if (dma_addr < iommu_bus_base ||
 	    dma_addr >= iommu_bus_base + iommu_size)
 		return;
 
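
With mapping failures now encoded solely as DMA_MAPPING_ERROR, callers are
expected to test dma_mapping_error() right after mapping and must never hand
a failed handle to an unmap routine; gart_unmap_page() now WARNs if that rule
is broken. A minimal caller-side sketch (not part of the patch; dev, buf and
len are placeholders):

	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* do not unmap a failed mapping */
	/* ... device performs DMA ... */
	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
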
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index 2ad5c363d7d5..cb422cbe587d 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -35,13 +35,12 @@ extern void debug_dma_map_single(struct device *dev, const void *addr,
 
 extern void debug_dma_map_page(struct device *dev, struct page *page,
 			       size_t offset, size_t size,
-			       int direction, dma_addr_t dma_addr,
-			       bool map_single);
+			       int direction, dma_addr_t dma_addr);
 
 extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
 extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
-				 size_t size, int direction, bool map_single);
+				 size_t size, int direction);
 
 extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 			     int nents, int mapped_ents, int direction);
@@ -95,8 +94,7 @@ static inline void debug_dma_map_single(struct device *dev, const void *addr,
 
 static inline void debug_dma_map_page(struct device *dev, struct page *page,
 				      size_t offset, size_t size,
-				      int direction, dma_addr_t dma_addr,
-				      bool map_single)
+				      int direction, dma_addr_t dma_addr)
 {
 }
 
@@ -106,8 +104,7 @@ static inline void debug_dma_mapping_error(struct device *dev,
 }
 
 static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
-					 size_t size, int direction,
-					 bool map_single)
+					 size_t size, int direction)
 {
 }
 
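
Since dma_map_single() is now implemented on top of dma_map_page_attrs()
(see the dma-mapping.h changes below), dma-debug no longer needs the
map_single flag to tell the two apart, and both record a dma_debug_single
entry. Callers of the hook simply drop the trailing argument, roughly:

	/* before */
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
	/* after */
	debug_dma_map_page(dev, page, offset, size, dir, addr);
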
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ba521d5506c9..cef2127e1d70 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -194,33 +194,6 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
 }
 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
 
-#ifdef CONFIG_HAS_DMA
-#include <asm/dma-mapping.h>
-static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
-{
-	if (dev && dev->dma_ops)
-		return dev->dma_ops;
-	return get_arch_dma_ops(dev ? dev->bus : NULL);
-}
-
-static inline void set_dma_ops(struct device *dev,
-			       const struct dma_map_ops *dma_ops)
-{
-	dev->dma_ops = dma_ops;
-}
-#else
-/*
- * Define the dma api to allow compilation of dma dependent code.
- * Code that depends on the dma-mapping API needs to set 'depends on HAS_DMA'
- * in its Kconfig, unless it already depends on <something> || COMPILE_TEST,
- * where <something> guarantuees the availability of the dma-mapping API.
- */
-static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
-{
-	return NULL;
-}
-#endif
-
 static inline bool dma_is_direct(const struct dma_map_ops *ops)
 {
 	return likely(!ops);
@@ -284,32 +257,41 @@ static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
 }
 #endif
 
-static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
-					      size_t size,
-					      enum dma_data_direction dir,
-					      unsigned long attrs)
+#ifdef CONFIG_HAS_DMA
+#include <asm/dma-mapping.h>
+
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	if (dev && dev->dma_ops)
+		return dev->dma_ops;
+	return get_arch_dma_ops(dev ? dev->bus : NULL);
+}
+
+static inline void set_dma_ops(struct device *dev,
+			       const struct dma_map_ops *dma_ops)
+{
+	dev->dma_ops = dma_ops;
+}
+
+static inline dma_addr_t dma_map_page_attrs(struct device *dev,
+		struct page *page, size_t offset, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	dma_addr_t addr;
 
 	BUG_ON(!valid_dma_direction(dir));
-	debug_dma_map_single(dev, ptr, size);
 	if (dma_is_direct(ops))
-		addr = dma_direct_map_page(dev, virt_to_page(ptr),
-				offset_in_page(ptr), size, dir, attrs);
+		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
 	else
-		addr = ops->map_page(dev, virt_to_page(ptr),
-				offset_in_page(ptr), size, dir, attrs);
-	debug_dma_map_page(dev, virt_to_page(ptr),
-			offset_in_page(ptr), size,
-			dir, addr, true);
+		addr = ops->map_page(dev, page, offset, size, dir, attrs);
+	debug_dma_map_page(dev, page, offset, size, dir, addr);
+
 	return addr;
 }
 
-static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
-					  size_t size,
-					  enum dma_data_direction dir,
-					  unsigned long attrs)
+static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
@@ -318,13 +300,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
 		dma_direct_unmap_page(dev, addr, size, dir, attrs);
 	else if (ops->unmap_page)
 		ops->unmap_page(dev, addr, size, dir, attrs);
-	debug_dma_unmap_page(dev, addr, size, dir, true);
-}
-
-static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	return dma_unmap_single_attrs(dev, addr, size, dir, attrs);
+	debug_dma_unmap_page(dev, addr, size, dir);
 }
 
 /*
@@ -363,25 +339,6 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
 		ops->unmap_sg(dev, sg, nents, dir, attrs);
 }
 
-static inline dma_addr_t dma_map_page_attrs(struct device *dev,
-					    struct page *page,
-					    size_t offset, size_t size,
-					    enum dma_data_direction dir,
-					    unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	dma_addr_t addr;
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
-		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
-	else
-		addr = ops->map_page(dev, page, offset, size, dir, attrs);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
-	return addr;
-}
-
 static inline dma_addr_t dma_map_resource(struct device *dev,
 					  phys_addr_t phys_addr,
 					  size_t size,
@@ -431,13 +388,6 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
 	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
 }
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-		dma_addr_t addr, unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
-}
-
 static inline void dma_sync_single_for_device(struct device *dev,
 					      dma_addr_t addr, size_t size,
 					      enum dma_data_direction dir)
@@ -452,13 +402,6 @@ static inline void dma_sync_single_for_device(struct device *dev,
 	debug_dma_sync_single_for_device(dev, addr, size, dir);
 }
 
-static inline void dma_sync_single_range_for_device(struct device *dev,
-		dma_addr_t addr, unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	return dma_sync_single_for_device(dev, addr + offset, size, dir);
-}
-
 static inline void
 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		    int nelems, enum dma_data_direction dir)
@@ -488,15 +431,174 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 
 }
 
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	debug_dma_mapping_error(dev, dma_addr);
+
+	if (dma_addr == DMA_MAPPING_ERROR)
+		return -ENOMEM;
+	return 0;
+}
+
+void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t flag, unsigned long attrs);
+void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t dma_handle, unsigned long attrs);
+void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp, unsigned long attrs);
+void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle);
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction dir);
+int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs);
+int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs);
+int dma_supported(struct device *dev, u64 mask);
+int dma_set_mask(struct device *dev, u64 mask);
+int dma_set_coherent_mask(struct device *dev, u64 mask);
+u64 dma_get_required_mask(struct device *dev);
+#else /* CONFIG_HAS_DMA */
+static inline dma_addr_t dma_map_page_attrs(struct device *dev,
+		struct page *page, size_t offset, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	return DMA_MAPPING_ERROR;
+}
+static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	return 0;
+}
+static inline void dma_unmap_sg_attrs(struct device *dev,
+		struct scatterlist *sg, int nents, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+}
+static inline dma_addr_t dma_map_resource(struct device *dev,
+		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	return DMA_MAPPING_ERROR;
+}
+static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return -ENOMEM;
+}
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
+{
+	return NULL;
+}
+static inline void dma_free_attrs(struct device *dev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
+{
+}
+static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+	return NULL;
+}
+static inline void dmam_free_coherent(struct device *dev, size_t size,
+		void *vaddr, dma_addr_t dma_handle)
+{
+}
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction dir)
+{
+}
+static inline int dma_get_sgtable_attrs(struct device *dev,
+		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
+		size_t size, unsigned long attrs)
+{
+	return -ENXIO;
+}
+static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs)
+{
+	return -ENXIO;
+}
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	return 0;
+}
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+	return -EIO;
+}
+static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+	return -EIO;
+}
+static inline u64 dma_get_required_mask(struct device *dev)
+{
+	return 0;
+}
+#endif /* CONFIG_HAS_DMA */
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	debug_dma_map_single(dev, ptr, size);
+	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
+			size, dir, attrs);
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t addr, unsigned long offset, size_t size,
+		enum dma_data_direction dir)
+{
+	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t addr, unsigned long offset, size_t size,
+		enum dma_data_direction dir)
+{
+	return dma_sync_single_for_device(dev, addr + offset, size, dir);
+}
+
 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
-
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction dir);
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
 
 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
@@ -516,25 +618,10 @@ bool dma_in_atomic_pool(void *start, size_t size);
 void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
 bool dma_free_from_pool(void *start, size_t size);
 
-int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs);
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
-
 int
 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
 		dma_addr_t dma_addr, size_t size, unsigned long attrs);
 
-int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs);
-#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
-
-void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t flag, unsigned long attrs);
-void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t dma_handle, unsigned long attrs);
-
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp)
 {
@@ -549,18 +636,6 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
 }
 
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	debug_dma_mapping_error(dev, dma_addr);
-
-	if (dma_addr == DMA_MAPPING_ERROR)
-		return -ENOMEM;
-	return 0;
-}
-
-int dma_supported(struct device *dev, u64 mask);
-int dma_set_mask(struct device *dev, u64 mask);
-int dma_set_coherent_mask(struct device *dev, u64 mask);
 
 static inline u64 dma_get_mask(struct device *dev)
 {
@@ -593,8 +668,6 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
 	return dma_set_mask_and_coherent(dev, mask);
 }
 
-extern u64 dma_get_required_mask(struct device *dev);
-
 #ifndef arch_setup_dma_ops
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
 			u64 size, const struct iommu_ops *iommu,
@@ -691,43 +764,12 @@ dma_mark_declared_memory_occupied(struct device *dev,
 }
 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
 
-/*
- * Managed DMA API
- */
-#ifdef CONFIG_HAS_DMA
-extern void *dmam_alloc_coherent(struct device *dev, size_t size,
-				 dma_addr_t *dma_handle, gfp_t gfp);
-extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
-			       dma_addr_t dma_handle);
-#else /* !CONFIG_HAS_DMA */
 static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
 					dma_addr_t *dma_handle, gfp_t gfp)
-{ return NULL; }
-static inline void dmam_free_coherent(struct device *dev, size_t size,
-				      void *vaddr, dma_addr_t dma_handle) { }
-#endif /* !CONFIG_HAS_DMA */
-
-extern void *dmam_alloc_attrs(struct device *dev, size_t size,
-			      dma_addr_t *dma_handle, gfp_t gfp,
-			      unsigned long attrs);
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-extern int dmam_declare_coherent_memory(struct device *dev,
-					phys_addr_t phys_addr,
-					dma_addr_t device_addr, size_t size,
-					int flags);
-extern void dmam_release_declared_memory(struct device *dev);
-#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-static inline int dmam_declare_coherent_memory(struct device *dev,
-				phys_addr_t phys_addr, dma_addr_t device_addr,
-				size_t size, gfp_t gfp)
-{
-	return 0;
-}
-
-static inline void dmam_release_declared_memory(struct device *dev)
 {
+	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
+			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
 }
-#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
 
 static inline void *dma_alloc_wc(struct device *dev, size_t size,
 				 dma_addr_t *dma_addr, gfp_t gfp)
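
A side effect of the consolidation above: every dma_*() entry point now has
an inline stub for the !CONFIG_HAS_DMA case, so code using the DMA API can be
built with COMPILE_TEST on architectures without DMA support and sees every
operation fail cleanly. A hedged sketch of what such code may rely on (dev,
page and size are placeholders):

	dma_addr_t addr = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))	/* stub always fails */
		return -ENOMEM;
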
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 597d40893862..66f0fb7e9a3a 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -223,7 +223,6 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
 	 */
 	return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
-EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
 
 void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
 {
@@ -268,7 +267,6 @@ int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
 
 	return __dma_release_from_coherent(mem, order, vaddr);
 }
-EXPORT_SYMBOL(dma_release_from_dev_coherent);
 
 int dma_release_from_global_coherent(int order, void *vaddr)
 {
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 164706da2a73..23cf5361bcf1 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -49,7 +49,6 @@
 
 enum {
 	dma_debug_single,
-	dma_debug_page,
 	dma_debug_sg,
 	dma_debug_coherent,
 	dma_debug_resource,
@@ -1300,8 +1299,7 @@ void debug_dma_map_single(struct device *dev, const void *addr,
 EXPORT_SYMBOL(debug_dma_map_single);
 
 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
-			size_t size, int direction, dma_addr_t dma_addr,
-			bool map_single)
+			size_t size, int direction, dma_addr_t dma_addr)
 {
 	struct dma_debug_entry *entry;
 
@@ -1316,7 +1314,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 		return;
 
 	entry->dev = dev;
-	entry->type = dma_debug_page;
+	entry->type = dma_debug_single;
 	entry->pfn = page_to_pfn(page);
 	entry->offset = offset,
 	entry->dev_addr = dma_addr;
@@ -1324,9 +1322,6 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 	entry->direction = direction;
 	entry->map_err_type = MAP_ERR_NOT_CHECKED;
 
-	if (map_single)
-		entry->type = dma_debug_single;
-
 	check_for_stack(dev, page, offset);
 
 	if (!PageHighMem(page)) {
@@ -1378,10 +1373,10 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 EXPORT_SYMBOL(debug_dma_mapping_error);
 
 void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
-			  size_t size, int direction, bool map_single)
+			  size_t size, int direction)
 {
 	struct dma_debug_entry ref = {
-		.type = dma_debug_page,
+		.type = dma_debug_single,
 		.dev = dev,
 		.dev_addr = addr,
 		.size = size,
@@ -1390,10 +1385,6 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
 
 	if (unlikely(dma_debug_disabled()))
 		return;
-
-	if (map_single)
-		ref.type = dma_debug_single;
-
 	check_unmap(&ref);
 }
 EXPORT_SYMBOL(debug_dma_unmap_page);
@@ -1521,7 +1512,6 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
 
 	add_dma_entry(entry);
 }
-EXPORT_SYMBOL(debug_dma_alloc_coherent);
 
 void debug_dma_free_coherent(struct device *dev, size_t size,
 			 void *virt, dma_addr_t addr)
@@ -1549,7 +1539,6 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 
 	check_unmap(&ref);
 }
-EXPORT_SYMBOL(debug_dma_free_coherent);
 
 void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
 			    int direction, dma_addr_t dma_addr)
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index d7c34d2d1ba5..a11006b6d8e8 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -46,45 +46,6 @@ static int dmam_match(struct device *dev, void *res, void *match_data)
 }
 
 /**
- * dmam_alloc_coherent - Managed dma_alloc_coherent()
- * @dev: Device to allocate coherent memory for
- * @size: Size of allocation
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- *
- * Managed dma_alloc_coherent(). Memory allocated using this function
- * will be automatically released on driver detach.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
- */
-void *dmam_alloc_coherent(struct device *dev, size_t size,
-			  dma_addr_t *dma_handle, gfp_t gfp)
-{
-	struct dma_devres *dr;
-	void *vaddr;
-
-	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
-	if (!dr)
-		return NULL;
-
-	vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
-	if (!vaddr) {
-		devres_free(dr);
-		return NULL;
-	}
-
-	dr->vaddr = vaddr;
-	dr->dma_handle = *dma_handle;
-	dr->size = size;
-
-	devres_add(dev, dr);
-
-	return vaddr;
-}
-EXPORT_SYMBOL(dmam_alloc_coherent);
-
-/**
  * dmam_free_coherent - Managed dma_free_coherent()
  * @dev: Device to free coherent memory for
  * @size: Size of allocation
@@ -144,61 +105,6 @@ void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 }
 EXPORT_SYMBOL(dmam_alloc_attrs);
 
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-
-static void dmam_coherent_decl_release(struct device *dev, void *res)
-{
-	dma_release_declared_memory(dev);
-}
-
-/**
- * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
- * @dev: Device to declare coherent memory for
- * @phys_addr: Physical address of coherent memory to be declared
- * @device_addr: Device address of coherent memory to be declared
- * @size: Size of coherent memory to be declared
- * @flags: Flags
- *
- * Managed dma_declare_coherent_memory().
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-				 dma_addr_t device_addr, size_t size, int flags)
-{
-	void *res;
-	int rc;
-
-	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
-	if (!res)
-		return -ENOMEM;
-
-	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
-					 flags);
-	if (!rc)
-		devres_add(dev, res);
-	else
-		devres_free(res);
-
-	return rc;
-}
-EXPORT_SYMBOL(dmam_declare_coherent_memory);
-
-/**
- * dmam_release_declared_memory - Managed dma_release_declared_memory().
- * @dev: Device to release declared coherent memory for
- *
- * Managed dmam_release_declared_memory().
- */
-void dmam_release_declared_memory(struct device *dev)
-{
-	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
-}
-EXPORT_SYMBOL(dmam_release_declared_memory);
-
-#endif
-
 /*
  * Create scatter-list for the already allocated DMA buffer.
  */
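
With the devres plumbing shared, dmam_alloc_coherent() itself becomes the
static inline wrapper around dmam_alloc_attrs() seen in the dma-mapping.h
hunk above; driver usage is unchanged. A typical probe-path sketch (priv and
RING_BYTES are placeholders):

	priv->ring = dmam_alloc_coherent(&pdev->dev, RING_BYTES,
					 &priv->ring_dma, GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;
	/* buffer is freed automatically on driver detach */
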
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index 18cc09fc27b9..7a723194ecbe 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -204,8 +204,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		ret = dma_alloc_from_pool(size, &page, flags);
 		if (!ret)
 			return NULL;
-		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-		return ret;
+		goto done;
 	}
 
 	page = __dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
@@ -215,8 +214,10 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	/* remove any dirty cache lines on the kernel alias */
 	arch_dma_prep_coherent(page, size);
 
-	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
-		return page; /* opaque cookie */
+	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+		ret = page; /* opaque cookie */
+		goto done;
+	}
 
 	/* create a coherent mapping */
 	ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
@@ -227,9 +228,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		return ret;
 	}
 
-	*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	memset(ret, 0, size);
-
+done:
+	*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	return ret;
 }
 
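
All three success paths in arch_dma_alloc() now converge on the done: label,
so *dma_handle is assigned in exactly one place. The resulting control flow,
condensed (error handling elided; the usual !gfpflags_allow_blocking() guard
around the atomic-pool path is assumed from the surrounding context, not
shown in this hunk):

	if (!gfpflags_allow_blocking(flags)) {
		ret = dma_alloc_from_pool(size, &page, flags);
		if (!ret)
			return NULL;
		goto done;
	}
	page = __dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
		ret = page;	/* opaque cookie */
		goto done;
	}
	ret = dma_common_contiguous_remap(page, size, VM_USERMAP, /* ... */);
	memset(ret, 0, size);
done:
	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return ret;
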