author	Marek Szyprowski <m.szyprowski@samsung.com>	2012-02-10 13:55:20 -0500
committer	Marek Szyprowski <m.szyprowski@samsung.com>	2012-05-21 09:06:14 -0400
commit	2dc6a016bbedf18f18ad73997e5338307d6dbde9 (patch)
tree	741bf3c884304108b2e1d0a471aa6042c0d142e7 /arch
parent	a227fb92a0f5f0dd8282719386e9b3a29f0d16b2 (diff)
ARM: dma-mapping: use asm-generic/dma-mapping-common.h
This patch modifies the dma-mapping implementation on the ARM
architecture to use the common dma_map_ops structure and the
asm-generic/dma-mapping-common.h helpers.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Tested-by: Subash Patel <subash.ramaswamy@linaro.org>
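For context, the dispatch that replaces ARM's private inline wrappers works roughly as follows. This is a simplified sketch of an asm-generic/dma-mapping-common.h helper, not code from this patch; the real helper differs in minor details:

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir)
{
	/* per-device ops if set, otherwise the arm_dma_ops fallback */
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}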
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/Kconfig	1
-rw-r--r--	arch/arm/include/asm/device.h	1
-rw-r--r--	arch/arm/include/asm/dma-mapping.h	196
-rw-r--r--	arch/arm/mm/dma-mapping.c	148
4 files changed, 115 insertions, 231 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 36586dba6fa6..c8111c58a982 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -4,6 +4,7 @@ config ARM
 	select HAVE_AOUT
 	select HAVE_DMA_API_DEBUG
 	select HAVE_IDE if PCI || ISA || PCMCIA
+	select HAVE_DMA_ATTRS
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
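Selecting HAVE_DMA_ATTRS makes the *_attrs variants of the streaming API available through the common header. A minimal driver-side sketch, assuming a hypothetical example_map() helper; only the dma-attrs calls are the point here:

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

static int example_map(struct device *dev, void *buf, size_t len,
		dma_addr_t *handle)
{
	struct dma_attrs attrs;

	/* request weaker ordering for this one mapping */
	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);

	*handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, &attrs);
	return dma_mapping_error(dev, *handle) ? -EIO : 0;
}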
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 7aa368003b05..6e2cb0ee770d 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -7,6 +7,7 @@
 #define ASMARM_DEVICE_H
 
 struct dev_archdata {
+	struct dma_map_ops	*dma_ops;
 #ifdef CONFIG_DMABOUNCE
 	struct dmabounce_device_info *dmabounce;
 #endif
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index eeddbe201e24..6725a08a5c21 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -11,6 +11,27 @@
 #include <asm/memory.h>
 
 #define DMA_ERROR_CODE	(~0)
+extern struct dma_map_ops arm_dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	if (dev && dev->archdata.dma_ops)
+		return dev->archdata.dma_ops;
+	return &arm_dma_ops;
+}
+
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+	BUG_ON(!dev);
+	dev->archdata.dma_ops = ops;
+}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+	return get_dma_ops(dev)->set_dma_mask(dev, mask);
+}
 
 #ifdef __arch_page_to_dma
 #error Please update to __arch_pfn_to_dma
@@ -119,7 +140,6 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 
 extern int dma_supported(struct device *, u64);
 extern int dma_set_mask(struct device *, u64);
-
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
@@ -297,179 +317,17 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
 }
 #endif /* CONFIG_DMABOUNCE */
 
-/**
- * dma_map_single - map a single buffer for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @cpu_addr: CPU direct mapped address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_single() or
- * dma_sync_single_for_cpu().
- */
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-		size_t size, enum dma_data_direction dir)
-{
-	unsigned long offset;
-	struct page *page;
-	dma_addr_t addr;
-
-	BUG_ON(!virt_addr_valid(cpu_addr));
-	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
-	BUG_ON(!valid_dma_direction(dir));
-
-	page = virt_to_page(cpu_addr);
-	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
-	addr = __dma_map_page(dev, page, offset, size, dir);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, true);
-
-	return addr;
-}
-
-/**
- * dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_page().
- */
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	dma_addr_t addr;
-
-	BUG_ON(!valid_dma_direction(dir));
-
-	addr = __dma_map_page(dev, page, offset, size, dir);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
-	return addr;
-}
-
-/**
- * dma_unmap_single - unmap a single buffer previously mapped
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_single)
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Unmap a single streaming mode DMA translation.  The handle and size
- * must match what was provided in the previous dma_map_single() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	debug_dma_unmap_page(dev, handle, size, dir, true);
-	__dma_unmap_page(dev, handle, size, dir);
-}
-
-/**
- * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Unmap a page streaming mode DMA translation.  The handle and size
- * must match what was provided in the previous dma_map_page() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	debug_dma_unmap_page(dev, handle, size, dir, false);
-	__dma_unmap_page(dev, handle, size, dir);
-}
-
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_cpu(dev, handle, size, dir);
-
-	if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
-		return;
-
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_device(dev, handle, size, dir);
-
-	if (!dmabounce_sync_for_device(dev, handle, size, dir))
-		return;
-
-	__dma_single_cpu_to_dev(dma_to_virt(dev, handle), size, dir);
-}
-
-/**
- * dma_sync_single_range_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @offset: offset of region to start sync
- * @size: size of region to sync
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Make physical memory consistent for a single streaming mode DMA
- * translation after a transfer.
- *
- * If you perform a dma_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so.  At the
- * next point you give the PCI dma address back to the card, you
- * must first the perform a dma_sync_for_device, and then the
- * device again owns the buffer.
- */
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-		dma_addr_t handle, unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	dma_sync_single_for_cpu(dev, handle + offset, size, dir);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-		dma_addr_t handle, unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	dma_sync_single_for_device(dev, handle + offset, size, dir);
-}
-
 /*
  * The scatter list versions of the above methods.
  */
-extern int dma_map_sg(struct device *, struct scatterlist *, int,
-		enum dma_data_direction);
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
-		enum dma_data_direction);
-extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
-		enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
-		enum dma_data_direction);
-
+extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
+extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
 
 #endif /* __KERNEL__ */
 #endif
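The point of the get_dma_ops()/set_dma_ops() pair added above is that bus or platform code can now substitute a different implementation per device, while every other device keeps the arm_dma_ops fallback. A hypothetical sketch; my_iommu_dma_ops and the behind_iommu flag are assumptions, not part of this patch:

extern struct dma_map_ops my_iommu_dma_ops;	/* assumed to exist elsewhere */

static void example_setup_dma(struct device *dev, bool behind_iommu)
{
	if (behind_iommu)
		set_dma_ops(dev, &my_iommu_dma_ops);
	/* otherwise get_dma_ops(dev) falls back to &arm_dma_ops */
}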
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index a16993a5b7eb..70be6e106667 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -29,6 +29,85 @@
 
 #include "mm.h"
 
+/**
+ * arm_dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed.  The CPU
+ * can regain ownership by calling dma_unmap_page().
+ */
+static inline dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	return __dma_map_page(dev, page, offset, size, dir);
+}
+
+/**
+ * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Unmap a page streaming mode DMA translation.  The handle and size
+ * must match what was provided in the previous dma_map_page() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static inline void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	__dma_unmap_page(dev, handle, size, dir);
+}
+
+static inline void arm_dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned int offset = handle & (PAGE_SIZE - 1);
+	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+	if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
+		return;
+
+	__dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static inline void arm_dma_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned int offset = handle & (PAGE_SIZE - 1);
+	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+	if (!dmabounce_sync_for_device(dev, handle, size, dir))
+		return;
+
+	__dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+
+struct dma_map_ops arm_dma_ops = {
+	.map_page		= arm_dma_map_page,
+	.unmap_page		= arm_dma_unmap_page,
+	.map_sg			= arm_dma_map_sg,
+	.unmap_sg		= arm_dma_unmap_sg,
+	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
+	.sync_single_for_device	= arm_dma_sync_single_for_device,
+	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
+	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
+	.set_dma_mask		= arm_dma_set_mask,
+};
+EXPORT_SYMBOL(arm_dma_ops);
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
 	u64 mask = (u64)arm_dma_limit;
@@ -461,47 +540,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 }
 EXPORT_SYMBOL(dma_free_coherent);
 
-/*
- * Make an area consistent for devices.
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
- */
-void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	unsigned long paddr;
-
-	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
-	dmac_map_area(kaddr, size, dir);
-
-	paddr = __pa(kaddr);
-	if (dir == DMA_FROM_DEVICE) {
-		outer_inv_range(paddr, paddr + size);
-	} else {
-		outer_clean_range(paddr, paddr + size);
-	}
-	/* FIXME: non-speculating: flush on bidirectional mappings? */
-}
-EXPORT_SYMBOL(___dma_single_cpu_to_dev);
-
-void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
-	/* FIXME: non-speculating: not required */
-	/* don't bother invalidating if DMA to device */
-	if (dir != DMA_TO_DEVICE) {
-		unsigned long paddr = __pa(kaddr);
-		outer_inv_range(paddr, paddr + size);
-	}
-
-	dmac_unmap_area(kaddr, size, dir);
-}
-EXPORT_SYMBOL(___dma_single_dev_to_cpu);
-
 static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	size_t size, enum dma_data_direction dir,
 	void (*op)(const void *, size_t, int))
@@ -599,21 +637,18 @@ EXPORT_SYMBOL(___dma_page_dev_to_cpu);
  * Device ownership issues as mentioned for dma_map_single are the same
  * here.
  */
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir)
+int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *s;
 	int i, j;
 
-	BUG_ON(!valid_dma_direction(dir));
-
 	for_each_sg(sg, s, nents, i) {
 		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
 						s->length, dir);
 		if (dma_mapping_error(dev, s->dma_address))
 			goto bad_mapping;
 	}
-	debug_dma_map_sg(dev, sg, nents, nents, dir);
 	return nents;
 
  bad_mapping:
@@ -621,7 +656,6 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
 	return 0;
 }
-EXPORT_SYMBOL(dma_map_sg);
 
 /**
  * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
@@ -633,18 +667,15 @@ EXPORT_SYMBOL(dma_map_sg);
  * Unmap a set of streaming mode DMA translations.  Again, CPU access
  * rules concerning calls here are the same as for dma_unmap_single().
  */
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir)
+void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *s;
 	int i;
 
-	debug_dma_unmap_sg(dev, sg, nents, dir);
-
 	for_each_sg(sg, s, nents, i)
 		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
 }
-EXPORT_SYMBOL(dma_unmap_sg);
 
 /**
  * dma_sync_sg_for_cpu
@@ -653,7 +684,7 @@ EXPORT_SYMBOL(dma_unmap_sg);
  * @nents: number of buffers to map (returned from dma_map_sg)
  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  */
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *s;
@@ -667,10 +698,7 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		__dma_page_dev_to_cpu(sg_page(s), s->offset,
 				      s->length, dir);
 	}
-
-	debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
 
 /**
  * dma_sync_sg_for_device
@@ -679,7 +707,7 @@ EXPORT_SYMBOL(dma_sync_sg_for_cpu);
  * @nents: number of buffers to map (returned from dma_map_sg)
  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  */
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *s;
@@ -693,10 +721,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		__dma_page_cpu_to_dev(sg_page(s), s->offset,
 				      s->length, dir);
 	}
-
-	debug_dma_sync_sg_for_device(dev, sg, nents, dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_device);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -712,7 +737,7 @@ int dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_supported);
 
-int dma_set_mask(struct device *dev, u64 dma_mask)
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
 {
 	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
 		return -EIO;
@@ -723,7 +748,6 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
 
 	return 0;
 }
-EXPORT_SYMBOL(dma_set_mask);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES	4096
 
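Note that the driver-visible calling convention is unchanged by this patch: a streaming mapping still looks as below, only the calls now reach arm_dma_ops through the generic helpers instead of ARM-private inlines (sketch with a hypothetical example_tx()):

static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... start the transfer and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}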