about summary refs log tree commit diff stats
path: root/arch/arm/include/asm/dma-mapping.h
diff options
context:
space:
mode:
authorStefano Stabellini <stefano.stabellini@eu.citrix.com>2013-10-15 11:47:14 -0400
committerStefano Stabellini <stefano.stabellini@eu.citrix.com>2013-10-15 11:47:14 -0400
commit06e6295bcecefea9dc29fc84b5fd6848061365a0 (patch)
treeed978998c49f5bade8571675968a939dc830dd42 /arch/arm/include/asm/dma-mapping.h
parent61e6cfa80de5760bbe406f4e815b7739205754d2 (diff)
arm: make SWIOTLB available
IOMMU_HELPER is needed because SWIOTLB calls iommu_is_span_boundary, provided by lib/iommu_helper.c. Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> CC: will.deacon@arm.com CC: linux@arm.linux.org.uk Changes in v8: - use __phys_to_pfn and __pfn_to_phys. Changes in v7: - dma_mark_clean: empty implementation; - in dma_capable use coherent_dma_mask if dma_mask hasn't been allocated. Changes in v6: - check for dev->dma_mask being NULL in dma_capable. Changes in v5: - implement dma_mark_clean using dmac_flush_range. Changes in v3: - dma_capable: do not treat dma_mask as a limit; - remove SWIOTLB dependency on NEED_SG_DMA_LENGTH.
Diffstat (limited to 'arch/arm/include/asm/dma-mapping.h')
-rw-r--r--arch/arm/include/asm/dma-mapping.h37
1 file changed, 37 insertions, 0 deletions
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 5b579b951503..01b5a3d92197 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -10,6 +10,7 @@
10 10
11#include <asm-generic/dma-coherent.h> 11#include <asm-generic/dma-coherent.h>
12#include <asm/memory.h> 12#include <asm/memory.h>
13#include <asm/cacheflush.h>
13 14
14#define DMA_ERROR_CODE (~0) 15#define DMA_ERROR_CODE (~0)
15extern struct dma_map_ops arm_dma_ops; 16extern struct dma_map_ops arm_dma_ops;
@@ -86,6 +87,42 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
86} 87}
87#endif 88#endif
88 89
90static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
91{
92 unsigned int offset = paddr & ~PAGE_MASK;
93 return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
94}
95
96static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
97{
98 unsigned int offset = dev_addr & ~PAGE_MASK;
99 return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
100}
101
102static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
103{
104 u64 limit, mask;
105
106 if (dev->dma_mask)
107 mask = *dev->dma_mask;
108 else
109 mask = dev->coherent_dma_mask;
110
111 if (mask == 0)
112 return 0;
113
114 limit = (mask + 1) & ~mask;
115 if (limit && size > limit)
116 return 0;
117
118 if ((addr | (addr + size - 1)) & ~mask)
119 return 0;
120
121 return 1;
122}
123
/* Intentionally empty on ARM (see commit log: "dma_mark_clean: empty
 * implementation"); nothing to do here. */
static inline void dma_mark_clean(void *addr, size_t size)
{
}
125
89/* 126/*
90 * DMA errors are defined by all-bits-set in the DMA address. 127 * DMA errors are defined by all-bits-set in the DMA address.
91 */ 128 */