author     Stefano Stabellini <stefano.stabellini@eu.citrix.com>   2013-10-29 21:21:27 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>          2013-11-08 15:16:07 -0500
commit     fbd989b1d73e3b3565dad5227a581e6f456c895f (patch)
tree       e3ed5d21f6ca786e144b8b3f86018d8e0249ad2c /arch/arm/include
parent     3b284bde702c191e3c63168e2d627d90d2b2515b (diff)
arm: make SWIOTLB available
IOMMU_HELPER is needed because SWIOTLB calls iommu_is_span_boundary,
provided by lib/iommu_helper.c.
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
CC: will.deacon@arm.com
Acked-by: Russell King <rmk+kernel@arm.linux.org.uk>
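For context, the helper that creates the IOMMU_HELPER dependency answers whether a run of allocation slots would cross an alignment boundary, which SWIOTLB needs when carving bounce buffers out of its pool. The stand-alone sketch below illustrates that span-boundary check in user space; the function name span_crosses_boundary() and the test values are illustrative, not the kernel's lib/iommu_helper.c verbatim.

#include <assert.h>
#include <stdio.h>

/*
 * Sketch of the span-boundary test SWIOTLB relies on: returns non-zero
 * if the slots [index, index + nr) would cross a boundary of
 * boundary_size slots (boundary_size must be a power of two).
 * 'shift' is the offset of slot 0 relative to the boundary grid.
 */
static int span_crosses_boundary(unsigned int index, unsigned int nr,
				 unsigned long shift,
				 unsigned long boundary_size)
{
	assert((boundary_size & (boundary_size - 1)) == 0);
	shift = (shift + index) & (boundary_size - 1);
	return shift + nr > boundary_size;
}

int main(void)
{
	/* A 4-slot span starting 2 slots before an 8-slot boundary crosses it. */
	printf("%d\n", span_crosses_boundary(6, 4, 0, 8));	/* 1 */
	/* The same span placed at the start of a boundary window does not. */
	printf("%d\n", span_crosses_boundary(0, 4, 0, 8));	/* 0 */
	return 0;
}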
Changes in v9:
- remove unneeded include asm/cacheflush.h;
- just return 0 if !dev->dma_mask in dma_capable (see the sketch after this changelog).
Changes in v8:
- use __phys_to_pfn and __pfn_to_phys.
Changes in v7:
- dma_mark_clean: empty implementation;
- in dma_capable use coherent_dma_mask if dma_mask hasn't been
allocated.
Changes in v6:
- check for dev->dma_mask being NULL in dma_capable.
Changes in v5:
- implement dma_mark_clean using dmac_flush_range.
Changes in v3:
- dma_capable: do not treat dma_mask as a limit;
- remove SWIOTLB dependency on NEED_SG_DMA_LENGTH.
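The dma_capable() behaviour these changelog entries converge on is easiest to see with concrete values. The stand-alone sketch below copies the mask/limit logic from the hunk further down, with the struct device argument replaced by a plain mask pointer so it can run in user space; it is an illustration, not the header itself.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-alone copy of the checks added to dma_capable() by this patch. */
static bool dma_capable_sketch(const uint64_t *dma_mask, uint64_t addr, size_t size)
{
	uint64_t limit, mask;

	if (!dma_mask)		/* v9: no mask allocated -> not capable */
		return false;

	mask = *dma_mask;

	/* For a full-width mask (~0ULL) limit is 0 and no size cap applies. */
	limit = (mask + 1) & ~mask;
	if (limit && size > limit)
		return false;

	/* Both the first and the last byte of the buffer must fit under the mask. */
	if ((addr | (addr + size - 1)) & ~mask)
		return false;

	return true;
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL;

	/* Buffer entirely below 4 GiB: capable. */
	printf("%d\n", dma_capable_sketch(&mask32, 0x80000000ULL, 0x1000));	/* 1 */
	/* Buffer straddling the 4 GiB boundary: not capable. */
	printf("%d\n", dma_capable_sketch(&mask32, 0xfffff000ULL, 0x2000));	/* 0 */
	/* No mask allocated at all: not capable (the v9 change). */
	printf("%d\n", dma_capable_sketch(NULL, 0x1000, 0x1000));		/* 0 */
	return 0;
}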
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/dma-mapping.h  33
1 file changed, 33 insertions(+), 0 deletions(-)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 5b579b951503..1ad2c171054b 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -86,6 +86,39 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 }
 #endif
 
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	unsigned int offset = paddr & ~PAGE_MASK;
+	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+	unsigned int offset = dev_addr & ~PAGE_MASK;
+	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	u64 limit, mask;
+
+	if (!dev->dma_mask)
+		return 0;
+
+	mask = *dev->dma_mask;
+
+	limit = (mask + 1) & ~mask;
+	if (limit && size > limit)
+		return 0;
+
+	if ((addr | (addr + size - 1)) & ~mask)
+		return 0;
+
+	return 1;
+}
+
+static inline void dma_mark_clean(void *addr, size_t size) { }
+
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
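Taken together, the two new translation helpers only convert the page frame number and carry the byte offset within the page across unchanged. The user-space toy model below shows that round trip; it assumes an identity pfn-to-bus mapping, whereas the real pfn_to_dma()/dma_to_pfn() are machine-specific, so treat it as a sketch of the shape of the code rather than the header itself.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

typedef uint64_t phys_addr_t;
typedef uint64_t dma_addr_t;

/* Toy identity mapping standing in for the platform's pfn_to_dma()/dma_to_pfn(). */
static dma_addr_t toy_pfn_to_dma(unsigned long pfn)      { return (dma_addr_t)pfn << PAGE_SHIFT; }
static unsigned long toy_dma_to_pfn(dma_addr_t dev_addr) { return (unsigned long)(dev_addr >> PAGE_SHIFT); }

/* Same shape as the new helpers: translate the page, carry the offset across. */
static dma_addr_t toy_phys_to_dma(phys_addr_t paddr)
{
	unsigned int offset = paddr & ~PAGE_MASK;
	return toy_pfn_to_dma(paddr >> PAGE_SHIFT) + offset;
}

static phys_addr_t toy_dma_to_phys(dma_addr_t dev_addr)
{
	unsigned int offset = dev_addr & ~PAGE_MASK;
	return ((phys_addr_t)toy_dma_to_pfn(dev_addr) << PAGE_SHIFT) + offset;
}

int main(void)
{
	phys_addr_t paddr = 0x80001234ULL;	/* arbitrary address with a sub-page offset */
	dma_addr_t dev_addr = toy_phys_to_dma(paddr);

	/* The sub-page offset (0x234) survives the pfn round trip. */
	printf("paddr=0x%llx dev=0x%llx back=0x%llx\n",
	       (unsigned long long)paddr,
	       (unsigned long long)dev_addr,
	       (unsigned long long)toy_dma_to_phys(dev_addr));
	return 0;
}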