| | | |
|---|---|---|
| author | Christoph Hellwig <hch@lst.de> | 2016-01-20 18:02:05 -0500 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-20 20:09:18 -0500 |
| commit | e1c7e324539ada3b2b13ca2898bcb4948a9ef9db | |
| tree | 4590a89d149b205e65f0e64959460926367e3f9c /include/asm-generic | |
| parent | bd38118f9c57b22f57f9c2fccca4a82aef15cc5f | |
dma-mapping: always provide the dma_map_ops based implementation
Move the generic implementation to <linux/dma-mapping.h> now that all
architectures support it, and remove the HAVE_DMA_ATTR Kconfig symbol
now that every architecture supports DMA attributes.
[valentinrothberg@gmail.com: remove leftovers in Kconfig]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
Cc: Hans-Christian Egtvedt <egtvedt@samfundet.no>
Cc: Helge Deller <deller@gmx.de>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Jesper Nilsson <jesper.nilsson@axis.com>
Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Mikael Starvik <starvik@axis.com>
Cc: Steven Miao <realmz6@gmail.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Valentin Rothberg <valentinrothberg@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/asm-generic')

| mode | file | deletions |
|---|---|---|
| -rw-r--r-- | include/asm-generic/dma-mapping-broken.h | 95 |
| -rw-r--r-- | include/asm-generic/dma-mapping-common.h | 358 |

2 files changed, 0 insertions, 453 deletions
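The two headers removed below are the old stub interface for architectures without DMA support and the shared dma_map_ops-based helpers that this commit moves into <linux/dma-mapping.h>. For context, here is a minimal sketch (not part of this commit; `my_arch_dma_ops` and the file path are illustrative names) of the architecture-side contract the generic implementation relies on: each architecture exposes a `struct dma_map_ops` instance through `get_dma_ops()`, and the common header builds `dma_map_single()`, `dma_map_sg()` and friends on top of it.

```c
/*
 * Hypothetical arch glue, e.g. arch/myarch/include/asm/dma-mapping.h.
 * "my_arch_dma_ops" is an illustrative name, not from this commit.
 */
#include <linux/dma-mapping.h>

extern struct dma_map_ops my_arch_dma_ops;	/* defined in arch code */

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* an arch with per-device ops would consult dev->archdata here */
	return &my_arch_dma_ops;
}
```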
```diff
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
deleted file mode 100644
index 6c32af918c2f..000000000000
--- a/include/asm-generic/dma-mapping-broken.h
+++ /dev/null
@@ -1,95 +0,0 @@
```

```c
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

/* define the dma api to allow compilation but not linking of
 * dma dependent code. Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig
 */

struct scatterlist;

extern void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flag);

extern void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t dma_handle);

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	/* attrs is not supported and ignored */
	return dma_alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	/* attrs is not supported and ignored */
	dma_free_coherent(dev, size, cpu_addr, dma_handle);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction);

extern void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction);

extern int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction);

extern void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction);

extern dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction);

extern void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction);

extern void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction);

extern void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction);

extern void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction);

#define dma_sync_single_for_device dma_sync_single_for_cpu
#define dma_sync_single_range_for_device dma_sync_single_range_for_cpu
#define dma_sync_sg_for_device dma_sync_sg_for_cpu

extern int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr);

extern int
dma_supported(struct device *dev, u64 mask);

extern int
dma_set_mask(struct device *dev, u64 mask);

extern int
dma_get_cache_alignment(void);

extern void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction);

#endif /* _ASM_GENERIC_DMA_MAPPING_H */
```
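The comment at the top of the removed stub header describes a rule that outlives it: DMA-dependent code must gate itself on HAS_DMA in Kconfig rather than rely on link-time failure. A minimal Kconfig sketch of that pattern (the MY_DMA_DRIVER symbol is hypothetical, not from this commit):

```
config MY_DMA_DRIVER
	tristate "Hypothetical driver using the dma-mapping API"
	depends on HAS_DMA
	help
	  Illustrative only: code that calls dma_map_single() and
	  friends should not be buildable on !HAS_DMA configurations.
```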
```diff
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
deleted file mode 100644
index b1bc954eccf3..000000000000
--- a/include/asm-generic/dma-mapping-common.h
+++ /dev/null
@@ -1,358 +0,0 @@
```

```c
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm-generic/dma-coherent.h>

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

/*
 * dma_maps_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);

}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
}

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif

#endif
```
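To make the consolidated interface concrete, here is a short driver-side sketch (illustrative, not from this commit; `my_dev_send` and its parameters are made-up names) of the streaming-DMA pattern these inlines implement: map, check the handle with dma_mapping_error() since ops->map_page can fail, then unmap.

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper: hand "buf" to the hardware via streaming DMA. */
static int my_dev_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))	/* mapping may legitimately fail */
		return -ENOMEM;

	/* ... program the device with "handle" and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
```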
