Diffstat (limited to 'arch/arm/include/asm/dma-mapping.h')
-rw-r--r--	arch/arm/include/asm/dma-mapping.h	458
1 file changed, 458 insertions(+), 0 deletions(-)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..45329fca1b64
--- /dev/null
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -0,0 +1,458 @@
#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm.h> /* need struct page */

#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>

/*
 * DMA-consistent mapping functions. These allocate/free a region of
 * uncached, unwrite-buffered mapped memory space for use with DMA
 * devices. This is the "generic" version. The PCI specific version
 * is in pci.h
 *
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	return dev->dma_mask && *dev->dma_mask != 0;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
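
/*
 * Illustrative sketch (not part of the original header): a driver that
 * can only address the low 24 bits negotiates its mask at probe time.
 * The helper name is hypothetical.
 */
static inline int example_probe_set_mask(struct device *dev)
{
	/* Fail the probe if a 24-bit mask cannot be honoured. */
	if (dma_set_mask(dev, 0x00ffffff))
		return -EIO;
	return 0;
}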

static inline int dma_get_cache_alignment(void)
{
	return 32;
}

static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
	return !!arch_is_coherent();
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}
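
/*
 * Illustrative sketch (hypothetical helper): every streaming mapping
 * should be checked with dma_mapping_error() before the handle is
 * handed to the hardware.
 */
static inline int example_check_mapping(struct device *dev, dma_addr_t handle)
{
	return dma_mapping_error(dev, handle) ? -ENOMEM : 0;
}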

/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void
dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
		     dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * are invalid from the moment this call starts executing.
 */
extern void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle);
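
/*
 * Illustrative sketch (hypothetical helpers): a matched allocate/free
 * pair for a page-sized descriptor ring held in coherent memory.
 */
static inline void *example_ring_alloc(struct device *dev, dma_addr_t *dma)
{
	/* Returns the CPU address; *dma receives the device-view address. */
	return dma_alloc_coherent(dev, PAGE_SIZE, dma, GFP_KERNEL);
}

static inline void example_ring_free(struct device *dev, void *ring,
				     dma_addr_t dma)
{
	/* Size, CPU address and handle must match the allocation. */
	dma_free_coherent(dev, PAGE_SIZE, ring, dma);
}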

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t handle, size_t size);
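
/*
 * Illustrative sketch (hypothetical helper): a driver mmap() handler
 * forwarding to dma_mmap_coherent(). The buffer must stay allocated
 * until the user mapping goes away, as noted above.
 */
static inline int example_mmap_buf(struct device *dev,
				   struct vm_area_struct *vma,
				   void *cpu_addr, dma_addr_t handle)
{
	return dma_mmap_coherent(dev, vma, cpu_addr, handle,
				 vma->vm_end - vma->vm_start);
}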


/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t handle, size_t size);
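
/*
 * Illustrative sketch (hypothetical helper): frame buffer memory is a
 * typical writecombine user - buffered CPU writes speed up rendering
 * while the mapping stays uncached for reads.
 */
static inline void *example_fb_alloc(struct device *dev, size_t fb_len,
				     dma_addr_t *dma)
{
	return dma_alloc_writecombine(dev, fb_len, dma, GFP_KERNEL);
}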


/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
#ifndef CONFIG_DMABOUNCE
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		dma_cache_maint(cpu_addr, size, dir);

	return virt_to_dma(dev, (unsigned long)cpu_addr);
}
#else
extern dma_addr_t dma_map_single(struct device *, void *, size_t, enum dma_data_direction);
#endif

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, (int)dir);
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
		 enum dma_data_direction dir)
{
	/* nothing to do */
}
#else
extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#endif
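
/*
 * Illustrative sketch (hypothetical transmit path): map a buffer, let
 * the device read from it, then unmap with the same handle, size and
 * direction. The transfer itself is elided.
 */
static inline int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... start the device and wait for its completion interrupt ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}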

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
	       enum dma_data_direction dir)
{
	dma_unmap_single(dev, handle, size, (int)dir);
}
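
/*
 * Illustrative sketch (hypothetical helper): mapping one whole page for
 * a device-to-memory transfer. The matching dma_unmap_page() call must
 * use the returned handle and the same size and direction.
 */
static inline dma_addr_t example_map_rx_page(struct device *dev,
					     struct page *page)
{
	return dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
}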

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
 * above dma_map_single interface. Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length. They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements.
 * (for example via virtual mapping capabilities)
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single are
 * the same here.
 */
#ifndef CONFIG_DMABOUNCE
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt;

		sg->dma_address = page_to_dma(dev, sg_page(sg)) + sg->offset;
		virt = sg_virt(sg);

		if (!arch_is_coherent())
			dma_cache_maint(virt, sg->length, dir);
	}

	return nents;
}
#else
extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Unmap a set of streaming mode DMA translations.
 * Again, CPU read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
	     enum dma_data_direction dir)
{
	/* nothing to do */
}
#else
extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif
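
/*
 * Illustrative sketch (hypothetical helper): note that the descriptor
 * loop runs over the value returned by dma_map_sg(), not the original
 * nents, and fetches addresses via sg_dma_address()/sg_dma_len().
 */
static inline int example_map_sg(struct device *dev, struct scatterlist *sg,
				 int nents, enum dma_data_direction dir)
{
	int i, mapped = dma_map_sg(dev, sg, nents, dir);

	for (i = 0; i < mapped; i++, sg++) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* ... program addr/len into one hardware descriptor ... */
		(void)addr;
		(void)len;
	}

	return mapped;
}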


/**
 * dma_sync_single_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so. At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a dma_sync_single_for_device(), and then the
 * device again owns the buffer.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
			enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
			   enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
}
#else
extern void dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#endif
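
/*
 * Illustrative sketch (hypothetical helper): the CPU inspects a mapped
 * receive buffer between transfers without unmapping it, handing
 * ownership back to the device afterwards.
 */
static inline void example_peek_rx(struct device *dev, dma_addr_t handle,
				   size_t len)
{
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... examine the received data through the kernel mapping ... */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}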


/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as dma_sync_single_for_* but for a scatter-gather list,
 * same rules and usage.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		    enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt = sg_virt(sg);
		if (!arch_is_coherent())
			dma_cache_maint(virt, sg->length, dir);
	}
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt = sg_virt(sg);
		if (!arch_is_coherent())
			dma_cache_maint(virt, sg->length, dir);
	}
}
#else
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 *
 */
extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);
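
/*
 * Illustrative sketch (hypothetical platform hooks): register a device
 * for bouncing at setup time and unregister it on removal. The 2KB
 * small-pool and 64KB large-pool sizes are made-up tuning values.
 */
static inline int example_platform_setup(struct device *dev)
{
	return dmabounce_register_dev(dev, 2048, 65536);
}

static inline void example_platform_teardown(struct device *dev)
{
	dmabounce_unregister_dev(dev);
}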

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 *
 */
extern int dma_needs_bounce(struct device *, dma_addr_t, size_t);
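
/*
 * Illustrative sketch (hypothetical policy): a platform whose inbound
 * window covers only the first 64MB of bus addresses would implement
 * dma_needs_bounce() with a check along these lines.
 */
static inline int example_needs_bounce(dma_addr_t addr, size_t size)
{
	/* Bounce anything that extends past the 64MB window. */
	return (addr + size) > 0x04000000;
}
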
#endif /* CONFIG_DMABOUNCE */

#endif /* __KERNEL__ */
#endif
