author     Marek Szyprowski <m.szyprowski@samsung.com>   2012-02-10 13:55:20 -0500
committer  Marek Szyprowski <m.szyprowski@samsung.com>   2012-05-21 09:06:18 -0400
commit     15237e1f505b3e5c2276f240b01cd2133e110cbc
tree       989e8a8580420ad3759a7bab81cd86347a3dadca
parent     2a550e73d3e5f040a3e8eb733c942ab352eafb36
ARM: dma-mapping: move all dma bounce code to separate dma ops structure
This patch removes the dma bounce hooks from the common dma mapping
implementation on the ARM architecture and creates a separate set of
dma_map_ops for dma bounce devices.
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
 arch/arm/common/dmabounce.c        | 62
 arch/arm/include/asm/dma-mapping.h | 99
 arch/arm/mm/dma-mapping.c          | 79
 3 files changed, 120 insertions(+), 120 deletions(-)
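For readers unfamiliar with the dma_map_ops mechanism this patch builds on, the sketch below illustrates the dispatch pattern in isolation: each device can carry its own table of DMA function pointers, callers always go through one generic entry point, and a device with special requirements (such as a bounce-buffer device) simply gets a different table installed. This is a simplified user-space illustration, not the kernel's actual definitions; the type layouts, default_ops/bounce_ops tables, and the printf bodies are hypothetical stand-ins.

```c
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel types involved. */
typedef unsigned long dma_addr_t;
struct page { unsigned long pfn; };
enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

struct device;

/* A table of function pointers, one slot per DMA operation. */
struct dma_map_ops {
    dma_addr_t (*map_page)(struct device *dev, struct page *page,
                           unsigned long offset, size_t size,
                           enum dma_data_direction dir);
};

/* Each device may carry its own ops table; NULL means "use the default". */
struct device {
    const struct dma_map_ops *dma_ops;
};

static dma_addr_t default_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir)
{
    printf("default map_page: pfn=%lu off=%lu len=%zu\n",
           page->pfn, offset, size);
    return (page->pfn << 12) + offset;  /* identity-style mapping */
}

static dma_addr_t bounce_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t size,
                                  enum dma_data_direction dir)
{
    printf("bounce map_page: copying %zu bytes into a safe buffer\n", size);
    return 0x1000;                      /* pretend address of the bounce buffer */
}

static const struct dma_map_ops default_ops = { .map_page = default_map_page };
static const struct dma_map_ops bounce_ops  = { .map_page = bounce_map_page };

static const struct dma_map_ops *get_dma_ops(struct device *dev)
{
    return dev->dma_ops ? dev->dma_ops : &default_ops;
}

static void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
{
    dev->dma_ops = ops;
}

/* Callers always use one generic wrapper; the per-device indirection
 * selects the implementation, so the common path needs no special-case
 * hooks or #ifdef blocks. */
static dma_addr_t dma_map_page(struct device *dev, struct page *page,
                               unsigned long offset, size_t size,
                               enum dma_data_direction dir)
{
    return get_dma_ops(dev)->map_page(dev, page, offset, size, dir);
}

int main(void)
{
    struct page pg = { .pfn = 42 };
    struct device normal = { NULL }, limited = { NULL };

    set_dma_ops(&limited, &bounce_ops);   /* analogous to what registration now does */

    dma_map_page(&normal, &pg, 0, 256, DMA_TO_DEVICE);   /* default path */
    dma_map_page(&limited, &pg, 0, 256, DMA_TO_DEVICE);  /* bounced path */
    return 0;
}
```

With that picture in mind, the diff below converts the dmabounce hooks into exactly such a per-device ops table.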
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 32e9cc6ca7d9..813c29dc6613 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -308,8 +308,9 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir)
+static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
 {
 	dma_addr_t dma_addr;
 	int ret;
@@ -324,7 +325,7 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 		return DMA_ERROR_CODE;
 
 	if (ret == 0) {
-		__dma_page_cpu_to_dev(page, offset, size, dir);
+		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
 		return dma_addr;
 	}
 
@@ -335,7 +336,6 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 
 	return map_single(dev, page_address(page) + offset, size, dir);
 }
-EXPORT_SYMBOL(__dma_map_page);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -343,8 +343,8 @@ EXPORT_SYMBOL(__dma_map_page);
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
+static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct safe_buffer *buf;
 
@@ -353,16 +353,14 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
 	if (!buf) {
-		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
-			dma_addr & ~PAGE_MASK, size, dir);
+		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
 		return;
 	}
 
 	unmap_single(dev, buf, size, dir);
 }
-EXPORT_SYMBOL(__dma_unmap_page);
 
-int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
@@ -392,9 +390,17 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
-int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+static void dmabounce_sync_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
+		return;
+
+	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
+}
+
+static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
@@ -424,7 +430,35 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_device);
+
+static void dmabounce_sync_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
+		return;
+
+	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
+}
+
+static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (dev->archdata.dmabounce)
+		return 0;
+
+	return arm_dma_ops.set_dma_mask(dev, dma_mask);
+}
+
+static struct dma_map_ops dmabounce_ops = {
+	.map_page		= dmabounce_map_page,
+	.unmap_page		= dmabounce_unmap_page,
+	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
+	.sync_single_for_device	= dmabounce_sync_for_device,
+	.map_sg			= arm_dma_map_sg,
+	.unmap_sg		= arm_dma_unmap_sg,
+	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
+	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
+	.set_dma_mask		= dmabounce_set_mask,
+};
 
 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
 		const char *name, unsigned long size)
@@ -486,6 +520,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 #endif
 
 	dev->archdata.dmabounce = device_info;
+	set_dma_ops(dev, &dmabounce_ops);
 
 	dev_info(dev, "dmabounce: registered device\n");
 
@@ -504,6 +539,7 @@ void dmabounce_unregister_dev(struct device *dev)
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 
 	dev->archdata.dmabounce = NULL;
+	set_dma_ops(dev, NULL);
 
 	if (!device_info) {
 		dev_warn(dev,
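Note that the new dmabounce_ops table overrides only the single-page map/unmap and sync operations; the scatter-gather entries reuse the generic arm_dma_* implementations, and the sync handlers touch only buffers that were actually bounced, delegating everything else back to arm_dma_ops. The short user-space sketch below shows that fallback shape; is_bounced(), the address range it checks, and the printf bodies are hypothetical stand-ins, not kernel code.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned long dma_addr_t;

/* Stand-in for find_safe_buffer_dev(): was this address bounced? */
static bool is_bounced(dma_addr_t handle)
{
    return handle >= 0x1000 && handle < 0x2000;  /* pretend bounce pool */
}

/* Stand-in for the generic arm_dma_ops.sync_single_for_cpu path. */
static void generic_sync_for_cpu(dma_addr_t handle, size_t size)
{
    printf("generic cache maintenance for %#lx (%zu bytes)\n", handle, size);
}

/* Specialised handler: deal with bounced buffers, otherwise fall back. */
static void bounce_sync_for_cpu(dma_addr_t handle, size_t size)
{
    if (is_bounced(handle)) {
        printf("copy %zu bytes back from the safe buffer at %#lx\n",
               size, handle);
        return;                       /* bounce path handled it */
    }
    generic_sync_for_cpu(handle, size);  /* delegate to the default ops */
}
```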
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 6725a08a5c21..7a7c3c762f5f 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -85,62 +85,6 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 #endif
 
 /*
- * The DMA API is built upon the notion of "buffer ownership". A buffer
- * is either exclusively owned by the CPU (and therefore may be accessed
- * by it) or exclusively owned by the DMA device. These helper functions
- * represent the transitions between these two ownership states.
- *
- * Note, however, that on later ARMs, this notion does not work due to
- * speculative prefetches. We model our approach on the assumption that
- * the CPU does do speculative prefetches, which means we clean caches
- * before transfers and delay cache invalidation until transfer completion.
- *
- * Private support functions: these are not part of the API and are
- * liable to change. Drivers must not use these.
- */
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_cpu_to_dev(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_dev_to_cpu(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
-		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
-		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-extern int dma_supported(struct device *, u64);
-extern int dma_set_mask(struct device *, u64);
-/*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -163,6 +107,8 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
 {
 }
 
+extern int dma_supported(struct device *dev, u64 mask);
+
 /**
  * dma_alloc_coherent - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -235,7 +181,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
 extern void __init init_consistent_dma_size(unsigned long size);
 
 
-#ifdef CONFIG_DMABOUNCE
 /*
  * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
  * and utilize bounce buffers as needed to work around limited DMA windows.
@@ -275,47 +220,7 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
  */
 extern void dmabounce_unregister_dev(struct device *);
 
-/*
- * The DMA API, implemented by dmabounce.c. See below for descriptions.
- */
-extern dma_addr_t __dma_map_page(struct device *, struct page *,
-		unsigned long, size_t, enum dma_data_direction);
-extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
-		enum dma_data_direction);
-
-/*
- * Private functions
- */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-#else
-static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
-	size_t size, enum dma_data_direction dir)
-{
-	return 1;
-}
-
-static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
-	size_t size, enum dma_data_direction dir)
-{
-	return 1;
-}
-
 
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(page, offset, size, dir);
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
-	     size_t size, enum dma_data_direction dir)
-{
-	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
-		handle & ~PAGE_MASK, size, dir);
-}
-#endif /* CONFIG_DMABOUNCE */
 
 /*
  * The scatter list versions of the above methods.
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b50fa578df81..c94966891dee 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -29,6 +29,75 @@
 
 #include "mm.h"
 
+/*
+ * The DMA API is built upon the notion of "buffer ownership". A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device. These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches. We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ * Private support functions: these are not part of the API and are
+ * liable to change. Drivers must not use these.
+ */
+static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_cpu_to_dev(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_cpu_to_dev(kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_dev_to_cpu(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_dev_to_cpu(kaddr, size, dir);
+}
+
+static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_cpu_to_dev(page, off, size, dir);
+}
+
+static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_dev_to_cpu(page, off, size, dir);
+}
+
+
+static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+	__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+	     size_t size, enum dma_data_direction dir)
+{
+	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+		handle & ~PAGE_MASK, size, dir);
+}
+
 /**
  * arm_dma_map_page - map a portion of a page for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -76,9 +145,6 @@ static inline void arm_dma_sync_single_for_cpu(struct device *dev,
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
-		return;
-
 	__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
@@ -87,9 +153,6 @@ static inline void arm_dma_sync_single_for_device(struct device *dev,
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	if (!dmabounce_sync_for_device(dev, handle, size, dir))
-		return;
-
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
@@ -599,7 +662,6 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	}
 	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
-EXPORT_SYMBOL(___dma_page_cpu_to_dev);
 
 void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
@@ -619,7 +681,6 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
 		set_bit(PG_dcache_clean, &page->flags);
 }
-EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
 /**
  * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
@@ -737,9 +798,7 @@ static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
 	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
 		return -EIO;
 
-#ifndef CONFIG_DMABOUNCE
 	*dev->dma_mask = dma_mask;
-#endif
 
 	return 0;
 }
