Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/common/dmabounce.c	|  24
-rw-r--r--	arch/arm/include/asm/dma-mapping.h	| 150
2 files changed, 76 insertions, 98 deletions
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 0a98148279b1..1cb880b734df 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -444,30 +444,6 @@ dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	unmap_single(dev, dma_addr, size, dir);
 }
 
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_addr,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	dev_dbg(dev, "%s(dma=%#x,off=%#lx,size=%zx,dir=%x)\n",
-		__func__, dma_addr, offset, size, dir);
-
-	if (sync_single(dev, dma_addr, offset + size, dir))
-		dma_cache_maint(dma_to_virt(dev, dma_addr) + offset, size, dir);
-}
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_addr,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	dev_dbg(dev, "%s(dma=%#x,off=%#lx,size=%zx,dir=%x)\n",
-		__func__, dma_addr, offset, size, dir);
-
-	if (sync_single(dev, dma_addr, offset + size, dir))
-		dma_cache_maint(dma_to_virt(dev, dma_addr) + offset, size, dir);
-}
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-
 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		unsigned long off, size_t sz, enum dma_data_direction dir)
 {
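From a driver's point of view nothing changes here: the same generic call now routes through the bounce-aware inline helpers added to dma-mapping.h below. A minimal caller sketch (the variables handle, offset and len are invented for illustration and assumed to come from an earlier dma_map_single()):

	/* CPU is about to read data the device wrote into part of a streaming
	 * mapping; with or without CONFIG_DMABOUNCE this same call works,
	 * since the bounce check now lives in the inline helper. */
	dma_sync_single_range_for_cpu(dev, handle, offset, len, DMA_FROM_DEVICE);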
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 29404f71ab87..c003ad390def 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -184,6 +184,76 @@ int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t handle, size_t size);
 
 
+#ifdef CONFIG_DMABOUNCE
+/*
+ * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
+ * and utilize bounce buffers as needed to work around limited DMA windows.
+ *
+ * On the SA-1111, a bug limits DMA to only certain regions of RAM.
+ * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
+ * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
+ *
+ * The following are helper functions used by the dmabounce subystem
+ *
+ */
+
+/**
+ * dmabounce_register_dev
+ *
+ * @dev: valid struct device pointer
+ * @small_buf_size: size of buffers to use with small buffer pool
+ * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ *
+ * This function should be called by low-level platform code to register
+ * a device as requireing DMA buffer bouncing. The function will allocate
+ * appropriate DMA pools for the device.
+ *
+ */
+extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
+
+/**
+ * dmabounce_unregister_dev
+ *
+ * @dev: valid struct device pointer
+ *
+ * This function should be called by low-level platform code when device
+ * that was previously registered with dmabounce_register_dev is removed
+ * from the system.
+ *
+ */
+extern void dmabounce_unregister_dev(struct device *);
+
+/**
+ * dma_needs_bounce
+ *
+ * @dev: valid struct device pointer
+ * @dma_handle: dma_handle of unbounced buffer
+ * @size: size of region being mapped
+ *
+ * Platforms that utilize the dmabounce mechanism must implement
+ * this function.
+ *
+ * The dmabounce routines call this function whenever a dma-mapping
+ * is requested to determine whether a given buffer needs to be bounced
+ * or not. The function must return 0 if the buffer is OK for
+ * DMA access and 1 if the buffer needs to be bounced.
+ *
+ */
+extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
+
+/*
+ * Private functions
+ */
+int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
+		size_t, enum dma_data_direction);
+int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
+		size_t, enum dma_data_direction);
+#else
+#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir)	(1)
+#define dmabounce_sync_for_device(dev,dma,off,sz,dir)	(1)
+#endif /* CONFIG_DMABOUNCE */
+
+
 /**
  * dma_map_single - map a single buffer for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
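The comments added above spell out the contract a dmabounce platform must meet. As a minimal sketch (not part of this patch; the window size, pool size, and setup-function name below are invented for illustration), a platform with a limited PCI inbound window might implement it like this:

#define PLAT_DMA_WINDOW_SIZE	0x04000000	/* hypothetical 64MB inbound window */

/* Return 0 if the buffer can be DMA'd in place, 1 if it must be bounced. */
int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	return (dma_addr + size) > PLAT_DMA_WINDOW_SIZE;
}

/* Hypothetical bring-up hook: give each DMA-capable device a 2KB
 * small-buffer pool and no large pool (0 is allowed per the comment above). */
static int plat_setup_dmabounce(struct device *dev)
{
	return dmabounce_register_dev(dev, 2048, 0);
}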
@@ -308,12 +378,14 @@ dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
  * must first the perform a dma_sync_for_device, and then the
  * device again owns the buffer.
  */
-#ifndef CONFIG_DMABOUNCE
 static inline void
 dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
 		unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
+	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+		return;
+
 	if (!arch_is_coherent())
 		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
 }
@@ -323,13 +395,12 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
 		unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
+	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
+		return;
+
 	if (!arch_is_coherent())
 		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
 }
-#else
-extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
-extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
-#endif
 
 static inline void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
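Note how the two hunks above pair with the stub macros added earlier: when CONFIG_DMABOUNCE is unset, dmabounce_sync_for_cpu() expands to the constant (1), the early-return test is compile-time false, and each helper reduces to its previous body, as in this sketch of the effective code:

	static inline void
	dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
			unsigned long offset, size_t size,
			enum dma_data_direction dir)
	{
		/* !dmabounce_sync_for_cpu(...) becomes !(1): always false,
		 * so the compiler discards the early return entirely. */
		if (!arch_is_coherent())
			dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
	}

With CONFIG_DMABOUNCE set, the real dmabounce_sync_for_cpu() returns 0 for a buffer it bounced (dmabounce.c handles the copy itself), so cache maintenance on the original buffer is correctly skipped.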
@@ -354,74 +425,5 @@ extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum d
 extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
 
 
-#ifdef CONFIG_DMABOUNCE
-/*
- * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
- * and utilize bounce buffers as needed to work around limited DMA windows.
- *
- * On the SA-1111, a bug limits DMA to only certain regions of RAM.
- * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
- * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
- *
- * The following are helper functions used by the dmabounce subystem
- *
- */
-
-/**
- * dmabounce_register_dev
- *
- * @dev: valid struct device pointer
- * @small_buf_size: size of buffers to use with small buffer pool
- * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
- *
- * This function should be called by low-level platform code to register
- * a device as requireing DMA buffer bouncing. The function will allocate
- * appropriate DMA pools for the device.
- *
- */
-extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
-
-/**
- * dmabounce_unregister_dev
- *
- * @dev: valid struct device pointer
- *
- * This function should be called by low-level platform code when device
- * that was previously registered with dmabounce_register_dev is removed
- * from the system.
- *
- */
-extern void dmabounce_unregister_dev(struct device *);
-
-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-
-/*
- * Private functions
- */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
-		size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
-		size_t, enum dma_data_direction);
-#else
-#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir)	(1)
-#define dmabounce_sync_for_device(dev,dma,off,sz,dir)	(1)
-#endif /* CONFIG_DMABOUNCE */
-
 #endif /* __KERNEL__ */
 #endif