author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-09-25 16:52:49 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-09-29 05:40:16 -0400
commit		8c8a0ec57ee285ff407e9a64b3a5a37eaf800ad8 (patch)
tree		0f9f00769aba9a43d444eac4435a117a93315137 /arch/arm/include/asm/dma-mapping.h
parent		2638b4dbe768aba023a06acd8e7eba708bb76ee6 (diff)
[ARM] dma: use new dmabounce_sync_for_xxx() for dma_sync_single_xxx()
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/include/asm/dma-mapping.h')
-rw-r--r--	arch/arm/include/asm/dma-mapping.h	150
1 file changed, 76 insertions(+), 74 deletions(-)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 29404f71ab87..c003ad390def 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -184,6 +184,76 @@ int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t handle, size_t size);
 
 
+#ifdef CONFIG_DMABOUNCE
+/*
+ * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
+ * and utilize bounce buffers as needed to work around limited DMA windows.
+ *
+ * On the SA-1111, a bug limits DMA to only certain regions of RAM.
+ * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
+ * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
+ *
+ * The following are helper functions used by the dmabounce subystem
+ *
+ */
+
+/**
+ * dmabounce_register_dev
+ *
+ * @dev: valid struct device pointer
+ * @small_buf_size: size of buffers to use with small buffer pool
+ * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ *
+ * This function should be called by low-level platform code to register
+ * a device as requireing DMA buffer bouncing. The function will allocate
+ * appropriate DMA pools for the device.
+ *
+ */
+extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
+
+/**
+ * dmabounce_unregister_dev
+ *
+ * @dev: valid struct device pointer
+ *
+ * This function should be called by low-level platform code when device
+ * that was previously registered with dmabounce_register_dev is removed
+ * from the system.
+ *
+ */
+extern void dmabounce_unregister_dev(struct device *);
+
+/**
+ * dma_needs_bounce
+ *
+ * @dev: valid struct device pointer
+ * @dma_handle: dma_handle of unbounced buffer
+ * @size: size of region being mapped
+ *
+ * Platforms that utilize the dmabounce mechanism must implement
+ * this function.
+ *
+ * The dmabounce routines call this function whenever a dma-mapping
+ * is requested to determine whether a given buffer needs to be bounced
+ * or not. The function must return 0 if the buffer is OK for
+ * DMA access and 1 if the buffer needs to be bounced.
+ *
+ */
+extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
+
+/*
+ * Private functions
+ */
+int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
+		size_t, enum dma_data_direction);
+int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
+		size_t, enum dma_data_direction);
+#else
+#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir)	(1)
+#define dmabounce_sync_for_device(dev,dma,off,sz,dir)	(1)
+#endif /* CONFIG_DMABOUNCE */
+
+
 /**
  * dma_map_single - map a single buffer for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -308,12 +378,14 @@ dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
  * must first the perform a dma_sync_for_device, and then the
  * device again owns the buffer.
  */
-#ifndef CONFIG_DMABOUNCE
 static inline void
 dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
 			      unsigned long offset, size_t size,
 			      enum dma_data_direction dir)
 {
+	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+		return;
+
 	if (!arch_is_coherent())
 		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
 }
@@ -323,13 +395,12 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
 			      unsigned long offset, size_t size,
 			      enum dma_data_direction dir)
 {
+	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
+		return;
+
 	if (!arch_is_coherent())
 		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
 }
-#else
-extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
-extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
-#endif
 
 static inline void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
@@ -354,74 +425,5 @@ extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum d
 extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
 
 
-#ifdef CONFIG_DMABOUNCE
-/*
- * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
- * and utilize bounce buffers as needed to work around limited DMA windows.
- *
- * On the SA-1111, a bug limits DMA to only certain regions of RAM.
- * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
- * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
- *
- * The following are helper functions used by the dmabounce subystem
- *
- */
-
-/**
- * dmabounce_register_dev
- *
- * @dev: valid struct device pointer
- * @small_buf_size: size of buffers to use with small buffer pool
- * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
- *
- * This function should be called by low-level platform code to register
- * a device as requireing DMA buffer bouncing. The function will allocate
- * appropriate DMA pools for the device.
- *
- */
-extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
-
-/**
- * dmabounce_unregister_dev
- *
- * @dev: valid struct device pointer
- *
- * This function should be called by low-level platform code when device
- * that was previously registered with dmabounce_register_dev is removed
- * from the system.
- *
- */
-extern void dmabounce_unregister_dev(struct device *);
-
-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-
-/*
- * Private functions
- */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
-		size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
-		size_t, enum dma_data_direction);
-#else
-#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir)	(1)
-#define dmabounce_sync_for_device(dev,dma,off,sz,dir)	(1)
-#endif /* CONFIG_DMABOUNCE */
-
 #endif /* __KERNEL__ */
 #endif
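
For illustration: the dmabounce registration helpers declared in the moved block are meant to be called from low-level platform code when a device that needs bouncing is added or removed. A minimal sketch, not part of this commit; the function names, pool sizes, and error handling are illustrative assumptions.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical platform hook: register a device for DMA bouncing. */
static int example_platform_dev_add(struct device *dev)
{
	int ret;

	/* Allocate a 512-byte small-buffer pool and a 4096-byte
	 * large-buffer pool for this device (sizes are illustrative). */
	ret = dmabounce_register_dev(dev, 512, 4096);
	if (ret)
		dev_err(dev, "dmabounce registration failed (%d)\n", ret);
	return ret;
}

/* Hypothetical platform hook: tear the pools down again on removal. */
static void example_platform_dev_remove(struct device *dev)
{
	dmabounce_unregister_dev(dev);
}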
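
The dma_needs_bounce() contract in the comment block (return 0 if the buffer is safe for DMA, 1 if it must be bounced) is supplied per platform. A minimal sketch in the spirit of the IXP425 case mentioned in the comment, assuming a hypothetical device whose inbound window covers only the first 64MB of RAM; the window size macro is an illustrative assumption.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Illustrative assumption: the DMA window spans the first 64MB. */
#define EXAMPLE_DMA_WINDOW_TOP	(64UL << 20)

int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	/* 1 (bounce) if any byte of the region lies beyond the window. */
	return (dma_addr + size) > EXAMPLE_DMA_WINDOW_TOP;
}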
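
The sync hunks implement a fall-through pattern: with CONFIG_DMABOUNCE disabled, dmabounce_sync_for_cpu()/dmabounce_sync_for_device() are macros that evaluate to 1, so dma_sync_single_range_for_*() always proceed to the cache maintenance; with it enabled, the real routines return 0 once they have handled a bounced buffer, short-circuiting the cache maintenance. Driver-side usage is unchanged either way. A hedged sketch of that usage; the device, handle, and handler names are illustrative.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical receive-completion path for a streaming DMA buffer. */
static void example_rx_complete(struct device *dev, dma_addr_t handle,
				void *cpu_buf, size_t len)
{
	/* Give the buffer back to the CPU; any bounce copy happens inside. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... inspect cpu_buf here ... */

	/* Return ownership to the device for the next transfer. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}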