author      Russell King <rmk@dyn-67.arm.linux.org.uk>      2008-09-25 17:16:22 -0400
committer   Russell King <rmk+kernel@arm.linux.org.uk>      2008-09-29 05:40:22 -0400
commit      125ab12acf64ff86b55d20e14db20becd917b7c4 (patch)
tree        ce39f81e6a3a409314735ce0cca1e366ea8fd94e
parent      8c8a0ec57ee285ff407e9a64b3a5a37eaf800ad8 (diff)
[ARM] dma: fix dmabounce dma_sync_xxx() implementations
The dmabounce dma_sync_xxx() implementations have been broken for
quite some time; they all copy data between the DMA buffer and
the CPU visible buffer irrespective of the change of ownership.
(IOW, a DMA_FROM_DEVICE mapping copies data from the DMA buffer
to the CPU buffer during a call to dma_sync_single_for_device().)
Fix it by getting rid of sync_single(), moving the contents into
the recently created dmabounce_sync_for_xxx() functions and adjusting
appropriately.
This also makes it possible to properly support the DMA range sync
functions.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
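[Editor's note] To make the ownership rule concrete: dma_sync_single_for_device() hands the buffer to the device, so any copy must go CPU buffer -> bounce buffer, and only for DMA_TO_DEVICE/DMA_BIDIRECTIONAL mappings; dma_sync_single_for_cpu() is the reverse. A minimal standalone sketch of that rule — the helper names and freestanding form are illustrative, not kernel API:

#include <string.h>

/* Same value order as the kernel's enum */
enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* sync_for_device: CPU is done writing; push its view into the bounce buffer */
static void bounce_sync_for_device(char *cpu_buf, char *bounce, size_t off,
                                   size_t sz, enum dma_data_direction dir)
{
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                memcpy(bounce + off, cpu_buf + off, sz);
        /* never copy bounce->CPU here: the device may not have written yet */
}

/* sync_for_cpu: device is done; pull its writes back into the CPU buffer */
static void bounce_sync_for_cpu(char *cpu_buf, char *bounce, size_t off,
                                size_t sz, enum dma_data_direction dir)
{
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                memcpy(cpu_buf + off, bounce + off, sz);
}

The broken sync_single() below keyed the memcpy() direction off the mapping direction alone, ignoring which side was taking ownership — exactly the bug the commit message describes.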
-rw-r--r--   arch/arm/common/dmabounce.c          | 144
-rw-r--r--   arch/arm/include/asm/dma-mapping.h   |  26
2 files changed, 69 insertions, 101 deletions
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 1cb880b734df..d4b0c608fdee 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -205,6 +205,21 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
 
 /* ************************************************** */
 
+static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
+		dma_addr_t dma_addr, const char *where)
+{
+	if (!dev || !dev->archdata.dmabounce)
+		return NULL;
+	if (dma_mapping_error(dev, dma_addr)) {
+		if (dev)
+			dev_err(dev, "Trying to %s invalid mapping\n", where);
+		else
+			pr_err("unknown device: Trying to %s invalid mapping\n", where);
+		return NULL;
+	}
+	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
+}
+
 static inline dma_addr_t
 map_single(struct device *dev, void *ptr, size_t size,
 		enum dma_data_direction dir)
@@ -274,19 +289,7 @@ static inline void
 unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
-	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-	struct safe_buffer *buf = NULL;
-
-	/*
-	 * Trying to unmap an invalid mapping
-	 */
-	if (dma_mapping_error(dev, dma_addr)) {
-		dev_err(dev, "Trying to unmap invalid mapping\n");
-		return;
-	}
-
-	if (device_info)
-		buf = find_safe_buffer(device_info, dma_addr);
+	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
 
 	if (buf) {
 		BUG_ON(buf->size != size);
@@ -296,7 +299,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
 			buf->safe, buf->safe_dma_addr);
 
-		DO_STATS ( device_info->bounce_count++ );
+		DO_STATS(dev->archdata.dmabounce->bounce_count++);
 
 		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
 			void *ptr = buf->ptr;
@@ -317,74 +320,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 			dmac_clean_range(ptr, ptr + size);
 			outer_clean_range(__pa(ptr), __pa(ptr) + size);
 		}
-		free_safe_buffer(device_info, buf);
-	}
-}
-
-static int sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
-{
-	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-	struct safe_buffer *buf = NULL;
-
-	if (device_info)
-		buf = find_safe_buffer(device_info, dma_addr);
-
-	if (buf) {
-		/*
-		 * Both of these checks from original code need to be
-		 * commented out b/c some drivers rely on the following:
-		 *
-		 * 1) Drivers may map a large chunk of memory into DMA space
-		 *    but only sync a small portion of it. Good example is
-		 *    allocating a large buffer, mapping it, and then
-		 *    breaking it up into small descriptors. No point
-		 *    in syncing the whole buffer if you only have to
-		 *    touch one descriptor.
-		 *
-		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
-		 *    usually only synced in one dir at a time.
-		 *
-		 * See drivers/net/eepro100.c for examples of both cases.
-		 *
-		 * -ds
-		 *
-		 * BUG_ON(buf->size != size);
-		 * BUG_ON(buf->direction != dir);
-		 */
-
-		dev_dbg(dev,
-			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-			buf->safe, buf->safe_dma_addr);
-
-		DO_STATS ( device_info->bounce_count++ );
-
-		switch (dir) {
-		case DMA_FROM_DEVICE:
-			dev_dbg(dev,
-				"%s: copy back safe %p to unsafe %p size %d\n",
-				__func__, buf->safe, buf->ptr, size);
-			memcpy(buf->ptr, buf->safe, size);
-			break;
-		case DMA_TO_DEVICE:
-			dev_dbg(dev,
-				"%s: copy out unsafe %p to safe %p, size %d\n",
-				__func__,buf->ptr, buf->safe, size);
-			memcpy(buf->safe, buf->ptr, size);
-			break;
-		case DMA_BIDIRECTIONAL:
-			BUG();	/* is this allowed?  what does it mean? */
-		default:
-			BUG();
-		}
-		/*
-		 * No need to sync the safe buffer - it was allocated
-		 * via the coherent allocators.
-		 */
-		return 0;
-	} else {
-		return 1;
+		free_safe_buffer(dev->archdata.dmabounce, buf);
 	}
 }
 
@@ -447,18 +383,54 @@ dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		unsigned long off, size_t sz, enum dma_data_direction dir)
 {
-	dev_dbg(dev, "%s(dma=%#lx,off=%#lx,sz=%zx,dir=%x)\n",
+	struct safe_buffer *buf;
+
+	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
 		__func__, addr, off, sz, dir);
-	return sync_single(dev, addr, off + sz, dir);
+
+	buf = find_safe_buffer_dev(dev, addr, __func__);
+	if (!buf)
+		return 1;
+
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
+
+	DO_STATS(dev->archdata.dmabounce->bounce_count++);
+
+	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
+			__func__, buf->safe + off, buf->ptr + off, sz);
+		memcpy(buf->ptr + off, buf->safe + off, sz);
+	}
+	return 0;
 }
 EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
 int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 		unsigned long off, size_t sz, enum dma_data_direction dir)
 {
-	dev_dbg(dev, "%s(dma=%#lx,off=%#lx,sz=%zx,dir=%x)\n",
+	struct safe_buffer *buf;
+
+	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
 		__func__, addr, off, sz, dir);
-	return sync_single(dev, addr, off + sz, dir);
+
+	buf = find_safe_buffer_dev(dev, addr, __func__);
+	if (!buf)
+		return 1;
+
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
+
+	DO_STATS(dev->archdata.dmabounce->bounce_count++);
+
+	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
+			__func__,buf->ptr + off, buf->safe + off, sz);
+		memcpy(buf->safe + off, buf->ptr + off, sz);
+	}
+	return 0;
 }
 EXPORT_SYMBOL(dmabounce_sync_for_device);
 
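[Editor's note] The BUG_ON(buf->size != size) check that the deleted sync_single() carried as a commented-out relic exists because drivers legitimately sync only a sub-range of one large mapping. A hedged sketch of that pattern, which the off/sz parameters above now support by offsetting into both buffers — my_desc, my_ring and my_ring_push() are hypothetical; dma_sync_single_range_for_device() is the generic kernel API that reaches dmabounce_sync_for_device() on this platform:

#include <linux/device.h>
#include <linux/dma-mapping.h>

struct my_desc {
        u32     addr;
        u32     len;
};

struct my_ring {
        struct my_desc  *descs;         /* CPU view of the whole ring */
        dma_addr_t      dma;            /* handle from dma_map_single() */
};

/* Hand descriptor i to the device without syncing the whole ring */
static void my_ring_push(struct device *dev, struct my_ring *ring, int i)
{
        unsigned long off = i * sizeof(struct my_desc);

        ring->descs[i].len = 0;         /* ... CPU fills in descriptor i ... */
        dma_sync_single_range_for_device(dev, ring->dma, off,
                                         sizeof(struct my_desc), DMA_TO_DEVICE);
}

With the fix, only the bytes [off, off + sz) are copied into the bounce buffer; the old path passed off + sz as the size and always copied from offset zero.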
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index c003ad390def..1204dc958c43 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -242,6 +242,15 @@ extern void dmabounce_unregister_dev(struct device *);
 extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
 
 /*
+ * The DMA API, implemented by dmabounce.c.  See below for descriptions.
+ */
+extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
+extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size,
+		enum dma_data_direction dir);
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+
+/*
  * Private functions
  */
 int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
@@ -251,7 +260,6 @@ int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
 #else
 #define dmabounce_sync_for_cpu(dev,dma,off,sz,dir)	(1)
 #define dmabounce_sync_for_device(dev,dma,off,sz,dir)	(1)
-#endif /* CONFIG_DMABOUNCE */
 
 
 /**
@@ -268,7 +276,6 @@ int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
  * can regain ownership by calling dma_unmap_single() or
  * dma_sync_single_for_cpu().
  */
-#ifndef CONFIG_DMABOUNCE
 static inline dma_addr_t
 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 		enum dma_data_direction dir)
@@ -278,9 +285,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 
 	return virt_to_dma(dev, cpu_addr);
 }
-#else
-extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
-#endif
+
 
 /**
  * dma_map_page - map a portion of a page for streaming DMA
@@ -297,7 +302,6 @@ extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
  * can regain ownership by calling dma_unmap_page() or
  * dma_sync_single_for_cpu().
  */
-#ifndef CONFIG_DMABOUNCE
 static inline dma_addr_t
 dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size,
@@ -308,11 +312,6 @@ dma_map_page(struct device *dev, struct page *page,
 
 	return page_to_dma(dev, page) + offset;
 }
-#else
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction dir);
-#endif
 
 /**
  * dma_unmap_single - unmap a single buffer previously mapped
@@ -328,16 +327,13 @@ extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-#ifndef CONFIG_DMABOUNCE
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
 		enum dma_data_direction dir)
 {
 	/* nothing to do */
 }
-#else
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-#endif
+#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
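[Editor's note] For orientation, the net effect on the header, reconstructed (abridged) from the hunks above: the per-function #ifndef/#else/#endif triples collapse into one declaration block per configuration:

#ifdef CONFIG_DMABOUNCE
/* dmabounce.c provides the real implementations */
extern dma_addr_t dma_map_single(struct device *, void *, size_t, enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *, unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
#else
#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir)	(1)
#define dmabounce_sync_for_device(dev,dma,off,sz,dir)	(1)
/* the static inline dma_map_single(), dma_map_page() and
 * dma_unmap_single() shown in the hunks above live here */
#endif /* CONFIG_DMABOUNCE */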