author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-09-25 17:16:22 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-09-29 05:40:22 -0400
commit		125ab12acf64ff86b55d20e14db20becd917b7c4 (patch)
tree		ce39f81e6a3a409314735ce0cca1e366ea8fd94e /arch/arm/common
parent		8c8a0ec57ee285ff407e9a64b3a5a37eaf800ad8 (diff)
[ARM] dma: fix dmabounce dma_sync_xxx() implementations
The dmabounce dma_sync_xxx() implementations have been broken for
quite some time; they all copy data between the DMA buffer and
the CPU-visible buffer irrespective of the direction of the change
of ownership.  (IOW, a DMA_FROM_DEVICE mapping copies data from
the DMA buffer to the CPU buffer even during a call to
dma_sync_single_for_device(), i.e. while the buffer is being handed
back to the device.)
Fix it by getting rid of sync_single(), moving its contents into
the recently created dmabounce_sync_for_xxx() functions, and
adjusting them appropriately.
This also makes it possible to properly support the DMA range sync
functions.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
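
The ownership rule this fix enforces is easiest to see from the caller's
side. A minimal driver-style sketch (illustrative only, not part of this
commit; dev, rxbuf, RX_SIZE and examine() are hypothetical names):

	/* Streaming mapping for a receive buffer; data flows
	 * device -> memory, so the direction is DMA_FROM_DEVICE.
	 * On a dmabounce platform this may allocate a safe buffer. */
	dma_addr_t handle = dma_map_single(dev, rxbuf, RX_SIZE,
					   DMA_FROM_DEVICE);

	/* ... the device DMAs into the (possibly bounced) buffer ... */

	/* Ownership passes to the CPU: this is the only point where
	 * dmabounce should copy the safe buffer back into rxbuf. */
	dma_sync_single_for_cpu(dev, handle, RX_SIZE, DMA_FROM_DEVICE);
	examine(rxbuf);

	/* Ownership returns to the device: for DMA_FROM_DEVICE no copy
	 * is needed here, yet the old sync_single() copied anyway. */
	dma_sync_single_for_device(dev, handle, RX_SIZE, DMA_FROM_DEVICE);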
Diffstat (limited to 'arch/arm/common')
-rw-r--r--	arch/arm/common/dmabounce.c	144
1 file changed, 58 insertions(+), 86 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 1cb880b734df..d4b0c608fdee 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -205,6 +205,21 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
 
 /* ************************************************** */
 
+static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
+		dma_addr_t dma_addr, const char *where)
+{
+	if (!dev || !dev->archdata.dmabounce)
+		return NULL;
+	if (dma_mapping_error(dev, dma_addr)) {
+		if (dev)
+			dev_err(dev, "Trying to %s invalid mapping\n", where);
+		else
+			pr_err("unknown device: Trying to %s invalid mapping\n", where);
+		return NULL;
+	}
+	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
+}
+
 static inline dma_addr_t
 map_single(struct device *dev, void *ptr, size_t size,
 		enum dma_data_direction dir)
@@ -274,19 +289,7 @@ static inline void
 unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
-	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-	struct safe_buffer *buf = NULL;
-
-	/*
-	 * Trying to unmap an invalid mapping
-	 */
-	if (dma_mapping_error(dev, dma_addr)) {
-		dev_err(dev, "Trying to unmap invalid mapping\n");
-		return;
-	}
-
-	if (device_info)
-		buf = find_safe_buffer(device_info, dma_addr);
+	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
 
 	if (buf) {
 		BUG_ON(buf->size != size);
@@ -296,7 +299,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
 			buf->safe, buf->safe_dma_addr);
 
-		DO_STATS ( device_info->bounce_count++ );
+		DO_STATS(dev->archdata.dmabounce->bounce_count++);
 
 		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
 			void *ptr = buf->ptr;
@@ -317,74 +320,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 			dmac_clean_range(ptr, ptr + size);
 			outer_clean_range(__pa(ptr), __pa(ptr) + size);
 		}
-		free_safe_buffer(device_info, buf);
-	}
-}
-
-static int sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
-{
-	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-	struct safe_buffer *buf = NULL;
-
-	if (device_info)
-		buf = find_safe_buffer(device_info, dma_addr);
-
-	if (buf) {
-		/*
-		 * Both of these checks from original code need to be
-		 * commented out b/c some drivers rely on the following:
-		 *
-		 * 1) Drivers may map a large chunk of memory into DMA space
-		 *    but only sync a small portion of it. Good example is
-		 *    allocating a large buffer, mapping it, and then
-		 *    breaking it up into small descriptors. No point
-		 *    in syncing the whole buffer if you only have to
-		 *    touch one descriptor.
-		 *
-		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
-		 *    usually only synced in one dir at a time.
-		 *
-		 * See drivers/net/eepro100.c for examples of both cases.
-		 *
-		 * -ds
-		 *
-		 * BUG_ON(buf->size != size);
-		 * BUG_ON(buf->direction != dir);
-		 */
-
-		dev_dbg(dev,
-			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-			buf->safe, buf->safe_dma_addr);
-
-		DO_STATS ( device_info->bounce_count++ );
-
-		switch (dir) {
-		case DMA_FROM_DEVICE:
-			dev_dbg(dev,
-				"%s: copy back safe %p to unsafe %p size %d\n",
-				__func__, buf->safe, buf->ptr, size);
-			memcpy(buf->ptr, buf->safe, size);
-			break;
-		case DMA_TO_DEVICE:
-			dev_dbg(dev,
-				"%s: copy out unsafe %p to safe %p, size %d\n",
-				__func__,buf->ptr, buf->safe, size);
-			memcpy(buf->safe, buf->ptr, size);
-			break;
-		case DMA_BIDIRECTIONAL:
-			BUG();	/* is this allowed?  what does it mean? */
-		default:
-			BUG();
-		}
-		/*
-		 * No need to sync the safe buffer - it was allocated
-		 * via the coherent allocators.
-		 */
-		return 0;
-	} else {
-		return 1;
+		free_safe_buffer(dev->archdata.dmabounce, buf);
 	}
 }
 
@@ -447,18 +383,54 @@ dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		unsigned long off, size_t sz, enum dma_data_direction dir)
 {
-	dev_dbg(dev, "%s(dma=%#lx,off=%#lx,sz=%zx,dir=%x)\n",
+	struct safe_buffer *buf;
+
+	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
 		__func__, addr, off, sz, dir);
-	return sync_single(dev, addr, off + sz, dir);
+
+	buf = find_safe_buffer_dev(dev, addr, __func__);
+	if (!buf)
+		return 1;
+
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
+
+	DO_STATS(dev->archdata.dmabounce->bounce_count++);
+
+	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
+			__func__, buf->safe + off, buf->ptr + off, sz);
+		memcpy(buf->ptr + off, buf->safe + off, sz);
+	}
+	return 0;
 }
 EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
 int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 		unsigned long off, size_t sz, enum dma_data_direction dir)
 {
-	dev_dbg(dev, "%s(dma=%#lx,off=%#lx,sz=%zx,dir=%x)\n",
+	struct safe_buffer *buf;
+
+	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
 		__func__, addr, off, sz, dir);
-	return sync_single(dev, addr, off + sz, dir);
+
+	buf = find_safe_buffer_dev(dev, addr, __func__);
+	if (!buf)
+		return 1;
+
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
+
+	DO_STATS(dev->archdata.dmabounce->bounce_count++);
+
+	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
+			__func__,buf->ptr + off, buf->safe + off, sz);
+		memcpy(buf->safe + off, buf->ptr + off, sz);
+	}
+	return 0;
 }
 EXPORT_SYMBOL(dmabounce_sync_for_device);
 
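Note the return convention of these two helpers: 0 means the region was
covered by a bounce buffer and any required copying has been done (the
safe buffer itself comes from the coherent allocator, so no cache
maintenance is needed), while 1 means there is no bounce buffer and the
caller must fall back to normal cache handling. A simplified sketch of
the calling side in the ARM dma-mapping wrappers of this era (a sketch
from memory, not the verbatim tree; dma_to_virt() and dma_cache_maint()
are the contemporaneous helpers):

	static inline void dma_sync_single_for_cpu(struct device *dev,
			dma_addr_t handle, size_t size,
			enum dma_data_direction dir)
	{
		/* Bounced and synced already? Then nothing more to do. */
		if (!dmabounce_sync_for_cpu(dev, handle, 0, size, dir))
			return;

		/* Ordinary mapping: perform CPU cache maintenance. */
		dma_cache_maint(dma_to_virt(dev, handle), size, dir);
	}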