Diffstat (limited to 'arch/arm/common/dmabounce.c')
-rw-r--r--	arch/arm/common/dmabounce.c	287
1 file changed, 84 insertions(+), 203 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index aecc6c3f908f..f030f0775be7 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -154,9 +154,7 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
 #endif
 
 	write_lock_irqsave(&device_info->lock, flags);
-
 	list_add(&buf->node, &device_info->safe_buffers);
-
 	write_unlock_irqrestore(&device_info->lock, flags);
 
 	return buf;
@@ -205,8 +203,22 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
 
 /* ************************************************** */
 
-static inline dma_addr_t
-map_single(struct device *dev, void *ptr, size_t size,
+static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
+		dma_addr_t dma_addr, const char *where)
+{
+	if (!dev || !dev->archdata.dmabounce)
+		return NULL;
+	if (dma_mapping_error(dev, dma_addr)) {
+		if (dev)
+			dev_err(dev, "Trying to %s invalid mapping\n", where);
+		else
+			pr_err("unknown device: Trying to %s invalid mapping\n", where);
+		return NULL;
+	}
+	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
+}
+
+static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 	enum dma_data_direction dir)
 {
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
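The new find_safe_buffer_dev() helper folds together the three checks every entry point used to open-code: a NULL device, a device with no dmabounce state, and a dma_mapping_error() address. As the next hunk shows for unmap_single(), a caller now collapses to a lookup plus a NULL test; a sketch with a hypothetical caller:

	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
	if (!buf)
		return;	/* not a bounced mapping, nothing to undo */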
@@ -270,33 +282,21 @@ map_single(struct device *dev, void *ptr, size_t size,
 	return dma_addr;
 }
 
-static inline void
-unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-	enum dma_data_direction dir)
+static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
+		size_t size, enum dma_data_direction dir)
 {
-	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-	struct safe_buffer *buf = NULL;
-
-	/*
-	 * Trying to unmap an invalid mapping
-	 */
-	if (dma_mapping_error(dev, dma_addr)) {
-		dev_err(dev, "Trying to unmap invalid mapping\n");
-		return;
-	}
-
-	if (device_info)
-		buf = find_safe_buffer(device_info, dma_addr);
+	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
 
 	if (buf) {
 		BUG_ON(buf->size != size);
+		BUG_ON(buf->direction != dir);
 
 		dev_dbg(dev,
 			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
 			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
 			buf->safe, buf->safe_dma_addr);
 
-		DO_STATS ( device_info->bounce_count++ );
+		DO_STATS(dev->archdata.dmabounce->bounce_count++);
 
 		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
 			void *ptr = buf->ptr;
@@ -317,74 +317,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		dmac_clean_range(ptr, ptr + size);
 		outer_clean_range(__pa(ptr), __pa(ptr) + size);
 	}
-		free_safe_buffer(device_info, buf);
-	}
-}
-
-static int sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-	enum dma_data_direction dir)
-{
-	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-	struct safe_buffer *buf = NULL;
-
-	if (device_info)
-		buf = find_safe_buffer(device_info, dma_addr);
-
-	if (buf) {
-		/*
-		 * Both of these checks from original code need to be
-		 * commented out b/c some drivers rely on the following:
-		 *
-		 * 1) Drivers may map a large chunk of memory into DMA space
-		 *    but only sync a small portion of it. Good example is
-		 *    allocating a large buffer, mapping it, and then
-		 *    breaking it up into small descriptors. No point
-		 *    in syncing the whole buffer if you only have to
-		 *    touch one descriptor.
-		 *
-		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
-		 *    usually only synced in one dir at a time.
-		 *
-		 * See drivers/net/eepro100.c for examples of both cases.
-		 *
-		 * -ds
-		 *
-		 * BUG_ON(buf->size != size);
-		 * BUG_ON(buf->direction != dir);
-		 */
-
-		dev_dbg(dev,
-			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-			buf->safe, buf->safe_dma_addr);
-
-		DO_STATS ( device_info->bounce_count++ );
-
-		switch (dir) {
-		case DMA_FROM_DEVICE:
-			dev_dbg(dev,
-				"%s: copy back safe %p to unsafe %p size %d\n",
-				__func__, buf->safe, buf->ptr, size);
-			memcpy(buf->ptr, buf->safe, size);
-			break;
-		case DMA_TO_DEVICE:
-			dev_dbg(dev,
-				"%s: copy out unsafe %p to safe %p, size %d\n",
-				__func__,buf->ptr, buf->safe, size);
-			memcpy(buf->safe, buf->ptr, size);
-			break;
-		case DMA_BIDIRECTIONAL:
-			BUG();	/* is this allowed?  what does it mean? */
-		default:
-			BUG();
-		}
-		/*
-		 * No need to sync the safe buffer - it was allocated
-		 * via the coherent allocators.
-		 */
-		return 0;
-	} else {
-		return 1;
+		free_safe_buffer(dev->archdata.dmabounce, buf);
 	}
 }
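The removed sync_single() had to keep its size and direction BUG_ONs commented out because callers legitimately sync only a sub-range of a mapping. The replacement entry points introduced further down take an explicit offset and size, so a partial sync is expressed directly and the direction assertion can return. A sketch of the pattern the old comment describes, using hypothetical names (ring, handle):

	/* Map a 4 KiB descriptor ring once, then let the CPU look at a
	 * single 64-byte descriptor; mapping and sync use the same
	 * direction, satisfying the reinstated direction check. */
	dma_addr_t handle = dma_map_single(dev, ring, 4096, DMA_FROM_DEVICE);

	dma_sync_single_range_for_cpu(dev, handle, 3 * 64, 64, DMA_FROM_DEVICE);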
@@ -396,21 +329,29 @@ static int sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
+dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 	enum dma_data_direction dir)
 {
-	dma_addr_t dma_addr;
-
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
 		__func__, ptr, size, dir);
 
-	BUG_ON(dir == DMA_NONE);
+	BUG_ON(!valid_dma_direction(dir));
 
-	dma_addr = map_single(dev, ptr, size, dir);
+	return map_single(dev, ptr, size, dir);
+}
+EXPORT_SYMBOL(dma_map_single);
 
-	return dma_addr;
+dma_addr_t dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
+		__func__, page, offset, size, dir);
+
+	BUG_ON(!valid_dma_direction(dir));
+
+	return map_single(dev, page_address(page) + offset, size, dir);
 }
+EXPORT_SYMBOL(dma_map_page);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
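Note that the new dma_map_page() is simply dma_map_single() applied to page_address(page) + offset, which presumes the page has a permanent kernel mapping (lowmem); a highmem page would need kmap-style handling that this bouncer does not attempt. An illustrative equivalence, with hypothetical variables (page, off, len):

	/* For a lowmem page, these two mappings bounce identically. */
	dma_addr_t a = dma_map_page(dev, page, off, len, DMA_TO_DEVICE);
	dma_addr_t b = dma_map_single(dev, page_address(page) + off, len,
				      DMA_TO_DEVICE);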
@@ -419,126 +360,76 @@ dma_map_single(struct device *dev, void *ptr, size_t size,
  * should be)
  */
 
-void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-	enum dma_data_direction dir)
+void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
 		__func__, (void *) dma_addr, size, dir);
 
-	BUG_ON(dir == DMA_NONE);
-
 	unmap_single(dev, dma_addr, size, dir);
 }
+EXPORT_SYMBOL(dma_unmap_single);
 
-int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	enum dma_data_direction dir)
+int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+		unsigned long off, size_t sz, enum dma_data_direction dir)
 {
-	int i;
-
-	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
-		__func__, sg, nents, dir);
-
-	BUG_ON(dir == DMA_NONE);
-
-	for (i = 0; i < nents; i++, sg++) {
-		struct page *page = sg_page(sg);
-		unsigned int offset = sg->offset;
-		unsigned int length = sg->length;
-		void *ptr = page_address(page) + offset;
+	struct safe_buffer *buf;
 
-		sg->dma_address =
-			map_single(dev, ptr, length, dir);
-	}
+	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
+		__func__, addr, off, sz, dir);
 
-	return nents;
-}
-
-void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-	enum dma_data_direction dir)
-{
-	int i;
+	buf = find_safe_buffer_dev(dev, addr, __func__);
+	if (!buf)
+		return 1;
 
-	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
-		__func__, sg, nents, dir);
+	BUG_ON(buf->direction != dir);
 
-	BUG_ON(dir == DMA_NONE);
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
 
-	for (i = 0; i < nents; i++, sg++) {
-		dma_addr_t dma_addr = sg->dma_address;
-		unsigned int length = sg->length;
+	DO_STATS(dev->archdata.dmabounce->bounce_count++);
 
-		unmap_single(dev, dma_addr, length, dir);
+	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
+			__func__, buf->safe + off, buf->ptr + off, sz);
+		memcpy(buf->ptr + off, buf->safe + off, sz);
 	}
+	return 0;
 }
+EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_addr,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	dev_dbg(dev, "%s(dma=%#x,off=%#lx,size=%zx,dir=%x)\n",
-		__func__, dma_addr, offset, size, dir);
-
-	if (sync_single(dev, dma_addr, offset + size, dir))
-		dma_cache_maint(dma_to_virt(dev, dma_addr) + offset, size, dir);
-}
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_addr,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	dev_dbg(dev, "%s(dma=%#x,off=%#lx,size=%zx,dir=%x)\n",
-		__func__, dma_addr, offset, size, dir);
-
-	if (sync_single(dev, dma_addr, offset + size, dir))
-		dma_cache_maint(dma_to_virt(dev, dma_addr) + offset, size, dir);
-}
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-
-void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
-	enum dma_data_direction dir)
+int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+		unsigned long off, size_t sz, enum dma_data_direction dir)
 {
-	int i;
-
-	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
-		__func__, sg, nents, dir);
-
-	BUG_ON(dir == DMA_NONE);
+	struct safe_buffer *buf;
 
-	for (i = 0; i < nents; i++, sg++) {
-		dma_addr_t dma_addr = sg->dma_address;
-		unsigned int length = sg->length;
+	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
+		__func__, addr, off, sz, dir);
 
-		sync_single(dev, dma_addr, length, dir);
-	}
-}
+	buf = find_safe_buffer_dev(dev, addr, __func__);
+	if (!buf)
+		return 1;
 
-void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
-	enum dma_data_direction dir)
-{
-	int i;
+	BUG_ON(buf->direction != dir);
 
-	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
-		__func__, sg, nents, dir);
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
 
-	BUG_ON(dir == DMA_NONE);
+	DO_STATS(dev->archdata.dmabounce->bounce_count++);
 
-	for (i = 0; i < nents; i++, sg++) {
-		dma_addr_t dma_addr = sg->dma_address;
-		unsigned int length = sg->length;
+	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
+			__func__,buf->ptr + off, buf->safe + off, sz);
+		memcpy(buf->safe + off, buf->ptr + off, sz);
 	}
+	return 0;
 }
+EXPORT_SYMBOL(dmabounce_sync_for_device);
 
-static int
-dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
-		    unsigned long size)
+static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
+		const char *name, unsigned long size)
 {
 	pool->size = size;
 	DO_STATS(pool->allocs = 0);
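dmabounce_sync_for_cpu()/dmabounce_sync_for_device() return 0 once they have copied between the safe and unsafe buffers, and 1 when the address was never bounced. The wrappers deleted above suggest how a caller is meant to use that: fall through to ordinary cache maintenance only for non-bounced mappings. A sketch of such a wrapper (presumably what the common ARM dma-mapping code now does; the function name here is made up):

	static void my_sync_single_range_for_cpu(struct device *dev,
			dma_addr_t addr, unsigned long off, size_t sz,
			enum dma_data_direction dir)
	{
		if (dmabounce_sync_for_cpu(dev, addr, off, sz, dir))
			dma_cache_maint(dma_to_virt(dev, addr) + off, sz, dir);
	}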
@@ -549,9 +440,8 @@ dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char
 	return pool->pool ? 0 : -ENOMEM;
 }
 
-int
-dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
-	unsigned long large_buffer_size)
+int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
+		unsigned long large_buffer_size)
 {
 	struct dmabounce_device_info *device_info;
 	int ret;
@@ -607,9 +497,9 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 	kfree(device_info);
 	return ret;
 }
+EXPORT_SYMBOL(dmabounce_register_dev);
 
-void
-dmabounce_unregister_dev(struct device *dev)
+void dmabounce_unregister_dev(struct device *dev)
 {
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 
@@ -642,15 +532,6 @@ dmabounce_unregister_dev(struct device *dev)
 
 	dev_info(dev, "dmabounce: device unregistered\n");
 }
-
-
-EXPORT_SYMBOL(dma_map_single);
-EXPORT_SYMBOL(dma_unmap_single);
-EXPORT_SYMBOL(dma_map_sg);
-EXPORT_SYMBOL(dma_unmap_sg);
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-EXPORT_SYMBOL(dmabounce_register_dev);
 EXPORT_SYMBOL(dmabounce_unregister_dev);
 
 MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
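For reference, a device becomes a dmabounce client through dmabounce_register_dev()/dmabounce_unregister_dev(), which this patch reflows but does not change in substance. A minimal, hypothetical platform-driver pairing (pool sizes are illustrative: allocations up to 512 bytes come from the small pool, larger ones up to 4 KiB from the large pool):

	static int mydev_probe(struct platform_device *pdev)
	{
		/* Enable bounce buffering for this device's DMA. */
		return dmabounce_register_dev(&pdev->dev, 512, 4096);
	}

	static int mydev_remove(struct platform_device *pdev)
	{
		dmabounce_unregister_dev(&pdev->dev);
		return 0;
	}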