Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--  arch/arm/mm/dma-mapping.c | 159
1 file changed, 85 insertions(+), 74 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 48eedab1609b..0da7eccf7749 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -401,78 +401,44 @@ EXPORT_SYMBOL(dma_free_coherent);
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-void dma_cache_maint(const void *start, size_t size, int direction)
-{
-	void (*inner_op)(const void *, const void *);
-	void (*outer_op)(unsigned long, unsigned long);
-
-	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));
-
-	switch (direction) {
-	case DMA_FROM_DEVICE:		/* invalidate only */
-		inner_op = dmac_inv_range;
-		outer_op = outer_inv_range;
-		break;
-	case DMA_TO_DEVICE:		/* writeback only */
-		inner_op = dmac_clean_range;
-		outer_op = outer_clean_range;
-		break;
-	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		inner_op = dmac_flush_range;
-		outer_op = outer_flush_range;
-		break;
-	default:
-		BUG();
-	}
-
-	inner_op(start, start + size);
-	outer_op(__pa(start), __pa(start) + size);
-}
-EXPORT_SYMBOL(dma_cache_maint);
-
-static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
-	size_t size, int direction)
-{
-	void *vaddr;
-	unsigned long paddr;
-	void (*inner_op)(const void *, const void *);
-	void (*outer_op)(unsigned long, unsigned long);
-
-	switch (direction) {
-	case DMA_FROM_DEVICE:		/* invalidate only */
-		inner_op = dmac_inv_range;
-		outer_op = outer_inv_range;
-		break;
-	case DMA_TO_DEVICE:		/* writeback only */
-		inner_op = dmac_clean_range;
-		outer_op = outer_clean_range;
-		break;
-	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		inner_op = dmac_flush_range;
-		outer_op = outer_flush_range;
-		break;
-	default:
-		BUG();
-	}
-
-	if (!PageHighMem(page)) {
-		vaddr = page_address(page) + offset;
-		inner_op(vaddr, vaddr + size);
-	} else {
-		vaddr = kmap_high_get(page);
-		if (vaddr) {
-			vaddr += offset;
-			inner_op(vaddr, vaddr + size);
-			kunmap_high(page);
-		}
-	}
-
-	paddr = page_to_phys(page) + offset;
-	outer_op(paddr, paddr + size);
-}
-
-void dma_cache_maint_page(struct page *page, unsigned long offset,
-	size_t size, int dir)
+void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	unsigned long paddr;
+
+	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
+
+	dmac_map_area(kaddr, size, dir);
+
+	paddr = __pa(kaddr);
+	if (dir == DMA_FROM_DEVICE) {
+		outer_inv_range(paddr, paddr + size);
+	} else {
+		outer_clean_range(paddr, paddr + size);
+	}
+	/* FIXME: non-speculating: flush on bidirectional mappings? */
+}
+EXPORT_SYMBOL(___dma_single_cpu_to_dev);
+
+void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
+
+	/* FIXME: non-speculating: not required */
+	/* don't bother invalidating if DMA to device */
+	if (dir != DMA_TO_DEVICE) {
+		unsigned long paddr = __pa(kaddr);
+		outer_inv_range(paddr, paddr + size);
+	}
+
+	dmac_unmap_area(kaddr, size, dir);
+}
+EXPORT_SYMBOL(___dma_single_dev_to_cpu);
+
+static void dma_cache_maint_page(struct page *page, unsigned long offset,
+	size_t size, enum dma_data_direction dir,
+	void (*op)(const void *, size_t, int))
 {
 	/*
 	 * A single sg entry may refer to multiple physically contiguous
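
For orientation (not part of the patch): the two new single-buffer helpers replace the old direction switch with an explicit ownership hand-off. ___dma_single_cpu_to_dev() performs the CPU-side maintenance plus an outer-cache clean or invalidate before the device touches the buffer, and ___dma_single_dev_to_cpu() invalidates the outer cache and undoes the CPU-side mapping afterwards. A minimal sketch of the intended calling order follows; it assumes a non-coherent ARM system and that the prototypes from the companion asm/dma-mapping.h change are visible. Real drivers are expected to reach these exported helpers through dma_map_single()/dma_sync_single_*() rather than calling them directly.

#include <linux/dma-mapping.h>

/*
 * Illustrative sketch only: the ownership hand-off order the new helpers
 * implement for one device-to-memory (DMA_FROM_DEVICE) transfer.
 */
static void example_single_buffer_rx(void *buf, size_t len)
{
        /* CPU -> device: make the buffer safe for the device to DMA into */
        ___dma_single_cpu_to_dev(buf, len, DMA_FROM_DEVICE);

        /* ... program the device and wait for the transfer to complete ... */

        /* device -> CPU: invalidate so the CPU sees the freshly DMA'd data */
        ___dma_single_dev_to_cpu(buf, len, DMA_FROM_DEVICE);
}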
@@ -483,20 +449,62 @@ void dma_cache_maint_page(struct page *page, unsigned long offset,
 	size_t left = size;
 	do {
 		size_t len = left;
-		if (PageHighMem(page) && len + offset > PAGE_SIZE) {
-			if (offset >= PAGE_SIZE) {
-				page += offset / PAGE_SIZE;
-				offset %= PAGE_SIZE;
-			}
-			len = PAGE_SIZE - offset;
-		}
-		dma_cache_maint_contiguous(page, offset, len, dir);
+		void *vaddr;
+
+		if (PageHighMem(page)) {
+			if (len + offset > PAGE_SIZE) {
+				if (offset >= PAGE_SIZE) {
+					page += offset / PAGE_SIZE;
+					offset %= PAGE_SIZE;
+				}
+				len = PAGE_SIZE - offset;
+			}
+			vaddr = kmap_high_get(page);
+			if (vaddr) {
+				vaddr += offset;
+				op(vaddr, len, dir);
+				kunmap_high(page);
+			}
+		} else {
+			vaddr = page_address(page) + offset;
+			op(vaddr, len, dir);
+		}
 		offset = 0;
 		page++;
 		left -= len;
 	} while (left);
 }
-EXPORT_SYMBOL(dma_cache_maint_page);
+
+void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	unsigned long paddr;
+
+	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
+
+	paddr = page_to_phys(page) + off;
+	if (dir == DMA_FROM_DEVICE) {
+		outer_inv_range(paddr, paddr + size);
+	} else {
+		outer_clean_range(paddr, paddr + size);
+	}
+	/* FIXME: non-speculating: flush on bidirectional mappings? */
+}
+EXPORT_SYMBOL(___dma_page_cpu_to_dev);
+
+void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	unsigned long paddr = page_to_phys(page) + off;
+
+	/* FIXME: non-speculating: not required */
+	/* don't bother invalidating if DMA to device */
+	if (dir != DMA_TO_DEVICE)
+		outer_inv_range(paddr, paddr + size);
+
+	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+}
+EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
 /**
  * dma_map_sg - map a set of SG buffers for streaming mode DMA
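
The callers used further down in this patch, __dma_page_cpu_to_dev() and __dma_page_dev_to_cpu() (two leading underscores), are not defined in this file; they are presumably thin inline wrappers added by the companion arch/arm/include/asm/dma-mapping.h change. A hedged sketch of what such a wrapper could look like is below; the arch_is_coherent() test mirrors the check this patch removes from dma_sync_sg_for_device(), and the exact form in the real header may differ.

/*
 * Assumed shape of the inline wrapper (hypothetical; the real definition
 * belongs to the companion asm/dma-mapping.h change).
 */
static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
{
        extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
                size_t, enum dma_data_direction);

        /* coherent systems need no cache maintenance at all */
        if (!arch_is_coherent())
                ___dma_page_cpu_to_dev(page, off, size, dir);
}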
@@ -570,8 +578,12 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
-		dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
-					sg_dma_len(s), dir);
+		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+					    sg_dma_len(s), dir))
+			continue;
+
+		__dma_page_dev_to_cpu(sg_page(s), s->offset,
+				      s->length, dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
@@ -594,9 +606,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 					      sg_dma_len(s), dir))
 			continue;
 
-		if (!arch_is_coherent())
-			dma_cache_maint_page(sg_page(s), s->offset,
-					     s->length, dir);
+		__dma_page_cpu_to_dev(sg_page(s), s->offset,
+				      s->length, dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
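
Driver-visible behaviour is unchanged by this patch: dma_sync_sg_for_cpu() and dma_sync_sg_for_device() keep their prototypes and simply route through the new per-page ownership helpers on non-coherent CPUs. A hypothetical usage fragment (the helper name and surrounding driver structure are assumptions, not from the patch) shows the ownership round trip for a scatterlist that dma_map_sg() has already mapped.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical driver helper: one receive cycle over a pre-mapped sglist. */
static void example_sg_rx_cycle(struct device *dev, struct scatterlist *sg,
        int nents)
{
        /* hand the buffers to the device before starting the transfer */
        dma_sync_sg_for_device(dev, sg, nents, DMA_FROM_DEVICE);

        /* ... start the DMA and wait for the device to signal completion ... */

        /* take the buffers back so the CPU sees the data the device wrote */
        dma_sync_sg_for_cpu(dev, sg, nents, DMA_FROM_DEVICE);
}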