Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
 arch/arm/mm/dma-mapping.c | 43
 1 file changed, 19 insertions, 24 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 70be6e106667..b50fa578df81 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -622,7 +622,7 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
 /**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map
@@ -640,12 +640,13 @@ EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i, j;
 
 	for_each_sg(sg, s, nents, i) {
-		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
-						s->length, dir);
+		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
+						s->length, dir, attrs);
 		if (dma_mapping_error(dev, s->dma_address))
 			goto bad_mapping;
 	}
@@ -653,12 +654,12 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 bad_mapping:
 	for_each_sg(sg, s, i, j)
-		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
 	return 0;
 }
 
 /**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
@@ -670,15 +671,17 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
+
 	int i;
 
 	for_each_sg(sg, s, nents, i)
-		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
 }
 
 /**
- * dma_sync_sg_for_cpu
+ * arm_dma_sync_sg_for_cpu
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map (returned from dma_map_sg)
@@ -687,21 +690,17 @@ void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 			int nents, enum dma_data_direction dir)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i;
 
-	for_each_sg(sg, s, nents, i) {
-		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s),
-					    sg_dma_len(s), dir))
-			continue;
-
-		__dma_page_dev_to_cpu(sg_page(s), s->offset,
-				      s->length, dir);
-	}
+	for_each_sg(sg, s, nents, i)
+		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
+					 dir);
 }
 
 /**
- * dma_sync_sg_for_device
+ * arm_dma_sync_sg_for_device
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map (returned from dma_map_sg)
@@ -710,17 +709,13 @@ void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 			int nents, enum dma_data_direction dir)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i;
 
-	for_each_sg(sg, s, nents, i) {
-		if (!dmabounce_sync_for_device(dev, sg_dma_address(s),
-					sg_dma_len(s), dir))
-			continue;
-
-		__dma_page_cpu_to_dev(sg_page(s), s->offset,
-				      s->length, dir);
-	}
+	for_each_sg(sg, s, nents, i)
+		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
+					    dir);
 }
 
 /*
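
The patch above rebuilds the ARM scatter/gather helpers purely on the device's per-page dma_map_ops callbacks obtained via get_dma_ops(dev), including the rollback of already-mapped entries when one mapping fails. The following is a minimal userspace sketch of that dispatch-and-rollback pattern only; the struct names, callback signatures, DMA_MAPPING_ERROR convention and toy backend are illustrative stand-ins, not the kernel's actual definitions.

/*
 * Simplified model of "map a scatterlist through per-page ops":
 * map_sg() forwards every entry to ops->map_page() and, on failure,
 * unwinds the entries mapped so far through ops->unmap_page(),
 * mirroring the bad_mapping path in arm_dma_map_sg().
 */
#include <stdio.h>

typedef unsigned long long dma_addr_t;
#define DMA_MAPPING_ERROR ((dma_addr_t)-1)   /* assumed error marker */

struct scatterlist {
	void *page;
	unsigned int offset;
	unsigned int length;
	dma_addr_t dma_address;
};

struct dma_map_ops {
	dma_addr_t (*map_page)(void *page, unsigned int offset,
			       unsigned int length);
	void (*unmap_page)(dma_addr_t addr, unsigned int length);
};

/* Generic sg mapping built only on the per-page callbacks. */
static int map_sg(const struct dma_map_ops *ops,
		  struct scatterlist *sg, int nents)
{
	int i, j;

	for (i = 0; i < nents; i++) {
		sg[i].dma_address = ops->map_page(sg[i].page, sg[i].offset,
						  sg[i].length);
		if (sg[i].dma_address == DMA_MAPPING_ERROR)
			goto bad_mapping;
	}
	return nents;

bad_mapping:
	/* Roll back every entry that was already mapped. */
	for (j = 0; j < i; j++)
		ops->unmap_page(sg[j].dma_address, sg[j].length);
	return 0;
}

/* Toy backend: "maps" a page to its virtual address, fails on NULL pages. */
static dma_addr_t toy_map_page(void *page, unsigned int offset,
			       unsigned int length)
{
	(void)length;
	return page ? (dma_addr_t)(unsigned long)page + offset
		    : DMA_MAPPING_ERROR;
}

static void toy_unmap_page(dma_addr_t addr, unsigned int length)
{
	(void)addr;
	(void)length;
}

int main(void)
{
	static char buf[2][64];
	struct scatterlist sg[2] = {
		{ .page = buf[0], .offset = 0, .length = 64 },
		{ .page = buf[1], .offset = 0, .length = 64 },
	};
	const struct dma_map_ops ops = {
		.map_page = toy_map_page,
		.unmap_page = toy_unmap_page,
	};

	printf("mapped %d entries\n", map_sg(&ops, sg, 2));
	return 0;
}

The point of the pattern, as in the patch, is that the sg helpers no longer care whether the backend is the linear mapping or dmabounce: whatever backend a device registers in its ops is used for every entry.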