path: root/arch/arm/mm
author    Marek Szyprowski <m.szyprowski@samsung.com>  2012-02-10 13:55:20 -0500
committer Marek Szyprowski <m.szyprowski@samsung.com>  2012-05-21 09:06:17 -0400
commit    2a550e73d3e5f040a3e8eb733c942ab352eafb36 (patch)
tree      dd4d161b5e3db2983737bad3f7b5787488bbe229 /arch/arm/mm
parent    2dc6a016bbedf18f18ad73997e5338307d6dbde9 (diff)
ARM: dma-mapping: implement dma sg methods on top of any generic dma ops
This patch converts all dma_sg methods to be generic (independent of the current DMA mapping implementation for the ARM architecture). All DMA sg operations are now implemented on top of the respective dma_map_page/dma_sync_single_for_* operations from the dma_map_ops structure.

Before this patch there were custom methods for all scatter/gather related operations. They iterated over the whole scatterlist and called cache related operations directly (which in turn checked whether the dma bounce code is in use and called the respective version). This patch changes them not to use such a shortcut. Instead, it provides a similar loop over the scatterlist and calls the methods from the device's dma_map_ops structure. This enables us to use device dependent implementations of cache related operations (direct linear or dma bounce) depending on the provided dma_map_ops structure.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Tested-by: Subash Patel <subash.ramaswamy@linaro.org>
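The conversion applies one pattern throughout: each scatter/gather method fetches the device's dma_map_ops and delegates every sg entry to the matching per-page callback. The sketch below restates that pattern outside the diff context. It is a sketch only: the function name generic_map_sg is hypothetical, and it assumes the kernel-3.x era interfaces used in the patch (get_dma_ops(), struct dma_attrs, for_each_sg()), with the same unwind-on-error behaviour as arm_dma_map_sg in the diff.

	/*
	 * Sketch only: a generic map_sg built on top of the device's
	 * dma_map_ops, mirroring arm_dma_map_sg below. Names and
	 * context are illustrative, not part of the patch.
	 */
	static int generic_map_sg(struct device *dev, struct scatterlist *sg,
				  int nents, enum dma_data_direction dir,
				  struct dma_attrs *attrs)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);
		struct scatterlist *s;
		int i, j;

		for_each_sg(sg, s, nents, i) {
			/* delegate to whichever map_page the device
			 * provides (direct linear or dmabounce) */
			s->dma_address = ops->map_page(dev, sg_page(s),
						s->offset, s->length,
						dir, attrs);
			if (dma_mapping_error(dev, s->dma_address))
				goto bad_mapping;
		}
		return nents;

	 bad_mapping:
		/* unmap the entries that were mapped before the failure */
		for_each_sg(sg, s, i, j)
			ops->unmap_page(dev, sg_dma_address(s),
					sg_dma_len(s), dir, attrs);
		return 0;
	}

The unmap and sync methods in the diff follow the same shape, minus the unwind path.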
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/dma-mapping.c | 43 +++++++++++++++++++------------------------
1 file changed, 19 insertions(+), 24 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 70be6e106667..b50fa578df81 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -622,7 +622,7 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
 /**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map
@@ -640,12 +640,13 @@ EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i, j;
 
 	for_each_sg(sg, s, nents, i) {
-		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
-						s->length, dir);
+		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
+						s->length, dir, attrs);
 		if (dma_mapping_error(dev, s->dma_address))
 			goto bad_mapping;
 	}
@@ -653,12 +654,12 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
  bad_mapping:
 	for_each_sg(sg, s, i, j)
-		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
 	return 0;
 }
 
 /**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
@@ -670,15 +671,17 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
+
 	int i;
 
 	for_each_sg(sg, s, nents, i)
-		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
 }
 
 /**
- * dma_sync_sg_for_cpu
+ * arm_dma_sync_sg_for_cpu
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map (returned from dma_map_sg)
@@ -687,21 +690,17 @@ void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 			int nents, enum dma_data_direction dir)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i;
 
-	for_each_sg(sg, s, nents, i) {
-		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s),
-					    sg_dma_len(s), dir))
-			continue;
-
-		__dma_page_dev_to_cpu(sg_page(s), s->offset,
-				      s->length, dir);
-	}
+	for_each_sg(sg, s, nents, i)
+		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
+					 dir);
 }
 
 /**
- * dma_sync_sg_for_device
+ * arm_dma_sync_sg_for_device
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map (returned from dma_map_sg)
@@ -710,17 +709,13 @@ void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 			int nents, enum dma_data_direction dir)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i;
 
-	for_each_sg(sg, s, nents, i) {
-		if (!dmabounce_sync_for_device(dev, sg_dma_address(s),
-					sg_dma_len(s), dir))
-			continue;
-
-		__dma_page_cpu_to_dev(sg_page(s), s->offset,
-				      s->length, dir);
-	}
+	for_each_sg(sg, s, nents, i)
+		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
+					    dir);
 }
 
 /*
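For context, the caller-facing API is untouched by this change: drivers still go through dma_map_sg()/dma_unmap_sg(), and the dma_map_ops indirection introduced above decides whether the direct linear or dmabounce implementation runs. A hypothetical caller, assuming dev, sgl and nents were set up elsewhere:

	/* illustrative caller, not part of the patch */
	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (mapped == 0)
		return -ENOMEM;	/* mapping failed, nothing to unmap */
	/* ... program the DMA engine with the mapped addresses ... */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);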