author    Maxime Ripard <maxime.ripard@free-electrons.com>  2015-05-07 11:38:11 -0400
committer Vinod Koul <vinod.koul@intel.com>  2015-05-18 01:29:35 -0400
commit    6007ccb57744fdd393385b135e7b7cea9bd4bd6b (patch)
tree      b51916328af61df2e9178fd8538af159e7abaa29 /drivers/dma/at_xdmac.c
parent    0d0ee751f7f7cd7d44eeb596f3b430ed0b178e07 (diff)
dmaengine: xdmac: Add interleaved transfer support
The XDMAC supports interleaved transfers through its flexible descriptor
configuration. Add support for this kind of transfer to the dmaengine
driver.

Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/at_xdmac.c')
-rw-r--r--  drivers/dma/at_xdmac.c | 233 ++++++++++++++++++++++++++++++++
1 file changed, 233 insertions(+), 0 deletions(-)
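For reference, this is how a client could request such a transfer through the generic dmaengine API once this patch is applied. This is a minimal sketch, not part of the patch: demo_interleaved_copy, the buffer handles, and the chunk geometry are made up for illustration, and channel request plus DMA mapping are omitted. Because both chunks share the same size and gap, the merge path in at_xdmac_prep_interleaved() further down would collapse them into a single descriptor with an incremented block count.

#include <linux/dmaengine.h>
#include <linux/slab.h>

/*
 * Minimal sketch: copy two 64-byte chunks, skipping 192 bytes on the
 * source side between them. 'chan' must be a channel with the
 * DMA_INTERLEAVE capability; 'src' and 'dst' are already-mapped bus
 * addresses. All names here are illustrative.
 */
static int demo_interleaved_copy(struct dma_chan *chan,
				 dma_addr_t src, dma_addr_t dst)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_interleaved_template *xt;
	dma_cookie_t cookie;

	/* One frame of two chunks; sgl[] is a flexible array member. */
	xt = kzalloc(sizeof(*xt) + 2 * sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->dir = DMA_MEM_TO_MEM;	/* the only direction this driver accepts */
	xt->numf = 1;			/* the driver requires a single frame */
	xt->frame_size = 2;		/* two data chunks, described below */
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->src_sgl = true;		/* honour the ICG on the source side */
	xt->dst_sgl = false;		/* pack the destination contiguously */

	xt->sgl[0].size = 64;		/* copy 64 bytes ... */
	xt->sgl[0].icg = 192;		/* ... then skip 192 on the source */
	xt->sgl[1].size = 64;
	xt->sgl[1].icg = 192;

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);			/* the template is not used after prep */
	if (!tx)
		return -EINVAL;

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	return dma_submit_error(cookie);
}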
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 0144a935b8f2..9b602a67d40d 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -485,6 +485,19 @@ static void at_xdmac_queue_desc(struct dma_chan *chan,
 		__func__, prev, &prev->lld.mbr_nda);
 }
 
+static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
+						  struct at_xdmac_desc *desc)
+{
+	if (!desc)
+		return;
+
+	desc->lld.mbr_bc++;
+
+	dev_dbg(chan2dev(chan),
+		"%s: incrementing the block count of the desc 0x%p\n",
+		__func__, desc);
+}
+
 static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
 				       struct of_dma *of_dma)
 {
@@ -782,6 +795,224 @@ static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
 	return width;
 }
 
+static struct at_xdmac_desc *
+at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
+				struct at_xdmac_chan *atchan,
+				struct at_xdmac_desc *prev,
+				dma_addr_t src, dma_addr_t dst,
+				struct dma_interleaved_template *xt,
+				struct data_chunk *chunk)
+{
+	struct at_xdmac_desc *desc;
+	u32 dwidth;
+	unsigned long flags;
+	size_t ublen;
+	/*
+	 * WARNING: The channel configuration is set here since there is no
+	 * dmaengine_slave_config call in this case. Moreover, we don't know
+	 * the direction, which means we can't dynamically set the source
+	 * and destination interfaces, so we have to use the same one for
+	 * both. Only interface 0 allows EBI access. Fortunately, DDR can be
+	 * reached through both ports (at least on SAMA5D4x), so using the
+	 * same interface for source and destination works around the
+	 * unknown direction.
+	 */
+	u32 chan_cc = AT_XDMAC_CC_DIF(0)
+		| AT_XDMAC_CC_SIF(0)
+		| AT_XDMAC_CC_MBSIZE_SIXTEEN
+		| AT_XDMAC_CC_TYPE_MEM_TRAN;
+
+	dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
+	if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
+		dev_dbg(chan2dev(chan),
+			"%s: chunk too big (%zu, max size %lu)...\n",
+			__func__, chunk->size,
+			AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
+		return NULL;
+	}
+
+	if (prev)
+		dev_dbg(chan2dev(chan),
+			"Adding items at the end of desc 0x%p\n", prev);
+
+	if (xt->src_inc) {
+		if (xt->src_sgl)
+			chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM;
+		else
+			chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
+	}
+
+	if (xt->dst_inc) {
+		if (xt->dst_sgl)
+			chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM;
+		else
+			chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
+	}
+
+	spin_lock_irqsave(&atchan->lock, flags);
+	desc = at_xdmac_get_desc(atchan);
+	spin_unlock_irqrestore(&atchan->lock, flags);
+	if (!desc) {
+		dev_err(chan2dev(chan), "can't get descriptor\n");
+		return NULL;
+	}
+
+	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
+
+	ublen = chunk->size >> dwidth;
+
+	desc->lld.mbr_sa = src;
+	desc->lld.mbr_da = dst;
+
+	if (xt->src_inc && xt->src_sgl) {
+		if (chunk->src_icg)
+			desc->lld.mbr_sus = chunk->src_icg;
+		else
+			desc->lld.mbr_sus = chunk->icg;
+	}
+
+	if (xt->dst_inc && xt->dst_sgl) {
+		if (chunk->dst_icg)
+			desc->lld.mbr_dus = chunk->dst_icg;
+		else
+			desc->lld.mbr_dus = chunk->icg;
+	}
+
+	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
+		| AT_XDMAC_MBR_UBC_NDEN
+		| AT_XDMAC_MBR_UBC_NSEN
+		| ublen;
+	desc->lld.mbr_cfg = chan_cc;
+
+	dev_dbg(chan2dev(chan),
+		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
+		__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
+		desc->lld.mbr_ubc, desc->lld.mbr_cfg);
+
+	/* Chain lld. */
+	if (prev)
+		at_xdmac_queue_desc(chan, prev, desc);
+
+	return desc;
+}
+
+static size_t at_xdmac_get_icg(bool inc, bool sgl, size_t icg, size_t dir_icg)
+{
+	if (inc) {
+		if (dir_icg)
+			return dir_icg;
+		else if (sgl)
+			return icg;
+	}
+
+	return 0;
+}
+
+static size_t at_xdmac_get_dst_icg(struct dma_interleaved_template *xt,
+				   struct data_chunk *chunk)
+{
+	return at_xdmac_get_icg(xt->dst_inc, xt->dst_sgl,
+				chunk->icg, chunk->dst_icg);
+}
+
+static size_t at_xdmac_get_src_icg(struct dma_interleaved_template *xt,
+				   struct data_chunk *chunk)
+{
+	return at_xdmac_get_icg(xt->src_inc, xt->src_sgl,
+				chunk->icg, chunk->src_icg);
+}
+
+static struct dma_async_tx_descriptor *
+at_xdmac_prep_interleaved(struct dma_chan *chan,
+			  struct dma_interleaved_template *xt,
+			  unsigned long flags)
+{
+	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac_desc *prev = NULL, *first = NULL;
+	struct data_chunk *chunk, *prev_chunk = NULL;
+	dma_addr_t dst_addr, src_addr;
+	size_t dst_skip, src_skip, len = 0;
+	size_t prev_dst_icg = 0, prev_src_icg = 0;
+	int i;
+
+	if (!xt || (xt->numf != 1) || (xt->dir != DMA_MEM_TO_MEM))
+		return NULL;
+
+	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
+		__func__, &xt->src_start, &xt->dst_start, xt->numf,
+		xt->frame_size, flags);
+
+	src_addr = xt->src_start;
+	dst_addr = xt->dst_start;
+
+	for (i = 0; i < xt->frame_size; i++) {
+		struct at_xdmac_desc *desc;
+		size_t src_icg, dst_icg;
+
+		chunk = xt->sgl + i;
+
+		dst_icg = at_xdmac_get_dst_icg(xt, chunk);
+		src_icg = at_xdmac_get_src_icg(xt, chunk);
+
+		src_skip = chunk->size + src_icg;
+		dst_skip = chunk->size + dst_icg;
+
+		dev_dbg(chan2dev(chan),
+			"%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
+			__func__, chunk->size, src_icg, dst_icg);
+
+		/*
+		 * Handle the case where we just have the same
+		 * transfer to set up: we can simply increment the
+		 * block count and reuse the same descriptor.
+		 */
+		if (prev_chunk && prev &&
+		    (prev_chunk->size == chunk->size) &&
+		    (prev_src_icg == src_icg) &&
+		    (prev_dst_icg == dst_icg)) {
+			dev_dbg(chan2dev(chan),
+				"%s: same configuration as the previous chunk, merging the descriptors...\n",
+				__func__);
+			at_xdmac_increment_block_count(chan, prev);
+			continue;
+		}
+
+		desc = at_xdmac_interleaved_queue_desc(chan, atchan,
+						       prev,
+						       src_addr, dst_addr,
+						       xt, chunk);
+		if (!desc) {
+			list_splice_init(&first->descs_list,
+					 &atchan->free_descs_list);
+			return NULL;
+		}
+
+		if (!first)
+			first = desc;
+
+		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
+			__func__, desc, first);
+		list_add_tail(&desc->desc_node, &first->descs_list);
+
+		if (xt->src_sgl)
+			src_addr += src_skip;
+
+		if (xt->dst_sgl)
+			dst_addr += dst_skip;
+
+		len += chunk->size;
+		prev_chunk = chunk;
+		prev_dst_icg = dst_icg;
+		prev_src_icg = src_icg;
+		prev = desc;
+	}
+
+	first->tx_dma_desc.cookie = -EBUSY;
+	first->tx_dma_desc.flags = flags;
+	first->xfer_size = len;
+
+	return &first->tx_dma_desc;
+}
+
 static struct dma_async_tx_descriptor *
 at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 			 size_t len, unsigned long flags)
@@ -1404,6 +1635,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
 	}
 
 	dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
+	dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
 	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
 	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
 	/*
@@ -1417,6 +1649,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
 	atxdmac->dma.device_tx_status = at_xdmac_tx_status;
 	atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
 	atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
+	atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved;
 	atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
 	atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
 	atxdmac->dma.device_config = at_xdmac_device_config;
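
As a quick sanity check on the ICG selection implemented by at_xdmac_get_icg() above: a direction-specific gap (src_icg/dst_icg) always wins when non-zero, the generic icg applies only when the corresponding sgl flag is set, and a non-incrementing side never skips. A standalone sketch mirroring that logic (plain C, outside the driver; get_icg and the values are illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Mirrors the fallback logic of the driver's at_xdmac_get_icg(). */
static size_t get_icg(bool inc, bool sgl, size_t icg, size_t dir_icg)
{
	if (inc) {
		if (dir_icg)
			return dir_icg;
		else if (sgl)
			return icg;
	}
	return 0;
}

int main(void)
{
	assert(get_icg(true, true, 192, 0) == 192);	/* generic icg used */
	assert(get_icg(true, true, 192, 64) == 64);	/* dir_icg wins */
	assert(get_icg(true, false, 192, 0) == 0);	/* no sgl: contiguous */
	assert(get_icg(false, true, 192, 64) == 0);	/* fixed-address side */
	return 0;
}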