author		Joel Fernandes <joelf@ti.com>		2014-04-18 22:50:33 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2014-04-22 12:08:56 -0400
commit		8cc3e30bea9a90f9ab7a1bc4612792c40ad7ae95 (patch)
tree		ff4990234e5673004afb6ac75a0e356589281b3d /drivers/dma
parent		e6fad592b0e8a6205f23a3e55b2e682e4f36d32f (diff)
dmaengine: edma: Add DMA memcpy support
Add DMA memcpy support to the EDMA driver. Successful tests were performed
using the dmatest kernel module. Copy alignment is set to
DMA_SLAVE_BUSWIDTH_4_BYTES, and users must ensure the length is aligned so
that the copy is performed fully.
Signed-off-by: Joel Fernandes <joelf@ti.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
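
A minimal, hypothetical client-side sketch of how the new DMA_MEMCPY
capability can be exercised through the generic dmaengine API.
edma_memcpy_demo() is an illustrative name, not part of this patch; the
4-byte length check follows the alignment note above:

#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Illustrative sketch only; not part of this patch. */
static int edma_memcpy_demo(struct device *dev, void *dst, void *src,
			    size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_src, dma_dst;
	dma_cookie_t cookie;
	int ret = 0;

	/* Per the commit message, the length must be 4-byte aligned. */
	if (!IS_ALIGNED(len, 4))
		return -EINVAL;

	/* Ask the dmaengine core for any channel with memcpy capability. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	dma_src = dma_map_single(dev, src, len, DMA_TO_DEVICE);
	dma_dst = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);

	/* On EDMA this reaches edma_prep_dma_memcpy() via the new hook. */
	tx = chan->device->device_prep_dma_memcpy(chan, dma_dst, dma_src,
						  len, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -ENOMEM;
		goto out;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* Busy-wait for completion; a real client would set a callback. */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -EIO;

out:
	dma_unmap_single(dev, dma_dst, len, DMA_FROM_DEVICE);
	dma_unmap_single(dev, dma_src, len, DMA_TO_DEVICE);
	dma_release_channel(chan);
	return ret;
}

The dmatest module mentioned above exercises this same prep/submit/issue
path with randomized buffers and verifies the copied data.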
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/edma.c	51
1 file changed, 51 insertions, 0 deletions
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index ea04b2192822..43f56a7d9d61 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -379,6 +379,11 @@ static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
 		src_cidx = 0;
 		dst_bidx = acnt;
 		dst_cidx = cidx;
+	} else if (direction == DMA_MEM_TO_MEM) {
+		src_bidx = acnt;
+		src_cidx = cidx;
+		dst_bidx = acnt;
+		dst_cidx = cidx;
 	} else {
 		dev_err(dev, "%s: direction not implemented yet\n", __func__);
 		return -EINVAL;
@@ -499,6 +504,44 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
 }
 
+struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
+	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+	size_t len, unsigned long tx_flags)
+{
+	int ret;
+	struct edma_desc *edesc;
+	struct device *dev = chan->device->dev;
+	struct edma_chan *echan = to_edma_chan(chan);
+
+	if (unlikely(!echan || !len))
+		return NULL;
+
+	edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
+	if (!edesc) {
+		dev_dbg(dev, "Failed to allocate a descriptor\n");
+		return NULL;
+	}
+
+	edesc->pset_nr = 1;
+
+	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
+			       DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
+	if (ret < 0)
+		return NULL;
+
+	edesc->absync = ret;
+
+	/*
+	 * Enable intermediate transfer chaining to re-trigger channel
+	 * on completion of every TR, and enable transfer-completion
+	 * interrupt on completion of the whole transfer.
+	 */
+	edesc->pset[0].opt |= ITCCHEN;
+	edesc->pset[0].opt |= TCINTEN;
+
+	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
 static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 	size_t period_len, enum dma_transfer_direction direction,
@@ -877,6 +920,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
 {
 	dma->device_prep_slave_sg = edma_prep_slave_sg;
 	dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
+	dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
 	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
 	dma->device_free_chan_resources = edma_free_chan_resources;
 	dma->device_issue_pending = edma_issue_pending;
@@ -885,6 +929,12 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
 	dma->device_slave_caps = edma_dma_device_slave_caps;
 	dma->dev = dev;
 
+	/*
+	 * code using dma memcpy must make sure alignment of
+	 * length is at dma->copy_align boundary.
+	 */
+	dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
 	INIT_LIST_HEAD(&dma->channels);
 }
 
@@ -913,6 +963,7 @@ static int edma_probe(struct platform_device *pdev)
 	dma_cap_zero(ecc->dma_slave.cap_mask);
 	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
 	dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
+	dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);
 
 	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);
 
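
As a side note on the copy_align contract added above: dmaengine already
provides is_dma_copy_aligned() in <linux/dmaengine.h>, which checks source
offset, destination offset, and length against a device's advertised
alignment. A client could use it instead of an open-coded check; in this
sketch, src_off and dst_off are hypothetical offsets into the mapped
buffers:

	/* Illustrative check against the device's advertised copy_align. */
	if (!is_dma_copy_aligned(chan->device, src_off, dst_off, len))
		return -EINVAL;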