author    Daniel Mack <zonque@gmail.com>    2013-08-21 08:08:56 -0400
committer Vinod Koul <vinod.koul@intel.com>    2013-08-25 12:34:52 -0400
commit    50440d74aae31893f0c901b9effbd52b43d3ce63 (patch)
tree      711f1f87b7243088300e9d43c6ecd13d54a07a60 /drivers/dma/mmp_pdma.c
parent    0cd6156177a10d39fb5811bcd23e1d3b7e58f1c0 (diff)
dma: mmp_pdma: add support for cyclic DMA descriptors
Provide a callback to prepare cyclic DMA transfers. This is for instance
needed for audio channel transport.

Signed-off-by: Daniel Mack <zonque@gmail.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
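[Editorial note, not part of the commit: a dmaengine client drives this new
hook through the generic cyclic API. A minimal sketch of such a client,
assuming a channel already obtained via dma_request_slave_channel() and
hypothetical names "cfg", "buf", "len", "period_len", "period_done" and
"priv":

	struct dma_async_tx_descriptor *desc;

	/* program device address, burst size and register width first */
	if (dmaengine_slave_config(chan, &cfg))
		return -EINVAL;

	/* one period per hardware descriptor; the driver closes the ring */
	desc = dmaengine_prep_dma_cyclic(chan, buf, len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = period_done;	/* invoked once per completed period */
	desc->callback_param = priv;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
]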
Diffstat (limited to 'drivers/dma/mmp_pdma.c')
 drivers/dma/mmp_pdma.c | 112 +++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 111 insertions(+), 1 deletion(-)
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 3676fdeac96d..f0e6d7d49b06 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -98,6 +98,9 @@ struct mmp_pdma_chan {
 	struct mmp_pdma_phy *phy;
 	enum dma_transfer_direction dir;
 
+	struct mmp_pdma_desc_sw *cyclic_first;	/* first desc_sw if channel
+						 * is in cyclic mode */
+
 	/* channel's basic info */
 	struct tasklet_struct tasklet;
 	u32 dcmd;
@@ -500,6 +503,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
 	new->desc.ddadr = DDADR_STOP;
 	new->desc.dcmd |= DCMD_ENDIRQEN;
 
+	chan->cyclic_first = NULL;
+
 	return &first->async_tx;
 
 fail:
@@ -575,6 +580,94 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
 	new->desc.ddadr = DDADR_STOP;
 	new->desc.dcmd |= DCMD_ENDIRQEN;
 
+	chan->dir = dir;
+	chan->cyclic_first = NULL;
+
+	return &first->async_tx;
+
+fail:
+	if (first)
+		mmp_pdma_free_desc_list(chan, &first->tx_list);
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
+	struct dma_chan *dchan, dma_addr_t buf_addr, size_t len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct mmp_pdma_chan *chan;
+	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
+	dma_addr_t dma_src, dma_dst;
+
+	if (!dchan || !len || !period_len)
+		return NULL;
+
+	/* the buffer length must be a multiple of period_len */
+	if (len % period_len != 0)
+		return NULL;
+
+	if (period_len > PDMA_MAX_DESC_BYTES)
+		return NULL;
+
+	chan = to_mmp_pdma_chan(dchan);
+
+	switch (direction) {
+	case DMA_MEM_TO_DEV:
+		dma_src = buf_addr;
+		dma_dst = chan->dev_addr;
+		break;
+	case DMA_DEV_TO_MEM:
+		dma_dst = buf_addr;
+		dma_src = chan->dev_addr;
+		break;
+	default:
+		dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
+		return NULL;
+	}
+
+	chan->dir = direction;
+
+	do {
+		/* Allocate the link descriptor from DMA pool */
+		new = mmp_pdma_alloc_descriptor(chan);
+		if (!new) {
+			dev_err(chan->dev, "no memory for desc\n");
+			goto fail;
+		}
+
+		new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN |
+				 (DCMD_LENGTH & period_len);
+		new->desc.dsadr = dma_src;
+		new->desc.dtadr = dma_dst;
+
+		if (!first)
+			first = new;
+		else
+			prev->desc.ddadr = new->async_tx.phys;
+
+		new->async_tx.cookie = 0;
+		async_tx_ack(&new->async_tx);
+
+		prev = new;
+		len -= period_len;
+
+		if (chan->dir == DMA_MEM_TO_DEV)
+			dma_src += period_len;
+		else
+			dma_dst += period_len;
+
+		/* Insert the link descriptor to the LD ring */
+		list_add_tail(&new->node, &first->tx_list);
+	} while (len);
+
+	first->async_tx.flags = flags; /* client is in control of this ack */
+	first->async_tx.cookie = -EBUSY;
+
+	/* make the cyclic link */
+	new->desc.ddadr = first->async_tx.phys;
+	chan->cyclic_first = first;
+
 	return &first->async_tx;
 
 fail:
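[Editorial note, not part of the patch: each period becomes one hardware
descriptor with DCMD_ENDIRQEN set, so the controller raises an interrupt per
completed period, and the "cyclic link" simply points the last descriptor's
ddadr back at the first. For a hypothetical three-period buffer the ring the
loop builds looks like:

	/* sketch: ddadr chaining for len / period_len == 3 */
	d[0].desc.ddadr = d[1].async_tx.phys;
	d[1].desc.ddadr = d[2].async_tx.phys;
	d[2].desc.ddadr = d[0].async_tx.phys;	/* wrap: runs until terminated */
]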
@@ -681,8 +774,23 @@ static void dma_do_tasklet(unsigned long data)
 	LIST_HEAD(chain_cleanup);
 	unsigned long flags;
 
-	/* submit pending list; callback for each desc; free desc */
+	if (chan->cyclic_first) {
+		dma_async_tx_callback cb = NULL;
+		void *cb_data = NULL;
 
+		spin_lock_irqsave(&chan->desc_lock, flags);
+		desc = chan->cyclic_first;
+		cb = desc->async_tx.callback;
+		cb_data = desc->async_tx.callback_param;
+		spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+		if (cb)
+			cb(cb_data);
+
+		return;
+	}
+
+	/* submit pending list; callback for each desc; free desc */
 	spin_lock_irqsave(&chan->desc_lock, flags);
 
 	list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
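[Editorial note, not part of the patch: in cyclic mode the tasklet only fires
the client callback and returns; descriptors are never completed or freed,
since the transfer loops until the channel is terminated. For audio, the use
case named in the commit message, the callback would typically look like this
hypothetical sketch:

	#include <sound/pcm.h>

	/* called from the DMA tasklet once per elapsed period */
	static void period_done(void *arg)
	{
		struct snd_pcm_substream *substream = arg;

		snd_pcm_period_elapsed(substream);
	}
]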
@@ -876,12 +984,14 @@ static int mmp_pdma_probe(struct platform_device *op)
 
 	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
 	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
+	dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
 	pdev->device.dev = &op->dev;
 	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
 	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
 	pdev->device.device_tx_status = mmp_pdma_tx_status;
 	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
 	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
+	pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
 	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
 	pdev->device.device_control = mmp_pdma_control;
 	pdev->device.copy_align = PDMA_ALIGNMENT;
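[Editorial note, not part of the patch: with DMA_CYCLIC set in the capability
mask, a client can probe for cyclic support before relying on it, e.g. with
this hypothetical check:

	if (!dma_has_cap(DMA_CYCLIC, chan->device->cap_mask))
		return -ENODEV;	/* fall back to non-cyclic transfers */
]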