aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma/edma.c
diff options
context:
space:
mode:
authorJoel Fernandes <joelf@ti.com>2013-10-31 17:31:23 -0400
committerVinod Koul <vinod.koul@intel.com>2013-11-11 22:47:14 -0500
commit50a9c70714dfb17a85a3fb762675a64f598d504b (patch)
tree5a58cf777087ed8948860577dc58c23397f2049b /drivers/dma/edma.c
parent6b327a028f09a054ac09ef854a90b6e49027f39a (diff)
dma: edma: Add support for Cyclic DMA
Using the PaRAM configuration function that we split for reuse by the different DMA types, we implement Cyclic DMA support. For the cyclic case, we pass different configuration parameters to this function, and handle all the Cyclic-specific functionality separately. Callbacks to the DMA users are handled using vchan_cyclic_callback in the virt-dma layer. Linking is handled the same way as the slave SG case except for the last slot where we link it back to the first one in a cyclic fashion. For continuity, we check for cases where the number of periods is greater than the MAX number of slots the driver can allocate for a particular descriptor and error out on such cases. Signed-off-by: Joel Fernandes <joelf@ti.com> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/edma.c')
-rw-r--r--drivers/dma/edma.c159
1 files changed, 151 insertions, 8 deletions
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 183ec85cb6ca..592f3be8b435 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -60,6 +60,7 @@
60struct edma_desc { 60struct edma_desc {
61 struct virt_dma_desc vdesc; 61 struct virt_dma_desc vdesc;
62 struct list_head node; 62 struct list_head node;
63 int cyclic;
63 int absync; 64 int absync;
64 int pset_nr; 65 int pset_nr;
65 int processed; 66 int processed;
@@ -173,8 +174,13 @@ static void edma_execute(struct edma_chan *echan)
173 * then setup a link to the dummy slot, this results in all future 174 * then setup a link to the dummy slot, this results in all future
174 * events being absorbed and that's OK because we're done 175 * events being absorbed and that's OK because we're done
175 */ 176 */
176 if (edesc->processed == edesc->pset_nr) 177 if (edesc->processed == edesc->pset_nr) {
177 edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot); 178 if (edesc->cyclic)
179 edma_link(echan->slot[nslots-1], echan->slot[1]);
180 else
181 edma_link(echan->slot[nslots-1],
182 echan->ecc->dummy_slot);
183 }
178 184
179 edma_resume(echan->ch_num); 185 edma_resume(echan->ch_num);
180 186
@@ -459,6 +465,138 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
459 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); 465 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
460} 466}
461 467
468static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
469 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
470 size_t period_len, enum dma_transfer_direction direction,
471 unsigned long tx_flags, void *context)
472{
473 struct edma_chan *echan = to_edma_chan(chan);
474 struct device *dev = chan->device->dev;
475 struct edma_desc *edesc;
476 dma_addr_t src_addr, dst_addr;
477 enum dma_slave_buswidth dev_width;
478 u32 burst;
479 int i, ret, nslots;
480
481 if (unlikely(!echan || !buf_len || !period_len))
482 return NULL;
483
484 if (direction == DMA_DEV_TO_MEM) {
485 src_addr = echan->cfg.src_addr;
486 dst_addr = buf_addr;
487 dev_width = echan->cfg.src_addr_width;
488 burst = echan->cfg.src_maxburst;
489 } else if (direction == DMA_MEM_TO_DEV) {
490 src_addr = buf_addr;
491 dst_addr = echan->cfg.dst_addr;
492 dev_width = echan->cfg.dst_addr_width;
493 burst = echan->cfg.dst_maxburst;
494 } else {
495 dev_err(dev, "%s: bad direction?\n", __func__);
496 return NULL;
497 }
498
499 if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
500 dev_err(dev, "Undefined slave buswidth\n");
501 return NULL;
502 }
503
504 if (unlikely(buf_len % period_len)) {
505 dev_err(dev, "Period should be multiple of Buffer length\n");
506 return NULL;
507 }
508
509 nslots = (buf_len / period_len) + 1;
510
511 /*
512 * Cyclic DMA users such as audio cannot tolerate delays introduced
513 * by cases where the number of periods is more than the maximum
514 * number of SGs the EDMA driver can handle at a time. For DMA types
515 * such as Slave SGs, such delays are tolerable and synchronized,
516 * but the synchronization is difficult to achieve with Cyclic and
517 * cannot be guaranteed, so we error out early.
518 */
519 if (nslots > MAX_NR_SG)
520 return NULL;
521
522 edesc = kzalloc(sizeof(*edesc) + nslots *
523 sizeof(edesc->pset[0]), GFP_ATOMIC);
524 if (!edesc) {
525 dev_dbg(dev, "Failed to allocate a descriptor\n");
526 return NULL;
527 }
528
529 edesc->cyclic = 1;
530 edesc->pset_nr = nslots;
531
532 dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots);
533 dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len);
534 dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len);
535
536 for (i = 0; i < nslots; i++) {
537 /* Allocate a PaRAM slot, if needed */
538 if (echan->slot[i] < 0) {
539 echan->slot[i] =
540 edma_alloc_slot(EDMA_CTLR(echan->ch_num),
541 EDMA_SLOT_ANY);
542 if (echan->slot[i] < 0) {
543 dev_err(dev, "Failed to allocate slot\n");
544 return NULL;
545 }
546 }
547
548 if (i == nslots - 1) {
549 memcpy(&edesc->pset[i], &edesc->pset[0],
550 sizeof(edesc->pset[0]));
551 break;
552 }
553
554 ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
555 dst_addr, burst, dev_width, period_len,
556 direction);
557 if (ret < 0)
558 return NULL;
559
560 if (direction == DMA_DEV_TO_MEM)
561 dst_addr += period_len;
562 else
563 src_addr += period_len;
564
565 dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
566 dev_dbg(dev,
567 "\n pset[%d]:\n"
568 " chnum\t%d\n"
569 " slot\t%d\n"
570 " opt\t%08x\n"
571 " src\t%08x\n"
572 " dst\t%08x\n"
573 " abcnt\t%08x\n"
574 " ccnt\t%08x\n"
575 " bidx\t%08x\n"
576 " cidx\t%08x\n"
577 " lkrld\t%08x\n",
578 i, echan->ch_num, echan->slot[i],
579 edesc->pset[i].opt,
580 edesc->pset[i].src,
581 edesc->pset[i].dst,
582 edesc->pset[i].a_b_cnt,
583 edesc->pset[i].ccnt,
584 edesc->pset[i].src_dst_bidx,
585 edesc->pset[i].src_dst_cidx,
586 edesc->pset[i].link_bcntrld);
587
588 edesc->absync = ret;
589
590 /*
591 * Enable interrupts for every period because callback
592 * has to be called for every period.
593 */
594 edesc->pset[i].opt |= TCINTEN;
595 }
596
597 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
598}
599
462static void edma_callback(unsigned ch_num, u16 ch_status, void *data) 600static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
463{ 601{
464 struct edma_chan *echan = data; 602 struct edma_chan *echan = data;
@@ -467,24 +605,28 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
467 unsigned long flags; 605 unsigned long flags;
468 struct edmacc_param p; 606 struct edmacc_param p;
469 607
470 /* Pause the channel */ 608 edesc = echan->edesc;
471 edma_pause(echan->ch_num); 609
610 /* Pause the channel for non-cyclic */
611 if (!edesc || (edesc && !edesc->cyclic))
612 edma_pause(echan->ch_num);
472 613
473 switch (ch_status) { 614 switch (ch_status) {
474 case EDMA_DMA_COMPLETE: 615 case EDMA_DMA_COMPLETE:
475 spin_lock_irqsave(&echan->vchan.lock, flags); 616 spin_lock_irqsave(&echan->vchan.lock, flags);
476 617
477 edesc = echan->edesc;
478 if (edesc) { 618 if (edesc) {
479 if (edesc->processed == edesc->pset_nr) { 619 if (edesc->cyclic) {
620 vchan_cyclic_callback(&edesc->vdesc);
621 } else if (edesc->processed == edesc->pset_nr) {
480 dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); 622 dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
481 edma_stop(echan->ch_num); 623 edma_stop(echan->ch_num);
482 vchan_cookie_complete(&edesc->vdesc); 624 vchan_cookie_complete(&edesc->vdesc);
625 edma_execute(echan);
483 } else { 626 } else {
484 dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); 627 dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
628 edma_execute(echan);
485 } 629 }
486
487 edma_execute(echan);
488 } 630 }
489 631
490 spin_unlock_irqrestore(&echan->vchan.lock, flags); 632 spin_unlock_irqrestore(&echan->vchan.lock, flags);
@@ -680,6 +822,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
680 struct device *dev) 822 struct device *dev)
681{ 823{
682 dma->device_prep_slave_sg = edma_prep_slave_sg; 824 dma->device_prep_slave_sg = edma_prep_slave_sg;
825 dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
683 dma->device_alloc_chan_resources = edma_alloc_chan_resources; 826 dma->device_alloc_chan_resources = edma_alloc_chan_resources;
684 dma->device_free_chan_resources = edma_free_chan_resources; 827 dma->device_free_chan_resources = edma_free_chan_resources;
685 dma->device_issue_pending = edma_issue_pending; 828 dma->device_issue_pending = edma_issue_pending;