author    Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>  2014-04-02 23:17:00 -0400
committer Vinod Koul <vinod.koul@intel.com>                     2014-05-02 12:18:33 -0400
commit    dfbb85cab5f0819d0424a3637b03e7892704fa42
tree      7241a4f922d4bd21d8e01aa825a5be6021fb52bf /drivers/dma
parent    91ea74e9ec5c584eef1dcd69554b8315c1ebb0d9
DMA: shdma: add cyclic transfer support
This patch adds cyclic transfer support and enables dmaengine_prep_dma_cyclic().

Signed-off-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
[reflowed changelog for readability]
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
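For context, a client would reach the new device_prep_dma_cyclic callback through the generic dmaengine API. A minimal, hypothetical consumer sketch follows; it is not part of this patch, the helper name start_cyclic_rx() and the callback wiring are illustrative, and it assumes the standard dmaengine_prep_dma_cyclic() wrapper from <linux/dmaengine.h>:

#include <linux/dmaengine.h>

/*
 * Hypothetical example, not from this patch: start a cyclic
 * device-to-memory transfer and get a callback per completed period.
 */
static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len,
			   dma_async_tx_callback period_done, void *arg)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* one descriptor covering buf_len, split into buf_len/period_len periods */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	/* invoked after each completed period */
	desc->callback = period_done;
	desc->callback_param = arg;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}

On the driver side, descriptors prepared with cyclic = true are marked DESC_SUBMITTED again in __ld_cleanup() and spliced back onto ld_queue instead of being moved to ld_free, which is what keeps the transfer running until it is terminated.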
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/sh/shdma-base.c  72
1 file changed, 65 insertions(+), 7 deletions(-)
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 6786ecbd5ed4..974794cdb6ed 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -304,6 +304,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
 	dma_async_tx_callback callback = NULL;
 	void *param = NULL;
 	unsigned long flags;
+	LIST_HEAD(cyclic_list);
 
 	spin_lock_irqsave(&schan->chan_lock, flags);
 	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
@@ -369,10 +370,16 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
 		if (((desc->mark == DESC_COMPLETED ||
 		      desc->mark == DESC_WAITING) &&
 		     async_tx_test_ack(&desc->async_tx)) || all) {
-			/* Remove from ld_queue list */
-			desc->mark = DESC_IDLE;
 
-			list_move(&desc->node, &schan->ld_free);
+			if (all || !desc->cyclic) {
+				/* Remove from ld_queue list */
+				desc->mark = DESC_IDLE;
+				list_move(&desc->node, &schan->ld_free);
+			} else {
+				/* reuse as cyclic */
+				desc->mark = DESC_SUBMITTED;
+				list_move_tail(&desc->node, &cyclic_list);
+			}
 
 			if (list_empty(&schan->ld_queue)) {
 				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
@@ -389,6 +396,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
 	 */
 	schan->dma_chan.completed_cookie = schan->dma_chan.cookie;
 
+	list_splice_tail(&cyclic_list, &schan->ld_queue);
+
 	spin_unlock_irqrestore(&schan->chan_lock, flags);
 
 	if (callback)
@@ -521,7 +530,7 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
  */
 static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
 	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
-	enum dma_transfer_direction direction, unsigned long flags)
+	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
 {
 	struct scatterlist *sg;
 	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
@@ -569,7 +578,11 @@ static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
 		if (!new)
 			goto err_get_desc;
 
-		new->chunks = chunks--;
+		new->cyclic = cyclic;
+		if (cyclic)
+			new->chunks = 1;
+		else
+			new->chunks = chunks--;
 		list_add_tail(&new->node, &tx_list);
 	} while (len);
 }
@@ -612,7 +625,8 @@ static struct dma_async_tx_descriptor *shdma_prep_memcpy(
 	sg_dma_address(&sg) = dma_src;
 	sg_dma_len(&sg) = len;
 
-	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags);
+	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
+			     flags, false);
 }
 
 static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
@@ -640,7 +654,50 @@ static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
 	slave_addr = ops->slave_addr(schan);
 
 	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
-			      direction, flags);
+			      direction, flags, false);
+}
+
+struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
+	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct shdma_chan *schan = to_shdma_chan(chan);
+	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+	const struct shdma_ops *ops = sdev->ops;
+	unsigned int sg_len = buf_len / period_len;
+	int slave_id = schan->slave_id;
+	dma_addr_t slave_addr;
+	struct scatterlist sgl[sg_len];
+	int i;
+
+	if (!chan)
+		return NULL;
+
+	BUG_ON(!schan->desc_num);
+
+	/* Someone calling slave DMA on a generic channel? */
+	if (slave_id < 0 || (buf_len < period_len)) {
+		dev_warn(schan->dev,
+			"%s: bad parameter: buf_len=%d, period_len=%d, id=%d\n",
+			__func__, buf_len, period_len, slave_id);
+		return NULL;
+	}
+
+	slave_addr = ops->slave_addr(schan);
+
+	sg_init_table(sgl, sg_len);
+	for (i = 0; i < sg_len; i++) {
+		dma_addr_t src = buf_addr + (period_len * i);
+
+		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
+			    offset_in_page(src));
+		sg_dma_address(&sgl[i]) = src;
+		sg_dma_len(&sgl[i]) = period_len;
+	}
+
+	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
+			     direction, flags, true);
 }
 
 static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -915,6 +972,7 @@ int shdma_init(struct device *dev, struct shdma_dev *sdev,
 
 	/* Compulsory for DMA_SLAVE fields */
 	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
+	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
 	dma_dev->device_control = shdma_control;
 
 	dma_dev->dev = dev;