author		Javier Martin <javier.martin@vista-silicon.com>	2012-03-22 09:54:14 -0400
committer	Vinod Koul <vinod.koul@linux.intel.com>	2012-03-26 02:01:32 -0400
commit		f606ab897b6d7f35b57c7474424676e30457520b (patch)
tree		2a52df10191cbe331160230894be3542df08e416 /drivers/dma/imx-dma.c
parent		f9b283a6e41be584f4b1f4c6634625f41ff0c728 (diff)
dmaengine: i.MX: Add support for interleaved transfers.
i.MX2 and i.MX1 chips can perform interleaved transfers, with two
constraints:

- Only one chunk can be used (i.e. only 2D transfers are allowed).
- Only 2 interleaved configurations can be applied at the same time
  for all channels.

Since this patch adds a new resource, 'slots_2d', which is shared by
all the DMA channels, the 'lock' member has been moved from the channel
structure to the global 'imxdma_engine' structure to avoid disgusting
locking BUGs.

Signed-off-by: Javier Martin <javier.martin@vista-silicon.com>
Acked-by: Sascha Hauer <s.hauer@pengutronix.de>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
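For context, a client exercises this feature through the generic
dmaengine interleaved API. A minimal sketch follows ('chan', 'src_phys'
and 'dst_phys' are assumed to come from the usual channel/buffer setup,
the geometry is illustrative, error handling is omitted, and the
dmaengine_prep_interleaved_dma() wrapper of later kernels is assumed to
be available; otherwise call chan->device->device_prep_interleaved_dma()
directly):

	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;

	/* Exactly one chunk per frame: the only layout this driver accepts */
	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	xt->src_start = src_phys;	/* assumed DMA addresses */
	xt->dst_start = dst_phys;
	xt->dir = DMA_MEM_TO_MEM;	/* the only direction accepted */
	xt->numf = 240;			/* number of lines (frames) */
	xt->frame_size = 1;		/* one chunk, i.e. a pure 2D transfer */
	xt->sgl[0].size = 320;		/* bytes copied per line */
	xt->sgl[0].icg = 192;		/* gap before the next line starts */
	xt->src_sgl = true;		/* the source side is interleaved */
	xt->dst_sgl = false;		/* the destination is written densely */

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);

The returned descriptor is then submitted and started with the usual
dmaengine_submit()/dma_async_issue_pending() pair.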
Diffstat (limited to 'drivers/dma/imx-dma.c')
-rw-r--r--	drivers/dma/imx-dma.c	146
1 file changed, 131 insertions(+), 15 deletions(-)
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 307cd142f06a..0f698f883cca 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -36,6 +36,10 @@
 #define IMXDMA_MAX_CHAN_DESCRIPTORS	16
 #define IMX_DMA_CHANNELS	16
 
+#define IMX_DMA_2D_SLOTS	2
+#define IMX_DMA_2D_SLOT_A	0
+#define IMX_DMA_2D_SLOT_B	1
+
 #define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
 #define IMX_DMA_MEMSIZE_32	(0 << 4)
 #define IMX_DMA_MEMSIZE_8	(1 << 4)
@@ -111,6 +115,13 @@ enum imxdma_prep_type {
 	IMXDMA_DESC_CYCLIC,
 };
 
+struct imx_dma_2d_config {
+	u16	xsr;
+	u16	ysr;
+	u16	wsr;
+	int	count;
+};
+
 struct imxdma_desc {
 	struct list_head		node;
 	struct dma_async_tx_descriptor	desc;
@@ -147,13 +158,14 @@ struct imxdma_channel {
 	dma_addr_t			per_address;
 	u32				watermark_level;
 	struct dma_chan			chan;
-	spinlock_t			lock;
 	struct dma_async_tx_descriptor	desc;
 	enum dma_status			status;
 	int				dma_request;
 	struct scatterlist		*sg_list;
 	u32				ccr_from_device;
 	u32				ccr_to_device;
+	bool				enabled_2d;
+	int				slot_2d;
 };
 
 struct imxdma_engine {
@@ -162,6 +174,8 @@ struct imxdma_engine {
 	struct dma_device		dma_device;
 	void __iomem			*base;
 	struct clk			*dma_clk;
+	spinlock_t			lock;
+	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
 	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
 };
 
@@ -361,16 +375,16 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
 	int chno = imxdmac->channel;
 	struct imxdma_desc *desc;
 
-	spin_lock(&imxdmac->lock);
+	spin_lock(&imxdma->lock);
 	if (list_empty(&imxdmac->ld_active)) {
-		spin_unlock(&imxdmac->lock);
+		spin_unlock(&imxdma->lock);
 		goto out;
 	}
 
 	desc = list_first_entry(&imxdmac->ld_active,
 				struct imxdma_desc,
 				node);
-	spin_unlock(&imxdmac->lock);
+	spin_unlock(&imxdma->lock);
 
 	if (desc->sg) {
 		u32 tmp;
@@ -442,9 +456,55 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
+	unsigned long flags;
+	int slot = -1;
+	int i;
 
 	/* Configure and enable */
 	switch (d->type) {
+	case IMXDMA_DESC_INTERLEAVED:
+		/* Try to get a free 2D slot */
+		spin_lock_irqsave(&imxdma->lock, flags);
+		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
+			if ((imxdma->slots_2d[i].count > 0) &&
+			    ((imxdma->slots_2d[i].xsr != d->x) ||
+			     (imxdma->slots_2d[i].ysr != d->y) ||
+			     (imxdma->slots_2d[i].wsr != d->w)))
+				continue;
+			slot = i;
+			break;
+		}
+		if (slot < 0) {
+			spin_unlock_irqrestore(&imxdma->lock, flags);
+			return -EBUSY;
+		}
+
+		imxdma->slots_2d[slot].xsr = d->x;
+		imxdma->slots_2d[slot].ysr = d->y;
+		imxdma->slots_2d[slot].wsr = d->w;
+		imxdma->slots_2d[slot].count++;
+
+		imxdmac->slot_2d = slot;
+		imxdmac->enabled_2d = true;
+		spin_unlock_irqrestore(&imxdma->lock, flags);
+
+		if (slot == IMX_DMA_2D_SLOT_A) {
+			d->config_mem &= ~CCR_MSEL_B;
+			d->config_port &= ~CCR_MSEL_B;
+			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
+			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
+			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
+		} else {
+			d->config_mem |= CCR_MSEL_B;
+			d->config_port |= CCR_MSEL_B;
+			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
+			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
+			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
+		}
+		/*
+		 * We fall through here intentionally, since a 2D transfer is
+		 * similar to MEMCPY, just adding the 2D slot configuration.
+		 */
 	case IMXDMA_DESC_MEMCPY:
 		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
 		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
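The slot search above encodes the second constraint from the commit
message: a slot whose XSR/YSR/WSR already match the requested geometry
may be shared (it is reference counted), while otherwise only an unused
slot (count == 0) can be claimed. The same policy in isolation, as a
hypothetical helper that is not part of the patch:

	/* Return a matching or free 2D slot index, else -EBUSY.
	 * The caller must hold imxdma->lock. */
	static int imxdma_2d_slot_find(struct imx_dma_2d_config *slots,
				       u16 x, u16 y, u16 w)
	{
		int i;

		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if (slots[i].count > 0 &&
			    (slots[i].xsr != x || slots[i].ysr != y ||
			     slots[i].wsr != w))
				continue;	/* busy with another geometry */
			return i;
		}
		return -EBUSY;
	}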
@@ -503,7 +561,7 @@ static void imxdma_tasklet(unsigned long data)
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
 	struct imxdma_desc *desc;
 
-	spin_lock(&imxdmac->lock);
+	spin_lock(&imxdma->lock);
 
 	if (list_empty(&imxdmac->ld_active)) {
 		/* Someone might have called terminate all */
@@ -520,6 +578,12 @@ static void imxdma_tasklet(unsigned long data)
 	if (imxdma_chan_is_doing_cyclic(imxdmac))
 		goto out;
 
+	/* Free 2D slot if it was an interleaved transfer */
+	if (imxdmac->enabled_2d) {
+		imxdma->slots_2d[imxdmac->slot_2d].count--;
+		imxdmac->enabled_2d = false;
+	}
+
 	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
 
 	if (!list_empty(&imxdmac->ld_queue)) {
@@ -531,7 +595,7 @@ static void imxdma_tasklet(unsigned long data)
 			__func__, imxdmac->channel);
 	}
 out:
-	spin_unlock(&imxdmac->lock);
+	spin_unlock(&imxdma->lock);
 }
 
 static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -547,10 +611,10 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	case DMA_TERMINATE_ALL:
 		imxdma_disable_hw(imxdmac);
 
-		spin_lock_irqsave(&imxdmac->lock, flags);
+		spin_lock_irqsave(&imxdma->lock, flags);
 		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
 		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
-		spin_unlock_irqrestore(&imxdmac->lock, flags);
+		spin_unlock_irqrestore(&imxdma->lock, flags);
 		return 0;
 	case DMA_SLAVE_CONFIG:
 		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
@@ -610,12 +674,13 @@ static enum dma_status imxdma_tx_status(struct dma_chan *chan,
 static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
+	struct imxdma_engine *imxdma = imxdmac->imxdma;
 	dma_cookie_t cookie;
 	unsigned long flags;
 
-	spin_lock_irqsave(&imxdmac->lock, flags);
+	spin_lock_irqsave(&imxdma->lock, flags);
 	cookie = dma_cookie_assign(tx);
-	spin_unlock_irqrestore(&imxdmac->lock, flags);
+	spin_unlock_irqrestore(&imxdma->lock, flags);
 
 	return cookie;
 }
@@ -654,16 +719,17 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan)
 static void imxdma_free_chan_resources(struct dma_chan *chan)
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+	struct imxdma_engine *imxdma = imxdmac->imxdma;
 	struct imxdma_desc *desc, *_desc;
 	unsigned long flags;
 
-	spin_lock_irqsave(&imxdmac->lock, flags);
+	spin_lock_irqsave(&imxdma->lock, flags);
 
 	imxdma_disable_hw(imxdmac);
 	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
 	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
 
-	spin_unlock_irqrestore(&imxdmac->lock, flags);
+	spin_unlock_irqrestore(&imxdma->lock, flags);
 
 	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
 		kfree(desc);
@@ -818,6 +884,49 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
 	return &desc->desc;
 }
 
+static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
+	struct dma_chan *chan, struct dma_interleaved_template *xt,
+	unsigned long flags)
+{
+	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+	struct imxdma_engine *imxdma = imxdmac->imxdma;
+	struct imxdma_desc *desc;
+
+	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n"
+		" src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__,
+		imxdmac->channel, xt->src_start, xt->dst_start,
+		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
+		xt->numf, xt->frame_size);
+
+	if (list_empty(&imxdmac->ld_free) ||
+	    imxdma_chan_is_doing_cyclic(imxdmac))
+		return NULL;
+
+	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
+		return NULL;
+
+	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
+
+	desc->type = IMXDMA_DESC_INTERLEAVED;
+	desc->src = xt->src_start;
+	desc->dest = xt->dst_start;
+	desc->x = xt->sgl[0].size;
+	desc->y = xt->numf;
+	desc->w = xt->sgl[0].icg + desc->x;
+	desc->len = desc->x * desc->y;
+	desc->direction = DMA_MEM_TO_MEM;
+	desc->config_port = IMX_DMA_MEMSIZE_32;
+	desc->config_mem = IMX_DMA_MEMSIZE_32;
+	if (xt->src_sgl)
+		desc->config_mem |= IMX_DMA_TYPE_2D;
+	if (xt->dst_sgl)
+		desc->config_port |= IMX_DMA_TYPE_2D;
+	desc->desc.callback = NULL;
+	desc->desc.callback_param = NULL;
+
+	return &desc->desc;
+}
+
 static void imxdma_issue_pending(struct dma_chan *chan)
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
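The x/y/w assignments above are the whole mapping from the generic
template onto the hardware's 2D parameters. A worked example, reusing
the assumed geometry from the sketch after the commit message:

	/*
	 * 240 lines of 320 bytes each, with a 192-byte gap per line:
	 *
	 *   desc->x = xt->sgl[0].size          = 320   (-> XSR, line length)
	 *   desc->y = xt->numf                 = 240   (-> YSR, line count)
	 *   desc->w = xt->sgl[0].icg + desc->x = 192 + 320
	 *                                      = 512   (-> WSR, line stride)
	 *   desc->len = desc->x * desc->y      = 76800 bytes actually copied
	 */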
@@ -825,7 +934,7 @@ static void imxdma_issue_pending(struct dma_chan *chan)
 	struct imxdma_desc *desc;
 	unsigned long flags;
 
-	spin_lock_irqsave(&imxdmac->lock, flags);
+	spin_lock_irqsave(&imxdma->lock, flags);
 	if (list_empty(&imxdmac->ld_active) &&
 	    !list_empty(&imxdmac->ld_queue)) {
 		desc = list_first_entry(&imxdmac->ld_queue,
@@ -840,7 +949,7 @@ static void imxdma_issue_pending(struct dma_chan *chan)
 				       &imxdmac->ld_active);
 		}
 	}
-	spin_unlock_irqrestore(&imxdmac->lock, flags);
+	spin_unlock_irqrestore(&imxdma->lock, flags);
 }
 
 static int __init imxdma_probe(struct platform_device *pdev)
@@ -903,6 +1012,13 @@ static int __init imxdma_probe(struct platform_device *pdev)
 	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
 	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
 	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
+	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);
+
+	/* Initialize 2D global parameters */
+	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
+		imxdma->slots_2d[i].count = 0;
+
+	spin_lock_init(&imxdma->lock);
 
 	/* Initialize channel parameters */
 	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
@@ -923,7 +1039,6 @@ static int __init imxdma_probe(struct platform_device *pdev)
 		}
 
 		imxdmac->imxdma = imxdma;
-		spin_lock_init(&imxdmac->lock);
 
 		INIT_LIST_HEAD(&imxdmac->ld_queue);
 		INIT_LIST_HEAD(&imxdmac->ld_free);
@@ -949,6 +1064,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
 	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
 	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
 	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
+	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
 	imxdma->dma_device.device_control = imxdma_control;
 	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
 