author		Linus Torvalds <torvalds@linux-foundation.org>	2019-02-10 13:39:37 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-02-10 13:39:37 -0500
commit		68d94a8424352d723e4991de4745ce6795b23069
tree		04189cb9e9db3fb3d6cea0dc398a5fc0f94aeef5
parent		aadaa8061189a9e5d8a1327b328453d663e8cbc9
parent		6454368a804c4955ccd116236037536f81e5b1f1
Merge tag 'dmaengine-fix-5.0-rc6' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine fixes from Vinod Koul:

 - Fix in at_xdmac for wrongful channel state
 - Fix for imx driver for wrong callback invocation
 - Fix to bcm driver for interrupt race & transaction abort
 - Fix in dmatest to abort on mapping error

* tag 'dmaengine-fix-5.0-rc6' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: dmatest: Abort test in case of mapping error
  dmaengine: bcm2835: Fix abort of transactions
  dmaengine: bcm2835: Fix interrupt race on RT
  dmaengine: imx-dma: fix wrong callback invoke
  dmaengine: at_xdmac: Fix wrongfull report of a channel as in use
 drivers/dma/at_xdmac.c    | 19
 drivers/dma/bcm2835-dma.c | 70
 drivers/dma/dmatest.c     | 32
 drivers/dma/imx-dma.c     |  8
 4 files changed, 53 insertions(+), 76 deletions(-)
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 4e557684f792..fe69dccfa0c0 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -203,6 +203,7 @@ struct at_xdmac_chan {
 	u32			save_cim;
 	u32			save_cnda;
 	u32			save_cndc;
+	u32			irq_status;
 	unsigned long		status;
 	struct tasklet_struct	tasklet;
 	struct dma_slave_config	sconfig;
@@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data)
 	struct at_xdmac_desc	*desc;
 	u32			error_mask;
 
-	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
-		__func__, atchan->status);
+	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
+		__func__, atchan->irq_status);
 
 	error_mask = AT_XDMAC_CIS_RBEIS
 		   | AT_XDMAC_CIS_WBEIS
@@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data)
 
 	if (at_xdmac_chan_is_cyclic(atchan)) {
 		at_xdmac_handle_cyclic(atchan);
-	} else if ((atchan->status & AT_XDMAC_CIS_LIS)
-		   || (atchan->status & error_mask)) {
+	} else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
+		   || (atchan->irq_status & error_mask)) {
 		struct dma_async_tx_descriptor  *txd;
 
-		if (atchan->status & AT_XDMAC_CIS_RBEIS)
+		if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
 			dev_err(chan2dev(&atchan->chan), "read bus error!!!");
-		if (atchan->status & AT_XDMAC_CIS_WBEIS)
+		if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
 			dev_err(chan2dev(&atchan->chan), "write bus error!!!");
-		if (atchan->status & AT_XDMAC_CIS_ROIS)
+		if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
 			dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
 
 		spin_lock(&atchan->lock);
@@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
 		atchan = &atxdmac->chan[i];
 		chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
 		chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
-		atchan->status = chan_status & chan_imr;
+		atchan->irq_status = chan_status & chan_imr;
 		dev_vdbg(atxdmac->dma.dev,
 			 "%s: chan%d: imr=0x%x, status=0x%x\n",
 			 __func__, i, chan_imr, chan_status);
@@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
 			at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
 			at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
 
-		if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
+		if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
 			at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
 
 		tasklet_schedule(&atchan->tasklet);
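
The at_xdmac change above splits one field into two because atchan->status previously did double duty: it carried the driver's software channel-state flags and was also overwritten by the interrupt handler with the masked hardware cause read from CIS, so a latched interrupt could clobber the state bits and leave a channel wrongly reported as in use (hence the commit subject). A minimal, self-contained C sketch of the fixed pattern; the flag values, names, and helpers below are hypothetical stand-ins, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit values standing in for the driver's flags. */
#define CHAN_IS_CYCLIC	(1UL << 0)	/* software state, lives in 'status' */
#define CIS_LIS		(1U << 2)	/* hardware cause, latched from CIS */

struct chan {
	unsigned long status;		/* software channel state only */
	uint32_t irq_status;		/* written only by the IRQ path */
};

/* IRQ path: latch the masked cause into its own field so it can no
 * longer clobber the software state bits (the pre-fix bug). */
static void irq_handler(struct chan *c, uint32_t cis, uint32_t cim)
{
	c->irq_status = cis & cim;
}

/* Deferred work: test software state and hardware cause independently,
 * mirroring the patched tasklet. */
static void tasklet(struct chan *c)
{
	if (c->status & CHAN_IS_CYCLIC)
		puts("cyclic handling");
	else if (c->irq_status & CIS_LIS)
		puts("linked-list end: complete descriptor");
}

int main(void)
{
	struct chan c = { .status = CHAN_IS_CYCLIC };

	irq_handler(&c, CIS_LIS, CIS_LIS);
	tasklet(&c);	/* state bit survived the interrupt: prints "cyclic handling" */
	return 0;
}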
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 1a44c8086d77..ae10f5614f95 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -406,38 +406,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
 	}
 }
 
-static int bcm2835_dma_abort(void __iomem *chan_base)
+static int bcm2835_dma_abort(struct bcm2835_chan *c)
 {
-	unsigned long cs;
+	void __iomem *chan_base = c->chan_base;
 	long int timeout = 10000;
 
-	cs = readl(chan_base + BCM2835_DMA_CS);
-	if (!(cs & BCM2835_DMA_ACTIVE))
+	/*
+	 * A zero control block address means the channel is idle.
+	 * (The ACTIVE flag in the CS register is not a reliable indicator.)
+	 */
+	if (!readl(chan_base + BCM2835_DMA_ADDR))
 		return 0;
 
 	/* Write 0 to the active bit - Pause the DMA */
 	writel(0, chan_base + BCM2835_DMA_CS);
 
 	/* Wait for any current AXI transfer to complete */
-	while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
+	while ((readl(chan_base + BCM2835_DMA_CS) &
+		BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
 		cpu_relax();
-		cs = readl(chan_base + BCM2835_DMA_CS);
-	}
 
-	/* We'll un-pause when we set of our next DMA */
+	/* Peripheral might be stuck and fail to signal AXI write responses */
 	if (!timeout)
-		return -ETIMEDOUT;
-
-	if (!(cs & BCM2835_DMA_ACTIVE))
-		return 0;
-
-	/* Terminate the control block chain */
-	writel(0, chan_base + BCM2835_DMA_NEXTCB);
-
-	/* Abort the whole DMA */
-	writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
-	       chan_base + BCM2835_DMA_CS);
+		dev_err(c->vc.chan.device->dev,
+			"failed to complete outstanding writes\n");
 
+	writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
 	return 0;
 }
 
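The rewritten abort path above changes strategy: idleness is judged by the control block address rather than the unreliable ACTIVE flag, outstanding AXI writes are drained with a bounded spin, and a timeout is only logged before the channel is reset unconditionally instead of bailing out with -ETIMEDOUT. A user-space sketch of that control flow against a simulated register block; the struct and constants are stand-ins for the real MMIO interface, assumed here purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Simulated channel registers; names mirror the driver's but this is
 * not the hardware programming model, only an illustration. */
struct chan_regs {
	uint32_t cs;	/* control/status */
	uint32_t addr;	/* current control block address, 0 when idle */
};

#define DMA_ACTIVE		(1u << 0)
#define DMA_WAITING_FOR_WRITES	(1u << 6)
#define DMA_RESET		(1u << 31)

static void abort_sketch(struct chan_regs *r)
{
	long timeout = 10000;

	if (!r->addr)
		return;			/* idle channel: nothing to abort */

	r->cs = 0;			/* clear ACTIVE: pause the channel */

	/* bounded busy-wait; the real driver calls cpu_relax() here */
	while ((r->cs & DMA_WAITING_FOR_WRITES) && --timeout)
		;

	if (!timeout)
		fprintf(stderr, "failed to complete outstanding writes\n");

	r->cs = DMA_RESET;		/* reset always terminates the transfer */
}

int main(void)
{
	struct chan_regs r = { .cs = DMA_ACTIVE, .addr = 0x1000 };

	abort_sketch(&r);
	printf("cs after abort: 0x%08x\n", (unsigned)r.cs);
	return 0;
}
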
@@ -476,8 +470,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
 
 	spin_lock_irqsave(&c->vc.lock, flags);
 
-	/* Acknowledge interrupt */
-	writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+	/*
+	 * Clear the INT flag to receive further interrupts. Keep the channel
+	 * active in case the descriptor is cyclic or in case the client has
+	 * already terminated the descriptor and issued a new one. (May happen
+	 * if this IRQ handler is threaded.) If the channel is finished, it
+	 * will remain idle despite the ACTIVE flag being set.
+	 */
+	writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
+	       c->chan_base + BCM2835_DMA_CS);
 
 	d = c->desc;
 
@@ -485,11 +486,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
 	if (d->cyclic) {
 		/* call the cyclic callback */
 		vchan_cyclic_callback(&d->vd);
-
-		/* Keep the DMA engine running */
-		writel(BCM2835_DMA_ACTIVE,
-		       c->chan_base + BCM2835_DMA_CS);
-	} else {
+	} else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
 		vchan_cookie_complete(&c->desc->vd);
 		bcm2835_dma_start_desc(c);
 	}
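
The two callback hunks work together: the handler now acks with INT | ACTIVE so a descriptor that a (possibly threaded, e.g. on RT) client has already terminated and replaced keeps running, and completion is detected by the control block address reading zero rather than by assuming the interrupt means "finished". A sketch of the resulting decision logic with hypothetical names; the real driver reads BCM2835_DMA_ADDR via readl() under the vchan lock:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct chan_sim {
	uint32_t addr;	/* control block address, 0 means idle */
	bool cyclic;	/* current descriptor is cyclic */
};

static void callback_sketch(struct chan_sim *c)
{
	if (c->cyclic) {
		puts("cyclic callback");		/* vchan_cyclic_callback() */
	} else if (!c->addr) {
		puts("complete + start next desc");	/* vchan_cookie_complete() */
	} else {
		/* A client terminated the old descriptor and issued a new one
		 * before this handler ran; the new transfer keeps running
		 * because ACTIVE was left set when the IRQ was acked. */
		puts("stale interrupt, new transfer in flight");
	}
}

int main(void)
{
	struct chan_sim finished = { .addr = 0, .cyclic = false };
	struct chan_sim reissued = { .addr = 0x2000, .cyclic = false };

	callback_sketch(&finished);
	callback_sketch(&reissued);
	return 0;
}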
@@ -779,7 +776,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
 	struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
 	unsigned long flags;
-	int timeout = 10000;
 	LIST_HEAD(head);
 
 	spin_lock_irqsave(&c->vc.lock, flags);
@@ -789,27 +785,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 	list_del_init(&c->node);
 	spin_unlock(&d->lock);
 
-	/*
-	 * Stop DMA activity: we assume the callback will not be called
-	 * after bcm_dma_abort() returns (even if it does, it will see
-	 * c->desc is NULL and exit.)
-	 */
+	/* stop DMA activity */
 	if (c->desc) {
 		vchan_terminate_vdesc(&c->desc->vd);
 		c->desc = NULL;
-		bcm2835_dma_abort(c->chan_base);
-
-		/* Wait for stopping */
-		while (--timeout) {
-			if (!(readl(c->chan_base + BCM2835_DMA_CS) &
-						BCM2835_DMA_ACTIVE))
-				break;
-
-			cpu_relax();
-		}
-
-		if (!timeout)
-			dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+		bcm2835_dma_abort(c);
 	}
 
 	vchan_get_all_descriptors(&c->vc, &head);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 2eea4ef72915..6511928b4cdf 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -711,11 +711,9 @@ static int dmatest_func(void *data)
 			srcs[i] = um->addr[i] + src_off;
 			ret = dma_mapping_error(dev->dev, um->addr[i]);
 			if (ret) {
-				dmaengine_unmap_put(um);
 				result("src mapping error", total_tests,
 				       src_off, dst_off, len, ret);
-				failed_tests++;
-				continue;
+				goto error_unmap_continue;
 			}
 			um->to_cnt++;
 		}
@@ -730,11 +728,9 @@ static int dmatest_func(void *data)
 					       DMA_BIDIRECTIONAL);
 			ret = dma_mapping_error(dev->dev, dsts[i]);
 			if (ret) {
-				dmaengine_unmap_put(um);
 				result("dst mapping error", total_tests,
 				       src_off, dst_off, len, ret);
-				failed_tests++;
-				continue;
+				goto error_unmap_continue;
 			}
 			um->bidi_cnt++;
 		}
@@ -762,12 +758,10 @@ static int dmatest_func(void *data)
 		}
 
 		if (!tx) {
-			dmaengine_unmap_put(um);
 			result("prep error", total_tests, src_off,
 			       dst_off, len, ret);
 			msleep(100);
-			failed_tests++;
-			continue;
+			goto error_unmap_continue;
 		}
 
 		done->done = false;
@@ -776,12 +770,10 @@ static int dmatest_func(void *data)
 		cookie = tx->tx_submit(tx);
 
 		if (dma_submit_error(cookie)) {
-			dmaengine_unmap_put(um);
 			result("submit error", total_tests, src_off,
 			       dst_off, len, ret);
 			msleep(100);
-			failed_tests++;
-			continue;
+			goto error_unmap_continue;
 		}
 		dma_async_issue_pending(chan);
 
@@ -790,22 +782,20 @@ static int dmatest_func(void *data)
 
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-		dmaengine_unmap_put(um);
-
 		if (!done->done) {
 			result("test timed out", total_tests, src_off, dst_off,
 			       len, 0);
-			failed_tests++;
-			continue;
+			goto error_unmap_continue;
 		} else if (status != DMA_COMPLETE) {
 			result(status == DMA_ERROR ?
 			       "completion error status" :
 			       "completion busy status", total_tests, src_off,
 			       dst_off, len, ret);
-			failed_tests++;
-			continue;
+			goto error_unmap_continue;
 		}
 
+		dmaengine_unmap_put(um);
+
 		if (params->noverify) {
 			verbose_result("test passed", total_tests, src_off,
 				       dst_off, len, 0);
@@ -846,6 +836,12 @@ static int dmatest_func(void *data)
 			verbose_result("test passed", total_tests, src_off,
 				       dst_off, len, 0);
 		}
+
+		continue;
+
+error_unmap_continue:
+		dmaengine_unmap_put(um);
+		failed_tests++;
 	}
 	ktime = ktime_sub(ktime_get(), ktime);
 	ktime = ktime_sub(ktime, comparetime);
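
The dmatest change above replaces five open-coded "unmap, count, continue" error paths with a single error_unmap_continue label at the bottom of the loop, the classic kernel goto-cleanup idiom, and in doing so makes the test abort consistently on mapping errors instead of limping on with invalid DMA addresses. A small self-contained sketch of the idiom; the mapping type and helpers are hypothetical, standing in for dmaengine_unmap_put():

#include <stdio.h>
#include <stdlib.h>

static int failed_tests;

struct mapping { int refcount; };

static struct mapping *get_mapping(void)
{
	struct mapping *m = malloc(sizeof(*m));

	if (m)
		m->refcount = 1;
	return m;
}

static void put_mapping(struct mapping *m)
{
	if (m && --m->refcount == 0)
		free(m);
}

static void run_one_test(int should_fail)
{
	struct mapping *um = get_mapping();

	if (!um)
		return;

	if (should_fail) {
		fprintf(stderr, "mapping error\n");
		goto error_unmap_continue;	/* single exit for every failure */
	}

	printf("test passed\n");
	put_mapping(um);			/* success path releases too */
	return;

error_unmap_continue:
	put_mapping(um);			/* mirrors dmaengine_unmap_put(um) */
	failed_tests++;
}

int main(void)
{
	run_one_test(0);
	run_one_test(1);
	printf("failed_tests=%d\n", failed_tests);
	return 0;
}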
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index c2fff3f6c9ca..4a09af3cd546 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -618,7 +618,7 @@ static void imxdma_tasklet(unsigned long data)
 {
 	struct imxdma_channel *imxdmac = (void *)data;
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
-	struct imxdma_desc *desc;
+	struct imxdma_desc *desc, *next_desc;
 	unsigned long flags;
 
 	spin_lock_irqsave(&imxdma->lock, flags);
@@ -648,10 +648,10 @@ static void imxdma_tasklet(unsigned long data)
 	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
 
 	if (!list_empty(&imxdmac->ld_queue)) {
-		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
-					node);
+		next_desc = list_first_entry(&imxdmac->ld_queue,
+					     struct imxdma_desc, node);
 		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
-		if (imxdma_xfer_desc(desc) < 0)
+		if (imxdma_xfer_desc(next_desc) < 0)
 			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
 				 __func__, imxdmac->channel);
 	}
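
The imx-dma fix is purely about variable reuse: later in the tasklet the completed descriptor's callback is invoked through 'desc', but the old code had already repointed 'desc' at the next queued descriptor when starting it, so the wrong callback fired. A minimal sketch of that control flow; the types and helpers are hypothetical:

#include <stdio.h>

struct desc { const char *name; };

static void invoke_callback(struct desc *d) { printf("callback for %s\n", d->name); }
static void start_transfer(struct desc *d)  { printf("starting %s\n", d->name); }

static void tasklet_sketch(struct desc *completed, struct desc *queued)
{
	struct desc *desc = completed;		/* descriptor that just finished */
	struct desc *next_desc = queued;	/* the fix: a separate variable */

	if (next_desc)
		start_transfer(next_desc);	/* old code clobbered 'desc' here */

	invoke_callback(desc);			/* still the completed descriptor */
}

int main(void)
{
	struct desc done = { "desc A (completed)" };
	struct desc next = { "desc B (queued)" };

	tasklet_sketch(&done, &next);
	return 0;
}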