author    Linus Torvalds <torvalds@linux-foundation.org>  2016-02-20 12:19:56 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-02-20 12:19:56 -0500
commit    da6b7366dbfc93f59873e252b8d53f17fb47a802 (patch)
tree      65fcc30045b84c6eb6c13a5522fc8224a5165c76
parent    37aa4dac99418efa17329d98c2a0ed372ed9aa9e (diff)
parent    ee1cdcdae59563535485a5f56ee72c894ab7d7ad (diff)
Merge tag 'dmaengine-fix-4.5-rc5' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine fixes from Vinod Koul:
 "A few fixes for drivers, nothing major here.

  Fixes are: ioatdma fix to restart channels, new ID for wildcat PCH,
  residue fix for edma, disable irq for non-cyclic in dw"

* tag 'dmaengine-fix-4.5-rc5' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: dw: disable BLOCK IRQs for non-cyclic xfer
  dmaengine: edma: fix residue race for cyclic
  dmaengine: dw: pci: add ID for WildcatPoint PCH
  dmaengine: IOATDMA: fix timer code that continues to restart channels during idle
-rw-r--r--  drivers/dma/dw/core.c   | 15
-rw-r--r--  drivers/dma/dw/pci.c    |  4
-rw-r--r--  drivers/dma/edma.c      | 41
-rw-r--r--  drivers/dma/ioat/dma.c  | 34
4 files changed, 76 insertions(+), 18 deletions(-)
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e893318560db..5ad0ec1f0e29 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -156,7 +156,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 
         /* Enable interrupts */
         channel_set_bit(dw, MASK.XFER, dwc->mask);
-        channel_set_bit(dw, MASK.BLOCK, dwc->mask);
         channel_set_bit(dw, MASK.ERROR, dwc->mask);
 
         dwc->initialized = true;
@@ -588,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 
                 spin_unlock_irqrestore(&dwc->lock, flags);
         }
+
+        /* Re-enable interrupts */
+        channel_set_bit(dw, MASK.BLOCK, dwc->mask);
 }
 
 /* ------------------------------------------------------------------------- */
@@ -618,11 +620,8 @@ static void dw_dma_tasklet(unsigned long data)
                         dwc_scan_descriptors(dw, dwc);
         }
 
-        /*
-         * Re-enable interrupts.
-         */
+        /* Re-enable interrupts */
         channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
-        channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
         channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
 }
 
@@ -1261,6 +1260,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 int dw_dma_cyclic_start(struct dma_chan *chan)
 {
         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+        struct dw_dma *dw = to_dw_dma(chan->device);
         unsigned long flags;
 
         if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
@@ -1269,7 +1269,12 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
         }
 
         spin_lock_irqsave(&dwc->lock, flags);
+
+        /* Enable interrupts to perform cyclic transfer */
+        channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+
         dwc_dostart(dwc, dwc->cdesc->desc[0]);
+
         spin_unlock_irqrestore(&dwc->lock, flags);
 
         return 0;
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 4c30fdd092b3..358f9689a3f5 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -108,6 +108,10 @@ static const struct pci_device_id dw_pci_id_table[] = {
 
         /* Haswell */
         { PCI_VDEVICE(INTEL, 0x9c60) },
+
+        /* Broadwell */
+        { PCI_VDEVICE(INTEL, 0x9ce0) },
+
         { }
 };
 MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index d92d65549406..e3d7fcb69b4c 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -113,6 +113,9 @@
 #define GET_NUM_REGN(x)   ((x & 0x300000) >> 20)  /* bits 20-21 */
 #define CHMAP_EXIST       BIT(24)
 
+/* CCSTAT register */
+#define EDMA_CCSTAT_ACTV  BIT(4)
+
 /*
  * Max of 20 segments per channel to conserve PaRAM slots
  * Also note that MAX_NR_SG should be atleast the no.of periods
@@ -1680,9 +1683,20 @@ static void edma_issue_pending(struct dma_chan *chan)
         spin_unlock_irqrestore(&echan->vchan.lock, flags);
 }
 
+/*
+ * This limit exists to avoid a possible infinite loop when waiting for proof
+ * that a particular transfer is completed. This limit can be hit if there
+ * are large bursts to/from slow devices or the CPU is never able to catch
+ * the DMA hardware idle. On an AM335x transfering 48 bytes from the UART
+ * RX-FIFO, as many as 55 loops have been seen.
+ */
+#define EDMA_MAX_TR_WAIT_LOOPS 1000
+
 static u32 edma_residue(struct edma_desc *edesc)
 {
         bool dst = edesc->direction == DMA_DEV_TO_MEM;
+        int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
+        struct edma_chan *echan = edesc->echan;
         struct edma_pset *pset = edesc->pset;
         dma_addr_t done, pos;
         int i;
@@ -1691,7 +1705,32 @@ static u32 edma_residue(struct edma_desc *edesc)
          * We always read the dst/src position from the first RamPar
          * pset. That's the one which is active now.
          */
-        pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst);
+        pos = edma_get_position(echan->ecc, echan->slot[0], dst);
+
+        /*
+         * "pos" may represent a transfer request that is still being
+         * processed by the EDMACC or EDMATC. We will busy wait until
+         * any one of the situations occurs:
+         *   1. the DMA hardware is idle
+         *   2. a new transfer request is setup
+         *   3. we hit the loop limit
+         */
+        while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
+                /* check if a new transfer request is setup */
+                if (edma_get_position(echan->ecc,
+                                      echan->slot[0], dst) != pos) {
+                        break;
+                }
+
+                if (!--loop_count) {
+                        dev_dbg_ratelimited(echan->vchan.chan.device->dev,
+                                "%s: timeout waiting for PaRAM update\n",
+                                __func__);
+                        break;
+                }
+
+                cpu_relax();
+        }
 
         /*
          * Cyclic is simple. Just subtract pset[0].addr from pos.
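
The edma residue fix above boils down to a bounded busy-wait: poll the controller until it reports idle, bail out early if the position register moves (meaning a new transfer request was set up), and give up after a fixed number of loops so a stuck controller cannot hang the caller. Below is a minimal, self-contained userspace sketch of that pattern; hw_is_active() and hw_get_position() are hypothetical stand-ins for the driver's CCSTAT and PaRAM reads, not part of the edma code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_WAIT_LOOPS 1000  /* plays the role of EDMA_MAX_TR_WAIT_LOOPS */

/* Toy hardware model (hypothetical, only to make the sketch runnable):
 * the "controller" stays busy for a few polls, then goes idle. */
static int busy_polls_left = 5;
static uint32_t position = 0x1000;

static bool hw_is_active(void)        { return busy_polls_left-- > 0; }
static uint32_t hw_get_position(void) { return position; }

/*
 * Bounded busy-wait: stop when the controller is idle, when the position
 * register moves (a new transfer request was set up), or when the loop
 * limit is reached, so a stuck controller cannot hang the caller.
 */
static uint32_t wait_for_stable_position(void)
{
        uint32_t pos = hw_get_position();
        int loops = MAX_WAIT_LOOPS;

        while (hw_is_active()) {
                if (hw_get_position() != pos)
                        break;  /* a new request took over; use old pos */

                if (!--loops) {
                        fprintf(stderr, "timeout waiting for position update\n");
                        break;  /* never spin forever */
                }
                /* the kernel driver calls cpu_relax() here; a no-op in userspace */
        }
        return pos;
}

int main(void)
{
        printf("stable position: %#x\n", (unsigned)wait_for_stable_position());
        return 0;
}

The loop cap is what keeps a residue query from spinning indefinitely if the hardware never reports idle; the early break on a position change handles the race the commit title refers to.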
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1d5df2ef148b..21539d5c54c3 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -861,32 +861,42 @@ void ioat_timer_event(unsigned long data)
                 return;
         }
 
+        spin_lock_bh(&ioat_chan->cleanup_lock);
+
+        /* handle the no-actives case */
+        if (!ioat_ring_active(ioat_chan)) {
+                spin_lock_bh(&ioat_chan->prep_lock);
+                check_active(ioat_chan);
+                spin_unlock_bh(&ioat_chan->prep_lock);
+                spin_unlock_bh(&ioat_chan->cleanup_lock);
+                return;
+        }
+
         /* if we haven't made progress and we have already
          * acknowledged a pending completion once, then be more
          * forceful with a restart
          */
-        spin_lock_bh(&ioat_chan->cleanup_lock);
         if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
                 __cleanup(ioat_chan, phys_complete);
         else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
+                u32 chanerr;
+
+                chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+                dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
+                dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
+                         status, chanerr);
+                dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
+                         ioat_ring_active(ioat_chan));
+
                 spin_lock_bh(&ioat_chan->prep_lock);
                 ioat_restart_channel(ioat_chan);
                 spin_unlock_bh(&ioat_chan->prep_lock);
                 spin_unlock_bh(&ioat_chan->cleanup_lock);
                 return;
-        } else {
+        } else
                 set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
-                mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
-        }
-
 
-        if (ioat_ring_active(ioat_chan))
-                mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
-        else {
-                spin_lock_bh(&ioat_chan->prep_lock);
-                check_active(ioat_chan);
-                spin_unlock_bh(&ioat_chan->prep_lock);
-        }
+        mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
         spin_unlock_bh(&ioat_chan->cleanup_lock);
 }
 
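
The IOAT change above is mostly a control-flow restructure: the idle (no active descriptors) case is handled first with an early return, a forced restart now dumps CHANSTS/CHANERR diagnostics before acting, and the timer is re-armed in exactly one place at the end instead of two. The following is a compressed, self-contained sketch of that shape only; chan_t and the stub helpers are hypothetical, not the driver's real types or API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical channel state used only to illustrate the control flow. */
typedef struct {
        bool active;           /* any descriptors outstanding? */
        bool made_progress;    /* did cleanup find completed work? */
        bool completion_acked; /* did we already give it one grace period? */
} chan_t;

static void rearm_timer(void) { puts("re-arm completion timer"); }

static void timer_event(chan_t *c)
{
        /* 1. Nothing in flight: just check whether new work arrived. */
        if (!c->active) {
                puts("idle: check for newly queued work and return");
                return;
        }

        /* 2. Progress was made: clean up and fall through to re-arm. */
        if (c->made_progress) {
                puts("clean up completed descriptors");
        } else if (c->completion_acked) {
                /* 3. Second timeout with no progress: dump state, restart,
                 *    and return early, mirroring the driver's restart path. */
                puts("Restarting channel... (status registers dumped here)");
                return;
        } else {
                /* 4. First timeout with no progress: note it, wait once more. */
                c->completion_acked = true;
        }

        /* 5. Single place where the timer is re-armed. */
        rearm_timer();
}

int main(void)
{
        chan_t c = { .active = true, .made_progress = false,
                     .completion_acked = false };
        timer_event(&c);  /* first timeout: just acknowledge */
        timer_event(&c);  /* second timeout: forced restart */
        return 0;
}

Handling the idle case up front is what stops the timer handler from repeatedly restarting a channel that simply has nothing to do, which is the behaviour the patch title calls out.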