Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/amba-pl08x.c     |  52
-rw-r--r--  drivers/dma/at_hdmac.c       |  19
-rw-r--r--  drivers/dma/at_hdmac_regs.h  |  21
-rw-r--r--  drivers/dma/coh901318.c      |   2
-rw-r--r--  drivers/dma/coh901318_lli.c  |   4
-rw-r--r--  drivers/dma/dw_dmac.c        |  26
-rw-r--r--  drivers/dma/ep93xx_dma.c     | 121
-rw-r--r--  drivers/dma/imx-dma.c        |  12
-rw-r--r--  drivers/dma/imx-sdma.c       |  68
-rw-r--r--  drivers/dma/intel_mid_dma.c  |   8
-rw-r--r--  drivers/dma/mv_xor.c         |  15
-rw-r--r--  drivers/dma/mv_xor.h         |   1
-rw-r--r--  drivers/dma/mxs-dma.c        |   6
-rw-r--r--  drivers/dma/pch_dma.c        |   2
-rw-r--r--  drivers/dma/pl330.c          |   4
-rw-r--r--  drivers/dma/ste_dma40.c      |   2
16 files changed, 260 insertions(+), 103 deletions(-)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 3d704abd791..49ecbbb8932 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -95,10 +95,14 @@ static struct amba_driver pl08x_amba_driver;
  * struct vendor_data - vendor-specific config parameters for PL08x derivatives
  * @channels: the number of channels available in this variant
  * @dualmaster: whether this version supports dual AHB masters or not.
+ * @nomadik: whether the channels have Nomadik security extension bits
+ *	that need to be checked for permission before use and some registers are
+ *	missing
  */
 struct vendor_data {
 	u8 channels;
 	bool dualmaster;
+	bool nomadik;
 };
 
 /*
@@ -385,7 +389,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
 
 	spin_lock_irqsave(&ch->lock, flags);
 
-	if (!ch->serving) {
+	if (!ch->locked && !ch->serving) {
 		ch->serving = virt_chan;
 		ch->signal = -1;
 		spin_unlock_irqrestore(&ch->lock, flags);
@@ -1324,7 +1328,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	int ret, tmp;
 
 	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
-			__func__, sgl->length, plchan->name);
+			__func__, sg_dma_len(sgl), plchan->name);
 
 	txd = pl08x_get_txd(plchan, flags);
 	if (!txd) {
@@ -1378,11 +1382,11 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 
 		dsg->len = sg_dma_len(sg);
 		if (direction == DMA_MEM_TO_DEV) {
-			dsg->src_addr = sg_phys(sg);
+			dsg->src_addr = sg_dma_address(sg);
 			dsg->dst_addr = slave_addr;
 		} else {
 			dsg->src_addr = slave_addr;
-			dsg->dst_addr = sg_phys(sg);
+			dsg->dst_addr = sg_dma_address(sg);
 		}
 	}
 
@@ -1484,6 +1488,9 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
  */
 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
 {
+	/* The Nomadik variant does not have the config register */
+	if (pl08x->vd->nomadik)
+		return;
 	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
 }
 
@@ -1616,7 +1623,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 			__func__, err);
 		writel(err, pl08x->base + PL080_ERR_CLEAR);
 	}
-	tc = readl(pl08x->base + PL080_INT_STATUS);
+	tc = readl(pl08x->base + PL080_TC_STATUS);
 	if (tc)
 		writel(tc, pl08x->base + PL080_TC_CLEAR);
 
@@ -1773,8 +1780,10 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
 		spin_lock_irqsave(&ch->lock, flags);
 		virt_chan = ch->serving;
 
-		seq_printf(s, "%d\t\t%s\n",
-			   ch->id, virt_chan ? virt_chan->name : "(none)");
+		seq_printf(s, "%d\t\t%s%s\n",
+			   ch->id,
+			   virt_chan ? virt_chan->name : "(none)",
+			   ch->locked ? " LOCKED" : "");
 
 		spin_unlock_irqrestore(&ch->lock, flags);
 	}
@@ -1918,7 +1927,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	}
 
 	/* Initialize physical channels */
-	pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
+	pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
 			GFP_KERNEL);
 	if (!pl08x->phy_chans) {
 		dev_err(&adev->dev, "%s failed to allocate "
@@ -1933,8 +1942,23 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 		ch->id = i;
 		ch->base = pl08x->base + PL080_Cx_BASE(i);
 		spin_lock_init(&ch->lock);
-		ch->serving = NULL;
 		ch->signal = -1;
+
+		/*
+		 * Nomadik variants can have channels that are locked
+		 * down for the secure world only. Lock up these channels
+		 * by perpetually serving a dummy virtual channel.
+		 */
+		if (vd->nomadik) {
+			u32 val;
+
+			val = readl(ch->base + PL080_CH_CONFIG);
+			if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
+				dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
+				ch->locked = true;
+			}
+		}
+
 		dev_dbg(&adev->dev, "physical channel %d is %s\n",
 			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
 	}
@@ -2017,6 +2041,12 @@ static struct vendor_data vendor_pl080 = {
 	.dualmaster = true,
 };
 
+static struct vendor_data vendor_nomadik = {
+	.channels = 8,
+	.dualmaster = true,
+	.nomadik = true,
+};
+
 static struct vendor_data vendor_pl081 = {
 	.channels = 2,
 	.dualmaster = false,
@@ -2037,9 +2067,9 @@ static struct amba_id pl08x_ids[] = {
 	},
 	/* Nomadik 8815 PL080 variant */
 	{
-		.id	= 0x00280880,
+		.id	= 0x00280080,
 		.mask	= 0x00ffffff,
-		.data	= &vendor_pl080,
+		.data	= &vendor_nomadik,
 	},
 	{ 0, 0 },
 };
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 445fdf81169..7292aa87b2d 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -39,7 +39,6 @@
  */
 
 #define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
-#define	ATC_DEFAULT_CTRLA	(0)
 #define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
 				|ATC_DIF(AT_DMA_MEM_IF))
 
@@ -245,7 +244,9 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"descriptor %u complete\n", txd->cookie);
 
-	dma_cookie_complete(txd);
+	/* mark the descriptor as complete for non cyclic cases only */
+	if (!atc_chan_is_cyclic(atchan))
+		dma_cookie_complete(txd);
 
 	/* move children to free_list */
 	list_splice_init(&desc->tx_list, &atchan->free_list);
@@ -572,7 +573,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		return NULL;
 	}
 
-	ctrla = ATC_DEFAULT_CTRLA;
 	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
 		| ATC_SRC_ADDR_MODE_INCR
 		| ATC_DST_ADDR_MODE_INCR
@@ -583,13 +583,13 @@
 	 * of the most common optimization.
 	 */
 	if (!((src | dest | len) & 3)) {
-		ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
+		ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
 		src_width = dst_width = 2;
 	} else if (!((src | dest | len) & 1)) {
-		ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
+		ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
 		src_width = dst_width = 1;
 	} else {
-		ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
+		ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
 		src_width = dst_width = 0;
 	}
 
@@ -666,7 +666,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		return NULL;
 	}
 
-	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
+	ctrla = ATC_SCSIZE(sconfig->src_maxburst)
+		| ATC_DCSIZE(sconfig->dst_maxburst);
 	ctrlb = ATC_IEN;
 
 	switch (direction) {
@@ -794,12 +795,12 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
 			 enum dma_transfer_direction direction)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
-	struct at_dma_slave	*atslave = chan->private;
 	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
 	u32			ctrla;
 
 	/* prepare common CRTLA value */
-	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla
+	ctrla = ATC_SCSIZE(sconfig->src_maxburst)
+		| ATC_DCSIZE(sconfig->dst_maxburst)
 		| ATC_DST_WIDTH(reg_width)
 		| ATC_SRC_WIDTH(reg_width)
 		| period_len >> reg_width;
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 897a8bcaec9..8a6c8e8b294 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -87,7 +87,26 @@
 /* Bitfields in CTRLA */
 #define	ATC_BTSIZE_MAX		0xFFFFUL	/* Maximum Buffer Transfer Size */
 #define	ATC_BTSIZE(x)		(ATC_BTSIZE_MAX & (x))	/* Buffer Transfer Size */
-/* Chunck Tranfer size definitions are in at_hdmac.h */
+#define	ATC_SCSIZE_MASK		(0x7 << 16)	/* Source Chunk Transfer Size */
+#define	ATC_SCSIZE(x)		(ATC_SCSIZE_MASK & ((x) << 16))
+#define	ATC_SCSIZE_1		(0x0 << 16)
+#define	ATC_SCSIZE_4		(0x1 << 16)
+#define	ATC_SCSIZE_8		(0x2 << 16)
+#define	ATC_SCSIZE_16		(0x3 << 16)
+#define	ATC_SCSIZE_32		(0x4 << 16)
+#define	ATC_SCSIZE_64		(0x5 << 16)
+#define	ATC_SCSIZE_128		(0x6 << 16)
+#define	ATC_SCSIZE_256		(0x7 << 16)
+#define	ATC_DCSIZE_MASK		(0x7 << 20)	/* Destination Chunk Transfer Size */
+#define	ATC_DCSIZE(x)		(ATC_DCSIZE_MASK & ((x) << 20))
+#define	ATC_DCSIZE_1		(0x0 << 20)
+#define	ATC_DCSIZE_4		(0x1 << 20)
+#define	ATC_DCSIZE_8		(0x2 << 20)
+#define	ATC_DCSIZE_16		(0x3 << 20)
+#define	ATC_DCSIZE_32		(0x4 << 20)
+#define	ATC_DCSIZE_64		(0x5 << 20)
+#define	ATC_DCSIZE_128		(0x6 << 20)
+#define	ATC_DCSIZE_256		(0x7 << 20)
 #define	ATC_SRC_WIDTH_MASK	(0x3 << 24)	/* Source Single Transfer Size */
 #define	ATC_SRC_WIDTH(x)	((x) << 24)
 #define	ATC_SRC_WIDTH_BYTE	(0x0 << 24)
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 750925f9638..e67b4e06a91 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1033,7 +1033,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
 	if (!sgl)
 		goto out;
-	if (sgl->length == 0)
+	if (sg_dma_len(sgl) == 0)
 		goto out;
 
 	spin_lock_irqsave(&cohc->lock, flg);
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 6c0e2d4c668..780e0429b38 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -270,10 +270,10 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 
 		if (dir == DMA_MEM_TO_DEV)
 			/* increment source address */
-			src = sg_phys(sg);
+			src = sg_dma_address(sg);
 		else
 			/* increment destination address */
-			dst = sg_phys(sg);
+			dst = sg_dma_address(sg);
 
 		bytes_to_transfer = sg_dma_len(sg);
 
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 7439079f5ee..e23dc82d43a 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/of.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -742,7 +743,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			struct dw_desc	*desc;
 			u32		len, dlen, mem;
 
-			mem = sg_phys(sg);
+			mem = sg_dma_address(sg);
 			len = sg_dma_len(sg);
 
 			if (!((mem | len) & 7))
@@ -809,7 +810,7 @@ slave_sg_todev_fill_desc:
 			struct dw_desc	*desc;
 			u32		len, dlen, mem;
 
-			mem = sg_phys(sg);
+			mem = sg_dma_address(sg);
 			len = sg_dma_len(sg);
 
 			if (!((mem | len) & 7))
@@ -1429,7 +1430,7 @@ static int __init dw_probe(struct platform_device *pdev)
 		err = PTR_ERR(dw->clk);
 		goto err_clk;
 	}
-	clk_enable(dw->clk);
+	clk_prepare_enable(dw->clk);
 
 	/* force dma off, just in case */
 	dw_dma_off(dw);
@@ -1510,7 +1511,7 @@ static int __init dw_probe(struct platform_device *pdev)
 	return 0;
 
 err_irq:
-	clk_disable(dw->clk);
+	clk_disable_unprepare(dw->clk);
 	clk_put(dw->clk);
 err_clk:
 	iounmap(dw->regs);
@@ -1540,7 +1541,7 @@ static int __exit dw_remove(struct platform_device *pdev)
 		channel_clear_bit(dw, CH_EN, dwc->mask);
 	}
 
-	clk_disable(dw->clk);
+	clk_disable_unprepare(dw->clk);
 	clk_put(dw->clk);
 
 	iounmap(dw->regs);
@@ -1559,7 +1560,7 @@ static void dw_shutdown(struct platform_device *pdev)
 	struct dw_dma	*dw = platform_get_drvdata(pdev);
 
 	dw_dma_off(platform_get_drvdata(pdev));
-	clk_disable(dw->clk);
+	clk_disable_unprepare(dw->clk);
 }
 
 static int dw_suspend_noirq(struct device *dev)
@@ -1568,7 +1569,7 @@ static int dw_suspend_noirq(struct device *dev)
 	struct dw_dma	*dw = platform_get_drvdata(pdev);
 
 	dw_dma_off(platform_get_drvdata(pdev));
-	clk_disable(dw->clk);
+	clk_disable_unprepare(dw->clk);
 
 	return 0;
 }
@@ -1578,7 +1579,7 @@ static int dw_resume_noirq(struct device *dev)
 	struct platform_device *pdev = to_platform_device(dev);
 	struct dw_dma	*dw = platform_get_drvdata(pdev);
 
-	clk_enable(dw->clk);
+	clk_prepare_enable(dw->clk);
 	dma_writel(dw, CFG, DW_CFG_DMA_EN);
 	return 0;
 }
@@ -1592,12 +1593,21 @@ static const struct dev_pm_ops dw_dev_pm_ops = {
 	.poweroff_noirq = dw_suspend_noirq,
 };
 
+#ifdef CONFIG_OF
+static const struct of_device_id dw_dma_id_table[] = {
+	{ .compatible = "snps,dma-spear1340" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dw_dma_id_table);
+#endif
+
 static struct platform_driver dw_driver = {
 	.remove		= __exit_p(dw_remove),
 	.shutdown	= dw_shutdown,
 	.driver = {
 		.name	= "dw_dmac",
 		.pm	= &dw_dev_pm_ops,
+		.of_match_table = of_match_ptr(dw_dma_id_table),
 	},
 };
 
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index e6f133b78dc..c64917ec313 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -71,6 +71,7 @@
 #define M2M_CONTROL_TM_SHIFT		13
 #define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
 #define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_NFBINT		BIT(21)
 #define M2M_CONTROL_RSS_SHIFT		22
 #define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
 #define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
@@ -79,7 +80,22 @@
 #define M2M_CONTROL_PWSC_SHIFT		25
 
 #define M2M_INTERRUPT			0x0004
-#define M2M_INTERRUPT_DONEINT		BIT(1)
+#define M2M_INTERRUPT_MASK		6
+
+#define M2M_STATUS			0x000c
+#define M2M_STATUS_CTL_SHIFT		1
+#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_BUF_SHIFT		4
+#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_DONE			BIT(6)
 
 #define M2M_BCR0			0x0010
 #define M2M_BCR1			0x0014
@@ -426,15 +442,6 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
 
 /*
  * M2M DMA implementation
- *
- * For the M2M transfers we don't use NFB at all. This is because it simply
- * doesn't work well with memcpy transfers. When you submit both buffers it is
- * extremely unlikely that you get an NFB interrupt, but it instead reports
- * DONE interrupt and both buffers are already transferred which means that we
- * weren't able to update the next buffer.
- *
- * So for now we "simulate" NFB by just submitting buffer after buffer
- * without double buffering.
  */
 
 static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
@@ -543,6 +550,11 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
 	m2m_fill_desc(edmac);
 	control |= M2M_CONTROL_DONEINT;
 
+	if (ep93xx_dma_advance_active(edmac)) {
+		m2m_fill_desc(edmac);
+		control |= M2M_CONTROL_NFBINT;
+	}
+
 	/*
 	 * Now we can finally enable the channel. For M2M channel this must be
 	 * done _after_ the BCRx registers are programmed.
@@ -560,32 +572,89 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
 	}
 }
 
+/*
+ * According to EP93xx User's Guide, we should receive DONE interrupt when all
+ * M2M DMA controller transactions complete normally. This is not always the
+ * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel
+ * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel
+ * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
+ * In effect, disabling the channel when only DONE bit is set could stop
+ * currently running DMA transfer. To avoid this, we use Buffer FSM and
+ * Control FSM to check current state of DMA channel.
+ */
 static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
 {
+	u32 status = readl(edmac->regs + M2M_STATUS);
+	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
+	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
+	bool done = status & M2M_STATUS_DONE;
+	bool last_done;
 	u32 control;
+	struct ep93xx_dma_desc *desc;
 
-	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
+	/* Accept only DONE and NFB interrupts */
+	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
 		return INTERRUPT_UNKNOWN;
 
-	/* Clear the DONE bit */
-	writel(0, edmac->regs + M2M_INTERRUPT);
+	if (done) {
+		/* Clear the DONE bit */
+		writel(0, edmac->regs + M2M_INTERRUPT);
+	}
 
-	/* Disable interrupts and the channel */
-	control = readl(edmac->regs + M2M_CONTROL);
-	control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
-	writel(control, edmac->regs + M2M_CONTROL);
+	/*
+	 * Check whether we are done with descriptors or not. This, together
+	 * with DMA channel state, determines action to take in interrupt.
+	 */
+	desc = ep93xx_dma_get_active(edmac);
+	last_done = !desc || desc->txd.cookie;
 
 	/*
-	 * Since we only get DONE interrupt we have to find out ourselves
-	 * whether there still is something to process. So we try to advance
-	 * the chain an see whether it succeeds.
+	 * Use M2M DMA Buffer FSM and Control FSM to check current state of
+	 * DMA channel. Using DONE and NFB bits from channel status register
+	 * or bits from channel interrupt register is not reliable.
 	 */
-	if (ep93xx_dma_advance_active(edmac)) {
-		edmac->edma->hw_submit(edmac);
-		return INTERRUPT_NEXT_BUFFER;
+	if (!last_done &&
+	    (buf_fsm == M2M_STATUS_BUF_NO ||
+	     buf_fsm == M2M_STATUS_BUF_ON)) {
+		/*
+		 * Two buffers are ready for update when Buffer FSM is in
+		 * DMA_NO_BUF state. Only one buffer can be prepared without
+		 * disabling the channel or polling the DONE bit.
+		 * To simplify things, always prepare only one buffer.
+		 */
+		if (ep93xx_dma_advance_active(edmac)) {
+			m2m_fill_desc(edmac);
+			if (done && !edmac->chan.private) {
+				/* Software trigger for memcpy channel */
+				control = readl(edmac->regs + M2M_CONTROL);
+				control |= M2M_CONTROL_START;
+				writel(control, edmac->regs + M2M_CONTROL);
+			}
+			return INTERRUPT_NEXT_BUFFER;
+		} else {
+			last_done = true;
+		}
+	}
+
+	/*
+	 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
+	 * and Control FSM is in DMA_STALL state.
+	 */
+	if (last_done &&
+	    buf_fsm == M2M_STATUS_BUF_NO &&
+	    ctl_fsm == M2M_STATUS_CTL_STALL) {
+		/* Disable interrupts and the channel */
+		control = readl(edmac->regs + M2M_CONTROL);
+		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
+			    | M2M_CONTROL_ENABLE);
+		writel(control, edmac->regs + M2M_CONTROL);
+		return INTERRUPT_DONE;
 	}
 
-	return INTERRUPT_DONE;
+	/*
+	 * Nothing to do this time.
+	 */
+	return INTERRUPT_NEXT_BUFFER;
 }
 
 /*
@@ -703,7 +772,9 @@ static void ep93xx_dma_tasklet(unsigned long data)
 	desc = ep93xx_dma_get_active(edmac);
 	if (desc) {
 		if (desc->complete) {
-			dma_cookie_complete(&desc->txd);
+			/* mark descriptor complete for non cyclic case only */
+			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
+				dma_cookie_complete(&desc->txd);
 			list_splice_init(&edmac->active, &list);
 		}
 		callback = desc->txd.callback;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index bb787d8e152..fcfeb3cd8d3 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -227,7 +227,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d)
 	struct scatterlist *sg = d->sg;
 	unsigned long now;
 
-	now = min(d->len, sg->length);
+	now = min(d->len, sg_dma_len(sg));
 	if (d->len != IMX_DMA_LENGTH_LOOP)
 		d->len -= now;
 
@@ -763,16 +763,16 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
 	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
 
 	for_each_sg(sgl, sg, sg_len, i) {
-		dma_length += sg->length;
+		dma_length += sg_dma_len(sg);
 	}
 
 	switch (imxdmac->word_size) {
 	case DMA_SLAVE_BUSWIDTH_4_BYTES:
-		if (sgl->length & 3 || sgl->dma_address & 3)
+		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
 			return NULL;
 		break;
 	case DMA_SLAVE_BUSWIDTH_2_BYTES:
-		if (sgl->length & 1 || sgl->dma_address & 1)
+		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
 			return NULL;
 		break;
 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -831,13 +831,13 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
 		imxdmac->sg_list[i].page_link = 0;
 		imxdmac->sg_list[i].offset = 0;
 		imxdmac->sg_list[i].dma_address = dma_addr;
-		imxdmac->sg_list[i].length = period_len;
+		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
 		dma_addr += period_len;
 	}
 
 	/* close the loop */
 	imxdmac->sg_list[periods].offset = 0;
-	imxdmac->sg_list[periods].length = 0;
+	sg_dma_len(&imxdmac->sg_list[periods]) = 0;
 	imxdmac->sg_list[periods].page_link =
 		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
 
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index fddccae6b47..fb4f4990f5e 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,7 +24,7 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
-#include <linux/wait.h>
+#include <linux/delay.h>
 #include <linux/sched.h>
 #include <linux/semaphore.h>
 #include <linux/spinlock.h>
@@ -271,6 +271,7 @@ struct sdma_channel {
 	enum dma_status			status;
 	unsigned int			chn_count;
 	unsigned int			chn_real_count;
+	struct tasklet_struct		tasklet;
 };
 
 #define IMX_DMA_SG_LOOP		BIT(0)
@@ -324,7 +325,7 @@ struct sdma_engine {
 	struct dma_device		dma_device;
 	struct clk			*clk_ipg;
 	struct clk			*clk_ahb;
-	struct mutex			channel_0_lock;
+	spinlock_t			channel_0_lock;
 	struct sdma_script_start_addrs	*script_addrs;
 };
 
@@ -402,19 +403,27 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 }
 
 /*
- * sdma_run_channel - run a channel and wait till it's done
+ * sdma_run_channel0 - run a channel and wait till it's done
  */
-static int sdma_run_channel(struct sdma_channel *sdmac)
+static int sdma_run_channel0(struct sdma_engine *sdma)
 {
-	struct sdma_engine *sdma = sdmac->sdma;
-	int channel = sdmac->channel;
 	int ret;
+	unsigned long timeout = 500;
 
-	init_completion(&sdmac->done);
+	sdma_enable_channel(sdma, 0);
 
-	sdma_enable_channel(sdma, channel);
+	while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
+		if (timeout-- <= 0)
+			break;
+		udelay(1);
+	}
 
-	ret = wait_for_completion_timeout(&sdmac->done, HZ);
+	if (ret) {
+		/* Clear the interrupt status */
+		writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
+	} else {
+		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
+	}
 
 	return ret ? 0 : -ETIMEDOUT;
 }
@@ -426,17 +435,17 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 	void *buf_virt;
 	dma_addr_t buf_phys;
 	int ret;
-
-	mutex_lock(&sdma->channel_0_lock);
+	unsigned long flags;
 
 	buf_virt = dma_alloc_coherent(NULL,
 			size,
 			&buf_phys, GFP_KERNEL);
 	if (!buf_virt) {
-		ret = -ENOMEM;
-		goto err_out;
+		return -ENOMEM;
 	}
 
+	spin_lock_irqsave(&sdma->channel_0_lock, flags);
+
 	bd0->mode.command = C0_SETPM;
 	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
 	bd0->mode.count = size / 2;
@@ -445,12 +454,11 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 
 	memcpy(buf_virt, buf, size);
 
-	ret = sdma_run_channel(&sdma->channel[0]);
+	ret = sdma_run_channel0(sdma);
 
-	dma_free_coherent(NULL, size, buf_virt, buf_phys);
+	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
-err_out:
-	mutex_unlock(&sdma->channel_0_lock);
+	dma_free_coherent(NULL, size, buf_virt, buf_phys);
 
 	return ret;
 }
@@ -535,13 +543,11 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
 		sdmac->desc.callback(sdmac->desc.callback_param);
 }
 
-static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
+static void sdma_tasklet(unsigned long data)
 {
-	complete(&sdmac->done);
+	struct sdma_channel *sdmac = (struct sdma_channel *) data;
 
-	/* not interested in channel 0 interrupts */
-	if (sdmac->channel == 0)
-		return;
+	complete(&sdmac->done);
 
 	if (sdmac->flags & IMX_DMA_SG_LOOP)
 		sdma_handle_channel_loop(sdmac);
@@ -555,13 +561,15 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 	unsigned long stat;
 
 	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+	/* not interested in channel 0 interrupts */
+	stat &= ~1;
 	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
 
 	while (stat) {
 		int channel = fls(stat) - 1;
 		struct sdma_channel *sdmac = &sdma->channel[channel];
 
-		mxc_sdma_handle_channel(sdmac);
+		tasklet_schedule(&sdmac->tasklet);
 
 		__clear_bit(channel, &stat);
 	}
@@ -660,6 +668,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	struct sdma_context_data *context = sdma->context;
 	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
 	int ret;
+	unsigned long flags;
 
 	if (sdmac->direction == DMA_DEV_TO_MEM) {
 		load_address = sdmac->pc_from_device;
@@ -677,7 +686,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
 	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
 
-	mutex_lock(&sdma->channel_0_lock);
+	spin_lock_irqsave(&sdma->channel_0_lock, flags);
 
 	memset(context, 0, sizeof(*context));
 	context->channel_state.pc = load_address;
@@ -696,10 +705,9 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	bd0->mode.count = sizeof(*context) / 4;
 	bd0->buffer_addr = sdma->context_phys;
 	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
+	ret = sdma_run_channel0(sdma);
 
-	ret = sdma_run_channel(&sdma->channel[0]);
-
-	mutex_unlock(&sdma->channel_0_lock);
+	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
 	return ret;
 }
@@ -941,7 +949,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 
 			bd->buffer_addr = sg->dma_address;
 
-			count = sg->length;
+			count = sg_dma_len(sg);
 
 			if (count > 0xffff) {
 				dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
@@ -1305,7 +1313,7 @@ static int __init sdma_probe(struct platform_device *pdev)
 	if (!sdma)
 		return -ENOMEM;
 
-	mutex_init(&sdma->channel_0_lock);
+	spin_lock_init(&sdma->channel_0_lock);
 
 	sdma->dev = &pdev->dev;
 
@@ -1376,6 +1384,8 @@ static int __init sdma_probe(struct platform_device *pdev)
 		dma_cookie_init(&sdmac->chan);
 		sdmac->channel = i;
 
+		tasklet_init(&sdmac->tasklet, sdma_tasklet,
+			     (unsigned long) sdmac);
 		/*
 		 * Add the channel to the DMAC list. Do not add channel 0 though
 		 * because we need it internally in the SDMA driver. This also means
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index c900ca7aaec..222e907bfaa 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -394,11 +394,11 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
 			}
 		}
 		/*Populate CTL_HI values*/
-		ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
+		ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
 							desc->width,
 							midc->dma->block_size);
 		/*Populate SAR and DAR values*/
-		sg_phy_addr = sg_phys(sg);
+		sg_phy_addr = sg_dma_address(sg);
 		if (desc->dirn == DMA_MEM_TO_DEV) {
 			lli_bloc_desc->sar = sg_phy_addr;
 			lli_bloc_desc->dar = mids->dma_slave.dst_addr;
@@ -747,7 +747,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
 		txd = intel_mid_dma_prep_memcpy(chan,
 					mids->dma_slave.dst_addr,
 					mids->dma_slave.src_addr,
-					sgl->length,
+					sg_dma_len(sgl),
 					flags);
 		return txd;
 	} else {
@@ -759,7 +759,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
 	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
 			sg_len, direction, flags);
 
-	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
+	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
 	if (NULL == txd) {
 		pr_err("MDMA: Prep memcpy failed\n");
 		return NULL;
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fa5d55fea46..0b12e68bf79 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/memory.h>
+#include <linux/clk.h>
 #include <plat/mv_xor.h>
 
 #include "dmaengine.h"
@@ -1307,11 +1308,25 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
 	if (dram)
 		mv_xor_conf_mbus_windows(msp, dram);
 
+	/* Not all platforms can gate the clock, so it is not
+	 * an error if the clock does not exists.
+	 */
+	msp->clk = clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(msp->clk))
+		clk_prepare_enable(msp->clk);
+
 	return 0;
 }
 
 static int mv_xor_shared_remove(struct platform_device *pdev)
 {
+	struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);
+
+	if (!IS_ERR(msp->clk)) {
+		clk_disable_unprepare(msp->clk);
+		clk_put(msp->clk);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 654876b7ba1..a5b422f5a8a 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -55,6 +55,7 @@
 struct mv_xor_shared_private {
 	void __iomem	*xor_base;
 	void __iomem	*xor_high_base;
+	struct clk	*clk;
 };
 
 
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 1cb9b974493..c96ab15319f 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -489,9 +489,9 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
 	} else {
 		for_each_sg(sgl, sg, sg_len, i) {
-			if (sg->length > MAX_XFER_BYTES) {
+			if (sg_dma_len(sg) > MAX_XFER_BYTES) {
 				dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
-						sg->length, MAX_XFER_BYTES);
+						sg_dma_len(sg), MAX_XFER_BYTES);
 				goto err_out;
 			}
 
@@ -499,7 +499,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 
 			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
 			ccw->bufaddr = sg->dma_address;
-			ccw->xfer_bytes = sg->length;
+			ccw->xfer_bytes = sg_dma_len(sg);
 
 			ccw->bits = 0;
 			ccw->bits |= CCW_CHAIN;
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 65c0495a6d4..987ab5cd261 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -621,7 +621,7 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
 			goto err_desc_get;
 
 		desc->regs.dev_addr = reg;
-		desc->regs.mem_addr = sg_phys(sg);
+		desc->regs.mem_addr = sg_dma_address(sg);
 		desc->regs.size = sg_dma_len(sg);
 		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;
 
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 2ee6e23930a..cbcc28e79be 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -21,7 +21,6 @@
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
-#include <linux/interrupt.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl330.h>
 #include <linux/pm_runtime.h>
@@ -2322,7 +2321,8 @@ static void pl330_tasklet(unsigned long data)
 	/* Pick up ripe tomatoes */
 	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
 		if (desc->status == DONE) {
-			dma_cookie_complete(&desc->txd);
+			if (!pch->cyclic)
+				dma_cookie_complete(&desc->txd);
 			list_move_tail(&desc->node, &list);
 		}
 
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 2ed1ac3513f..000d309602b 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2362,7 +2362,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
 	}
 
 	sg[periods].offset = 0;
-	sg[periods].length = 0;
+	sg_dma_len(&sg[periods]) = 0;
 	sg[periods].page_link =
 		((unsigned long)sg | 0x01) & ~0x02;
 