path: root/drivers/dma/ste_dma40.c
author	Rabin Vincent <rabin.vincent@stericsson.com>	2011-01-25 05:18:18 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2011-01-31 01:27:18 -0500
commit	5f81158f90db4bc8a79e91736aa3afce8e590e46 (patch)
tree	dcf4ac40513e7ccb73032a8b8ace7a42e5444523 /drivers/dma/ste_dma40.c
parent	95944c6ef5b5214508273992416adb836b63c73f (diff)
dma40: combine desc init functions
The desc init code can be shared between the mem and slave prep routines.

Acked-by: Per Forlin <per.forlin@stericsson.com>
Acked-by: Jonas Aaberg <jonas.aberg@stericsson.com>
Signed-off-by: Rabin Vincent <rabin.vincent@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
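The shape of the refactor is visible in the hunks below: both stedma40_memcpy_sg() and d40_prep_slave_sg() previously open-coded d40_desc_get(), the d40_sg_2_dmalen() length check, and the async_tx descriptor setup, and the patch moves all of that into a single d40_prep_desc() helper called under the channel lock. The following is a minimal sketch of the resulting caller pattern, condensed from the diff; the prep_sketch() wrapper and the elided pool/LLI setup are illustrative only, not the driver's actual routines.

/*
 * Illustrative sketch only.  The real callers are stedma40_memcpy_sg()
 * and d40_prep_slave_sg(); everything between the helper call and the
 * unlock (LLI pool allocation, logical/physical setup, error cleanup)
 * is driver-specific and omitted here.
 */
static struct dma_async_tx_descriptor *
prep_sketch(struct d40_chan *d40c, struct scatterlist *sgl,
	    unsigned int sg_len, unsigned long dma_flags)
{
	struct d40_desc *d40d;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Allocation, lli_len validation and txd init now live in one place. */
	d40d = d40_prep_desc(d40c, sgl, sg_len, dma_flags);
	if (!d40d)
		goto err;

	/* ... per-path LLI pool allocation and address setup ... */

	spin_unlock_irqrestore(&d40c->lock, flags);
	return &d40d->txd;

err:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}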
Diffstat (limited to 'drivers/dma/ste_dma40.c')
-rw-r--r--	drivers/dma/ste_dma40.c	76
1 file changed, 32 insertions(+), 44 deletions(-)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 0a20179349b0..5259a9832435 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1617,6 +1617,35 @@ static u32 stedma40_residue(struct dma_chan *chan)
 	return bytes_left;
 }
 
+static struct d40_desc *
+d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
+	      unsigned int sg_len, unsigned long dma_flags)
+{
+	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+	struct d40_desc *desc;
+
+	desc = d40_desc_get(chan);
+	if (!desc)
+		return NULL;
+
+	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
+					cfg->dst_info.data_width);
+	if (desc->lli_len < 0) {
+		chan_err(chan, "Unaligned size\n");
+		d40_desc_free(chan, desc);
+
+		return NULL;
+	}
+
+	desc->lli_current = 0;
+	desc->txd.flags = dma_flags;
+	desc->txd.tx_submit = d40_tx_submit;
+
+	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
+
+	return desc;
+}
+
 struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 						   struct scatterlist *sgl_dst,
 						   struct scatterlist *sgl_src,
@@ -1635,22 +1664,11 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 	}
 
 	spin_lock_irqsave(&d40c->lock, flags);
-	d40d = d40_desc_get(d40c);
 
-	if (d40d == NULL)
+	d40d = d40_prep_desc(d40c, sgl_dst, sgl_len, dma_flags);
+	if (!d40d)
 		goto err;
 
-	d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
-					d40c->dma_cfg.src_info.data_width,
-					d40c->dma_cfg.dst_info.data_width);
-	if (d40d->lli_len < 0) {
-		chan_err(d40c, "Unaligned size\n");
-		goto err;
-	}
-
-	d40d->lli_current = 0;
-	d40d->txd.flags = dma_flags;
-
 	if (chan_is_logical(d40c)) {
 
 		if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) {
@@ -1708,10 +1726,6 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 				       d40d->lli_pool.size, DMA_TO_DEVICE);
 	}
 
-	dma_async_tx_descriptor_init(&d40d->txd, chan);
-
-	d40d->txd.tx_submit = d40_tx_submit;
-
 	spin_unlock_irqrestore(&d40c->lock, flags);
 
 	return &d40d->txd;
@@ -1900,21 +1914,11 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
 	dma_addr_t dev_addr = 0;
 	int total_size;
 
-	d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
-					d40c->dma_cfg.src_info.data_width,
-					d40c->dma_cfg.dst_info.data_width);
-	if (d40d->lli_len < 0) {
-		chan_err(d40c, "Unaligned size\n");
-		return -EINVAL;
-	}
-
 	if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) {
 		chan_err(d40c, "Out of memory\n");
 		return -ENOMEM;
 	}
 
-	d40d->lli_current = 0;
-
 	if (direction == DMA_FROM_DEVICE)
 		if (d40c->runtime_addr)
 			dev_addr = d40c->runtime_addr;
@@ -1954,21 +1958,11 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
 	dma_addr_t dst_dev_addr;
 	int res;
 
-	d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
-					d40c->dma_cfg.src_info.data_width,
-					d40c->dma_cfg.dst_info.data_width);
-	if (d40d->lli_len < 0) {
-		chan_err(d40c, "Unaligned size\n");
-		return -EINVAL;
-	}
-
 	if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) {
 		chan_err(d40c, "Out of memory\n");
 		return -ENOMEM;
 	}
 
-	d40d->lli_current = 0;
-
 	if (direction == DMA_FROM_DEVICE) {
 		dst_dev_addr = 0;
 		if (d40c->runtime_addr)
@@ -2031,8 +2025,8 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 	}
 
 	spin_lock_irqsave(&d40c->lock, flags);
-	d40d = d40_desc_get(d40c);
 
+	d40d = d40_prep_desc(d40c, sgl, sg_len, dma_flags);
 	if (d40d == NULL)
 		goto err;
 
@@ -2048,12 +2042,6 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 		goto err;
 	}
 
-	d40d->txd.flags = dma_flags;
-
-	dma_async_tx_descriptor_init(&d40d->txd, chan);
-
-	d40d->txd.tx_submit = d40_tx_submit;
-
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return &d40d->txd;
 