aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma
diff options
context:
space:
mode:
authorPer Friden <per.friden@stericsson.com>2010-06-20 17:24:45 -0400
committerDan Williams <dan.j.williams@intel.com>2010-06-22 21:01:53 -0400
commit941b77a3b6946dd6223a029007f695aa841b6d34 (patch)
tree25b25c68e24c42271f13aeba82f5424829a84d35 /drivers/dma
parent7e27d6e778cd87b6f2415515d7127eba53fe5d02 (diff)
DMAENGINE: ste_dma40: fixed lli_max=1 issue
Fixed the lli_max=1 issue in the case of a full lcla; previously this case was not handled properly. Signed-off-by: Jonas Aaberg <jonas.aberg@stericsson.com> Signed-off-by: Linus Walleij <linus.walleij@stericsson.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/ste_dma40.c62
1 file changed, 29 insertions, 33 deletions
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index c426829f6ab8..4618d6c727c8 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -81,9 +81,10 @@ struct d40_lli_pool {
81 * lli_len equals one. 81 * lli_len equals one.
82 * @lli_log: Same as above but for logical channels. 82 * @lli_log: Same as above but for logical channels.
83 * @lli_pool: The pool with two entries pre-allocated. 83 * @lli_pool: The pool with two entries pre-allocated.
84 * @lli_len: Number of LLI's in lli_pool 84 * @lli_len: Number of llis of current descriptor.
85 * @lli_tcount: Number of LLIs processed in the transfer. When equals lli_len 85 * @lli_count: Number of transfered llis.
86 * then this transfer job is done. 86 * @lli_tx_len: Max number of LLIs per transfer, there can be
87 * many transfer for one descriptor.
87 * @txd: DMA engine struct. Used for among other things for communication 88 * @txd: DMA engine struct. Used for among other things for communication
88 * during a transfer. 89 * during a transfer.
89 * @node: List entry. 90 * @node: List entry.
@@ -100,8 +101,9 @@ struct d40_desc {
100 struct d40_log_lli_bidir lli_log; 101 struct d40_log_lli_bidir lli_log;
101 102
102 struct d40_lli_pool lli_pool; 103 struct d40_lli_pool lli_pool;
103 u32 lli_len; 104 int lli_len;
104 u32 lli_tcount; 105 int lli_count;
106 u32 lli_tx_len;
105 107
106 struct dma_async_tx_descriptor txd; 108 struct dma_async_tx_descriptor txd;
107 struct list_head node; 109 struct list_head node;
@@ -365,11 +367,6 @@ static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
365 return cookie; 367 return cookie;
366} 368}
367 369
368static void d40_desc_reset(struct d40_desc *d40d)
369{
370 d40d->lli_tcount = 0;
371}
372
373static void d40_desc_remove(struct d40_desc *d40d) 370static void d40_desc_remove(struct d40_desc *d40d)
374{ 371{
375 list_del(&d40d->node); 372 list_del(&d40d->node);
@@ -738,25 +735,18 @@ static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
738 d40c->phy_chan->num, 735 d40c->phy_chan->num,
739 d40d->lli_phy.dst, 736 d40d->lli_phy.dst,
740 d40d->lli_phy.src); 737 d40d->lli_phy.src);
741 d40d->lli_tcount = d40d->lli_len;
742 } else if (d40d->lli_log.dst && d40d->lli_log.src) { 738 } else if (d40d->lli_log.dst && d40d->lli_log.src) {
743 u32 lli_len;
744 struct d40_log_lli *src = d40d->lli_log.src; 739 struct d40_log_lli *src = d40d->lli_log.src;
745 struct d40_log_lli *dst = d40d->lli_log.dst; 740 struct d40_log_lli *dst = d40d->lli_log.dst;
746 741
747 src += d40d->lli_tcount; 742 src += d40d->lli_count;
748 dst += d40d->lli_tcount; 743 dst += d40d->lli_count;
749
750 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
751 lli_len = d40d->lli_len;
752 else
753 lli_len = d40c->base->plat_data->llis_per_log;
754 d40d->lli_tcount += lli_len;
755 d40_log_lli_write(d40c->lcpa, d40c->lcla.src, 744 d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
756 d40c->lcla.dst, 745 d40c->lcla.dst,
757 dst, src, 746 dst, src,
758 d40c->base->plat_data->llis_per_log); 747 d40c->base->plat_data->llis_per_log);
759 } 748 }
749 d40d->lli_count += d40d->lli_tx_len;
760} 750}
761 751
762static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) 752static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -838,7 +828,7 @@ static void dma_tc_handle(struct d40_chan *d40c)
838 if (d40d == NULL) 828 if (d40d == NULL)
839 return; 829 return;
840 830
841 if (d40d->lli_tcount < d40d->lli_len) { 831 if (d40d->lli_count < d40d->lli_len) {
842 832
843 d40_desc_load(d40c, d40d); 833 d40_desc_load(d40c, d40d);
844 /* Start dma job */ 834 /* Start dma job */
@@ -891,7 +881,6 @@ static void dma_tasklet(unsigned long data)
891 /* Return desc to free-list */ 881 /* Return desc to free-list */
892 d40_desc_free(d40c, d40d_fin); 882 d40_desc_free(d40c, d40d_fin);
893 } else { 883 } else {
894 d40_desc_reset(d40d_fin);
895 if (!d40d_fin->is_in_client_list) { 884 if (!d40d_fin->is_in_client_list) {
896 d40_desc_remove(d40d_fin); 885 d40_desc_remove(d40d_fin);
897 list_add_tail(&d40d_fin->node, &d40c->client); 886 list_add_tail(&d40d_fin->node, &d40c->client);
@@ -1573,7 +1562,6 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1573 struct d40_chan *d40c = container_of(chan, struct d40_chan, 1562 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1574 chan); 1563 chan);
1575 unsigned long flg; 1564 unsigned long flg;
1576 int lli_max = d40c->base->plat_data->llis_per_log;
1577 1565
1578 1566
1579 spin_lock_irqsave(&d40c->lock, flg); 1567 spin_lock_irqsave(&d40c->lock, flg);
@@ -1584,10 +1572,13 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1584 1572
1585 memset(d40d, 0, sizeof(struct d40_desc)); 1573 memset(d40d, 0, sizeof(struct d40_desc));
1586 d40d->lli_len = sgl_len; 1574 d40d->lli_len = sgl_len;
1587 1575 d40d->lli_tx_len = d40d->lli_len;
1588 d40d->txd.flags = flags; 1576 d40d->txd.flags = flags;
1589 1577
1590 if (d40c->log_num != D40_PHY_CHAN) { 1578 if (d40c->log_num != D40_PHY_CHAN) {
1579 if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
1580 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1581
1591 if (sgl_len > 1) 1582 if (sgl_len > 1)
1592 /* 1583 /*
1593 * Check if there is space available in lcla. If not, 1584 * Check if there is space available in lcla. If not,
@@ -1596,7 +1587,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1596 */ 1587 */
1597 if (d40_lcla_id_get(d40c, 1588 if (d40_lcla_id_get(d40c,
1598 &d40c->base->lcla_pool) != 0) 1589 &d40c->base->lcla_pool) != 0)
1599 lli_max = 1; 1590 d40d->lli_tx_len = 1;
1600 1591
1601 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { 1592 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1602 dev_err(&d40c->chan.dev->device, 1593 dev_err(&d40c->chan.dev->device,
@@ -1610,7 +1601,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1610 d40d->lli_log.src, 1601 d40d->lli_log.src,
1611 d40c->log_def.lcsp1, 1602 d40c->log_def.lcsp1,
1612 d40c->dma_cfg.src_info.data_width, 1603 d40c->dma_cfg.src_info.data_width,
1613 flags & DMA_PREP_INTERRUPT, lli_max, 1604 flags & DMA_PREP_INTERRUPT,
1605 d40d->lli_tx_len,
1614 d40c->base->plat_data->llis_per_log); 1606 d40c->base->plat_data->llis_per_log);
1615 1607
1616 (void) d40_log_sg_to_lli(d40c->lcla.dst_id, 1608 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
@@ -1619,7 +1611,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1619 d40d->lli_log.dst, 1611 d40d->lli_log.dst,
1620 d40c->log_def.lcsp3, 1612 d40c->log_def.lcsp3,
1621 d40c->dma_cfg.dst_info.data_width, 1613 d40c->dma_cfg.dst_info.data_width,
1622 flags & DMA_PREP_INTERRUPT, lli_max, 1614 flags & DMA_PREP_INTERRUPT,
1615 d40d->lli_tx_len,
1623 d40c->base->plat_data->llis_per_log); 1616 d40c->base->plat_data->llis_per_log);
1624 1617
1625 1618
@@ -1794,6 +1787,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1794 goto err; 1787 goto err;
1795 } 1788 }
1796 d40d->lli_len = 1; 1789 d40d->lli_len = 1;
1790 d40d->lli_tx_len = 1;
1797 1791
1798 d40_log_fill_lli(d40d->lli_log.src, 1792 d40_log_fill_lli(d40d->lli_log.src,
1799 src, 1793 src,
@@ -1869,7 +1863,6 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1869{ 1863{
1870 dma_addr_t dev_addr = 0; 1864 dma_addr_t dev_addr = 0;
1871 int total_size; 1865 int total_size;
1872 int lli_max = d40c->base->plat_data->llis_per_log;
1873 1866
1874 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) { 1867 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1875 dev_err(&d40c->chan.dev->device, 1868 dev_err(&d40c->chan.dev->device,
@@ -1878,7 +1871,10 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1878 } 1871 }
1879 1872
1880 d40d->lli_len = sg_len; 1873 d40d->lli_len = sg_len;
1881 d40d->lli_tcount = 0; 1874 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
1875 d40d->lli_tx_len = d40d->lli_len;
1876 else
1877 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1882 1878
1883 if (sg_len > 1) 1879 if (sg_len > 1)
1884 /* 1880 /*
@@ -1887,7 +1883,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1887 * in lcpa space. 1883 * in lcpa space.
1888 */ 1884 */
1889 if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0) 1885 if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
1890 lli_max = 1; 1886 d40d->lli_tx_len = 1;
1891 1887
1892 if (direction == DMA_FROM_DEVICE) { 1888 if (direction == DMA_FROM_DEVICE) {
1893 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; 1889 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
@@ -1899,7 +1895,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1899 d40c->dma_cfg.dst_info.data_width, 1895 d40c->dma_cfg.dst_info.data_width,
1900 direction, 1896 direction,
1901 flags & DMA_PREP_INTERRUPT, 1897 flags & DMA_PREP_INTERRUPT,
1902 dev_addr, lli_max, 1898 dev_addr, d40d->lli_tx_len,
1903 d40c->base->plat_data->llis_per_log); 1899 d40c->base->plat_data->llis_per_log);
1904 } else if (direction == DMA_TO_DEVICE) { 1900 } else if (direction == DMA_TO_DEVICE) {
1905 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; 1901 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
@@ -1911,7 +1907,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1911 d40c->dma_cfg.dst_info.data_width, 1907 d40c->dma_cfg.dst_info.data_width,
1912 direction, 1908 direction,
1913 flags & DMA_PREP_INTERRUPT, 1909 flags & DMA_PREP_INTERRUPT,
1914 dev_addr, lli_max, 1910 dev_addr, d40d->lli_tx_len,
1915 d40c->base->plat_data->llis_per_log); 1911 d40c->base->plat_data->llis_per_log);
1916 } else 1912 } else
1917 return -EINVAL; 1913 return -EINVAL;
@@ -1939,7 +1935,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1939 } 1935 }
1940 1936
1941 d40d->lli_len = sgl_len; 1937 d40d->lli_len = sgl_len;
1942 d40d->lli_tcount = 0; 1938 d40d->lli_tx_len = sgl_len;
1943 1939
1944 if (direction == DMA_FROM_DEVICE) { 1940 if (direction == DMA_FROM_DEVICE) {
1945 dst_dev_addr = 0; 1941 dst_dev_addr = 0;