Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/ste_dma40.c    | 191
-rw-r--r--  drivers/dma/ste_dma40_ll.c | 246
-rw-r--r--  drivers/dma/ste_dma40_ll.h |  36
3 files changed, 343 insertions(+), 130 deletions(-)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index fab68a553205..6e1d46a65d0e 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1,5 +1,6 @@
 /*
- * Copyright (C) ST-Ericsson SA 2007-2010
+ * Copyright (C) Ericsson AB 2007-2008
+ * Copyright (C) ST-Ericsson SA 2008-2010
  * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
  * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
  * License terms: GNU General Public License (GPL) version 2
@@ -554,8 +555,66 @@ static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
 	return d;
 }
 
-/* Support functions for logical channels */
+static int d40_psize_2_burst_size(bool is_log, int psize)
+{
+	if (is_log) {
+		if (psize == STEDMA40_PSIZE_LOG_1)
+			return 1;
+	} else {
+		if (psize == STEDMA40_PSIZE_PHY_1)
+			return 1;
+	}
+
+	return 2 << psize;
+}
+
+/*
+ * The dma only supports transmitting packages up to
+ * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
+ * dma elements required to send the entire sg list
+ */
+static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
+{
+	int dmalen;
+	u32 max_w = max(data_width1, data_width2);
+	u32 min_w = min(data_width1, data_width2);
+	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+
+	if (seg_max > STEDMA40_MAX_SEG_SIZE)
+		seg_max -= (1 << max_w);
+
+	if (!IS_ALIGNED(size, 1 << max_w))
+		return -EINVAL;
+
+	if (size <= seg_max)
+		dmalen = 1;
+	else {
+		dmalen = size / seg_max;
+		if (dmalen * seg_max < size)
+			dmalen++;
+	}
+	return dmalen;
+}
+
+static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
+			   u32 data_width1, u32 data_width2)
+{
+	struct scatterlist *sg;
+	int i;
+	int len = 0;
+	int ret;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		ret = d40_size_2_dmalen(sg_dma_len(sg),
+					data_width1, data_width2);
+		if (ret < 0)
+			return ret;
+		len += ret;
+	}
+	return len;
+}
 
+/* Support functions for logical channels */
 
 static int d40_channel_execute_command(struct d40_chan *d40c,
 				       enum d40_command command)
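
To make the splitting arithmetic concrete, here is a minimal user-space sketch of d40_size_2_dmalen(). It assumes STEDMA40_MAX_SEG_SIZE is the hardware's 16-bit element limit (0xffff, suggested by the 0xffff check this patch removes from ste_dma40_ll.c) and that a data width is the log2 of the element size in bytes, as the 1 << data_width expressions imply; both are assumptions, not part of the patch:

#include <stdio.h>

#define MAX_SEG_SIZE 0xffffU	/* assumed value of STEDMA40_MAX_SEG_SIZE */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned int)(a) - 1))

/* Mirrors d40_size_2_dmalen() from the patch. */
static int size_2_dmalen(int size, unsigned int w1, unsigned int w2)
{
	unsigned int max_w = w1 > w2 ? w1 : w2;
	unsigned int min_w = w1 < w2 ? w1 : w2;
	unsigned int seg_max = ALIGN(MAX_SEG_SIZE << min_w, 1u << max_w);
	int dmalen;

	if (seg_max > MAX_SEG_SIZE)
		seg_max -= 1u << max_w;	/* stay within the element counter */
	if (size % (1 << max_w))	/* must divide into whole wide elements */
		return -1;
	if ((unsigned int)size <= seg_max)
		return 1;
	dmalen = size / (int)seg_max;
	if (dmalen * (int)seg_max < size)
		dmalen++;
	return dmalen;
}

int main(void)
{
	/*
	 * Byte-wide source (w = 0), word-wide destination (w = 2):
	 * seg_max = ALIGN(0xffff, 4) = 0x10000, trimmed to 0xfffc (65532),
	 * so a 192 KiB buffer needs ceil(196608 / 65532) = 4 LLIs.
	 */
	printf("%d\n", size_2_dmalen(196608, 0, 2));	/* prints 4 */
	return 0;
}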
@@ -1241,6 +1300,21 @@ static int d40_validate_conf(struct d40_chan *d40c,
 		res = -EINVAL;
 	}
 
+	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
+	    (1 << conf->src_info.data_width) !=
+	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
+	    (1 << conf->dst_info.data_width)) {
+		/*
+		 * The DMAC hardware only supports
+		 * src (burst x width) == dst (burst x width)
+		 */
+
+		dev_err(&d40c->chan.dev->device,
+			"[%s] src (burst x width) != dst (burst x width)\n",
+			__func__);
+		res = -EINVAL;
+	}
+
 	return res;
 }
 
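
The check added here enforces equal bytes-per-burst on both sides: for example, a source doing 4-beat bursts of 32-bit elements moves 4 x 4 = 16 bytes per burst, so a destination doing 8-beat bursts of 16-bit elements (8 x 2 = 16) passes, while one doing 4-beat bursts of 16-bit elements (4 x 2 = 8) is now rejected with -EINVAL at validation time.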
@@ -1638,13 +1712,21 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 	if (d40d == NULL)
 		goto err;
 
-	d40d->lli_len = sgl_len;
+	d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
+					d40c->dma_cfg.src_info.data_width,
+					d40c->dma_cfg.dst_info.data_width);
+	if (d40d->lli_len < 0) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Unaligned size\n", __func__);
+		goto err;
+	}
+
 	d40d->lli_current = 0;
 	d40d->txd.flags = dma_flags;
 
 	if (d40c->log_num != D40_PHY_CHAN) {
 
-		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
+		if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
 			dev_err(&d40c->chan.dev->device,
 				"[%s] Out of memory\n", __func__);
 			goto err;
@@ -1654,15 +1736,17 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 					 sgl_len,
 					 d40d->lli_log.src,
 					 d40c->log_def.lcsp1,
-					 d40c->dma_cfg.src_info.data_width);
+					 d40c->dma_cfg.src_info.data_width,
+					 d40c->dma_cfg.dst_info.data_width);
 
 		(void) d40_log_sg_to_lli(sgl_dst,
 					 sgl_len,
 					 d40d->lli_log.dst,
 					 d40c->log_def.lcsp3,
-					 d40c->dma_cfg.dst_info.data_width);
+					 d40c->dma_cfg.dst_info.data_width,
+					 d40c->dma_cfg.src_info.data_width);
 	} else {
-		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+		if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
 			dev_err(&d40c->chan.dev->device,
 				"[%s] Out of memory\n", __func__);
 			goto err;
@@ -1675,6 +1759,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 					virt_to_phys(d40d->lli_phy.src),
 					d40c->src_def_cfg,
 					d40c->dma_cfg.src_info.data_width,
+					d40c->dma_cfg.dst_info.data_width,
 					d40c->dma_cfg.src_info.psize);
 
 		if (res < 0)
@@ -1687,6 +1772,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 					virt_to_phys(d40d->lli_phy.dst),
 					d40c->dst_def_cfg,
 					d40c->dma_cfg.dst_info.data_width,
+					d40c->dma_cfg.src_info.data_width,
 					d40c->dma_cfg.dst_info.psize);
 
 		if (res < 0)
@@ -1826,7 +1912,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 	struct d40_chan *d40c = container_of(chan, struct d40_chan,
 					     chan);
 	unsigned long flags;
-	int err = 0;
 
 	if (d40c->phy_chan == NULL) {
 		dev_err(&d40c->chan.dev->device,
@@ -1844,6 +1929,15 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 	}
 
 	d40d->txd.flags = dma_flags;
+	d40d->lli_len = d40_size_2_dmalen(size,
+					  d40c->dma_cfg.src_info.data_width,
+					  d40c->dma_cfg.dst_info.data_width);
+	if (d40d->lli_len < 0) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Unaligned size\n", __func__);
+		goto err;
+	}
+
 
 	dma_async_tx_descriptor_init(&d40d->txd, chan);
 
@@ -1851,37 +1945,40 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 
 	if (d40c->log_num != D40_PHY_CHAN) {
 
-		if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
+		if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
 			dev_err(&d40c->chan.dev->device,
 				"[%s] Out of memory\n", __func__);
 			goto err;
 		}
-		d40d->lli_len = 1;
 		d40d->lli_current = 0;
 
-		d40_log_fill_lli(d40d->lli_log.src,
-				 src,
-				 size,
-				 d40c->log_def.lcsp1,
-				 d40c->dma_cfg.src_info.data_width,
-				 true);
+		if (d40_log_buf_to_lli(d40d->lli_log.src,
+				       src,
+				       size,
+				       d40c->log_def.lcsp1,
+				       d40c->dma_cfg.src_info.data_width,
+				       d40c->dma_cfg.dst_info.data_width,
+				       true) == NULL)
+			goto err;
 
-		d40_log_fill_lli(d40d->lli_log.dst,
-				 dst,
-				 size,
-				 d40c->log_def.lcsp3,
-				 d40c->dma_cfg.dst_info.data_width,
-				 true);
+		if (d40_log_buf_to_lli(d40d->lli_log.dst,
+				       dst,
+				       size,
+				       d40c->log_def.lcsp3,
+				       d40c->dma_cfg.dst_info.data_width,
+				       d40c->dma_cfg.src_info.data_width,
+				       true) == NULL)
+			goto err;
 
 	} else {
 
-		if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
+		if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
 			dev_err(&d40c->chan.dev->device,
 				"[%s] Out of memory\n", __func__);
 			goto err;
 		}
 
-		err = d40_phy_fill_lli(d40d->lli_phy.src,
+		if (d40_phy_buf_to_lli(d40d->lli_phy.src,
 				       src,
 				       size,
 				       d40c->dma_cfg.src_info.psize,
@@ -1889,11 +1986,11 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 				       d40c->src_def_cfg,
 				       true,
 				       d40c->dma_cfg.src_info.data_width,
-				       false);
-		if (err)
-			goto err_fill_lli;
+				       d40c->dma_cfg.dst_info.data_width,
+				       false) == NULL)
+			goto err;
 
-		err = d40_phy_fill_lli(d40d->lli_phy.dst,
+		if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
 				       dst,
 				       size,
 				       d40c->dma_cfg.dst_info.psize,
@@ -1901,10 +1998,9 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 				       d40c->dst_def_cfg,
 				       true,
 				       d40c->dma_cfg.dst_info.data_width,
-				       false);
-
-		if (err)
-			goto err_fill_lli;
+				       d40c->dma_cfg.src_info.data_width,
+				       false) == NULL)
+			goto err;
 
 		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
 				      d40d->lli_pool.size, DMA_TO_DEVICE);
@@ -1913,9 +2009,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return &d40d->txd;
 
-err_fill_lli:
-	dev_err(&d40c->chan.dev->device,
-		"[%s] Failed filling in PHY LLI\n", __func__);
 err:
 	if (d40d)
 		d40_desc_free(d40c, d40d);
@@ -1945,13 +2038,21 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
 	dma_addr_t dev_addr = 0;
 	int total_size;
 
-	if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
+	d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
+					d40c->dma_cfg.src_info.data_width,
+					d40c->dma_cfg.dst_info.data_width);
+	if (d40d->lli_len < 0) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Unaligned size\n", __func__);
+		return -EINVAL;
+	}
+
+	if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
 		dev_err(&d40c->chan.dev->device,
 			"[%s] Out of memory\n", __func__);
 		return -ENOMEM;
 	}
 
-	d40d->lli_len = sg_len;
 	d40d->lli_current = 0;
 
 	if (direction == DMA_FROM_DEVICE)
@@ -1993,13 +2094,21 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
 	dma_addr_t dst_dev_addr;
 	int res;
 
-	if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+	d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
+					d40c->dma_cfg.src_info.data_width,
+					d40c->dma_cfg.dst_info.data_width);
+	if (d40d->lli_len < 0) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Unaligned size\n", __func__);
+		return -EINVAL;
+	}
+
+	if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
 		dev_err(&d40c->chan.dev->device,
 			"[%s] Out of memory\n", __func__);
 		return -ENOMEM;
 	}
 
-	d40d->lli_len = sgl_len;
 	d40d->lli_current = 0;
 
 	if (direction == DMA_FROM_DEVICE) {
@@ -2024,6 +2133,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
 				virt_to_phys(d40d->lli_phy.src),
 				d40c->src_def_cfg,
 				d40c->dma_cfg.src_info.data_width,
+				d40c->dma_cfg.dst_info.data_width,
 				d40c->dma_cfg.src_info.psize);
 	if (res < 0)
 		return res;
@@ -2035,6 +2145,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
 				virt_to_phys(d40d->lli_phy.dst),
 				d40c->dst_def_cfg,
 				d40c->dma_cfg.dst_info.data_width,
+				d40c->dma_cfg.src_info.data_width,
 				d40c->dma_cfg.dst_info.psize);
 	if (res < 0)
 		return res;
@@ -2244,6 +2355,8 @@ static void d40_set_runtime_config(struct dma_chan *chan,
 			psize = STEDMA40_PSIZE_PHY_8;
 		else if (config_maxburst >= 4)
 			psize = STEDMA40_PSIZE_PHY_4;
+		else if (config_maxburst >= 2)
+			psize = STEDMA40_PSIZE_PHY_2;
 		else
 			psize = STEDMA40_PSIZE_PHY_1;
 	}
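
The new STEDMA40_PSIZE_PHY_2 branch closes a gap in the maxburst mapping: a client asking for a burst of 2 or 3 previously fell through to STEDMA40_PSIZE_PHY_1 (single transfers); it now gets 2-beat bursts, e.g. maxburst = 3 with 32-bit words moves 2 x 4 = 8 bytes per burst instead of 4.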
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 8557cb88b255..0b096a38322d 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) ST-Ericsson SA 2007-2010
- * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
  * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
  * License terms: GNU General Public License (GPL) version 2
  */
@@ -122,15 +122,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
 	*dst_cfg = dst;
 }
 
-int d40_phy_fill_lli(struct d40_phy_lli *lli,
-		     dma_addr_t data,
-		     u32 data_size,
-		     int psize,
-		     dma_addr_t next_lli,
-		     u32 reg_cfg,
-		     bool term_int,
-		     u32 data_width,
-		     bool is_device)
+static int d40_phy_fill_lli(struct d40_phy_lli *lli,
+			    dma_addr_t data,
+			    u32 data_size,
+			    int psize,
+			    dma_addr_t next_lli,
+			    u32 reg_cfg,
+			    bool term_int,
+			    u32 data_width,
+			    bool is_device)
 {
 	int num_elems;
 
@@ -139,13 +139,6 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli,
 	else
 		num_elems = 2 << psize;
 
-	/*
-	 * Size is 16bit. data_width is 8, 16, 32 or 64 bit
-	 * Block large than 64 KiB must be split.
-	 */
-	if (data_size > (0xffff << data_width))
-		return -EINVAL;
-
 	/* Must be aligned */
 	if (!IS_ALIGNED(data, 0x1 << data_width))
 		return -EINVAL;
@@ -187,55 +180,118 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli,
 	return 0;
 }
 
+static int d40_seg_size(int size, int data_width1, int data_width2)
+{
+	u32 max_w = max(data_width1, data_width2);
+	u32 min_w = min(data_width1, data_width2);
+	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+
+	if (seg_max > STEDMA40_MAX_SEG_SIZE)
+		seg_max -= (1 << max_w);
+
+	if (size <= seg_max)
+		return size;
+
+	if (size <= 2 * seg_max)
+		return ALIGN(size / 2, 1 << max_w);
+
+	return seg_max;
+}
+
+struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
+				       dma_addr_t addr,
+				       u32 size,
+				       int psize,
+				       dma_addr_t lli_phys,
+				       u32 reg_cfg,
+				       bool term_int,
+				       u32 data_width1,
+				       u32 data_width2,
+				       bool is_device)
+{
+	int err;
+	dma_addr_t next = lli_phys;
+	int size_rest = size;
+	int size_seg = 0;
+
+	do {
+		size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+		size_rest -= size_seg;
+
+		if (term_int && size_rest == 0)
+			next = 0;
+		else
+			next = ALIGN(next + sizeof(struct d40_phy_lli),
+				     D40_LLI_ALIGN);
+
+		err = d40_phy_fill_lli(lli,
+				       addr,
+				       size_seg,
+				       psize,
+				       next,
+				       reg_cfg,
+				       !next,
+				       data_width1,
+				       is_device);
+
+		if (err)
+			goto err;
+
+		lli++;
+		if (!is_device)
+			addr += size_seg;
+	} while (size_rest);
+
+	return lli;
+
+ err:
+	return NULL;
+}
+
 int d40_phy_sg_to_lli(struct scatterlist *sg,
 		      int sg_len,
 		      dma_addr_t target,
-		      struct d40_phy_lli *lli,
+		      struct d40_phy_lli *lli_sg,
 		      dma_addr_t lli_phys,
 		      u32 reg_cfg,
-		      u32 data_width,
+		      u32 data_width1,
+		      u32 data_width2,
 		      int psize)
 {
 	int total_size = 0;
 	int i;
 	struct scatterlist *current_sg = sg;
-	dma_addr_t next_lli_phys;
 	dma_addr_t dst;
-	int err = 0;
+	struct d40_phy_lli *lli = lli_sg;
+	dma_addr_t l_phys = lli_phys;
 
 	for_each_sg(sg, current_sg, sg_len, i) {
 
 		total_size += sg_dma_len(current_sg);
 
-		/* If this scatter list entry is the last one, no next link */
-		if (sg_len - 1 == i)
-			next_lli_phys = 0;
-		else
-			next_lli_phys = ALIGN(lli_phys + (i + 1) *
-					      sizeof(struct d40_phy_lli),
-					      D40_LLI_ALIGN);
-
 		if (target)
 			dst = target;
 		else
 			dst = sg_phys(current_sg);
 
-		err = d40_phy_fill_lli(&lli[i],
-				       dst,
-				       sg_dma_len(current_sg),
-				       psize,
-				       next_lli_phys,
-				       reg_cfg,
-				       !next_lli_phys,
-				       data_width,
-				       target == dst);
-		if (err)
-			goto err;
+		l_phys = ALIGN(lli_phys + (lli - lli_sg) *
+			       sizeof(struct d40_phy_lli), D40_LLI_ALIGN);
+
+		lli = d40_phy_buf_to_lli(lli,
+					 dst,
+					 sg_dma_len(current_sg),
+					 psize,
+					 l_phys,
+					 reg_cfg,
+					 sg_len - 1 == i,
+					 data_width1,
+					 data_width2,
+					 target == dst);
+		if (lli == NULL)
+			return -EINVAL;
 	}
 
 	return total_size;
-err:
-	return err;
 }
 
 
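
d40_seg_size() decides how each chunk is cut: a buffer that fits seg_max goes out whole, anything up to two segments is split evenly (keeping each half aligned to the wider side), and larger buffers are chopped into seg_max-sized pieces. Below is a user-space sketch of the segment sequence the d40_phy_buf_to_lli() loop produces, under the same STEDMA40_MAX_SEG_SIZE and width assumptions as the sketch above:

#include <stdio.h>

#define MAX_SEG_SIZE 0xffffU	/* assumed value of STEDMA40_MAX_SEG_SIZE */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned int)(a) - 1))

/* Mirrors d40_seg_size() from the patch. */
static int seg_size(int size, int w1, int w2)
{
	int max_w = w1 > w2 ? w1 : w2;
	int min_w = w1 < w2 ? w1 : w2;
	unsigned int seg_max = ALIGN(MAX_SEG_SIZE << min_w, 1u << max_w);

	if (seg_max > MAX_SEG_SIZE)
		seg_max -= 1u << max_w;
	if (size <= (int)seg_max)
		return size;		/* fits in one segment */
	if (size <= 2 * (int)seg_max)
		return ALIGN(size / 2, 1 << max_w);	/* split evenly */
	return seg_max;			/* chop off a full segment */
}

int main(void)
{
	int rest = 100000;	/* 100000-byte buffer, byte src, word dst */

	/* Two even segments, like d40_phy_buf_to_lli()'s do/while loop. */
	while (rest) {
		int seg = seg_size(rest, 0, 2);
		printf("%d ", seg);	/* prints "50000 50000" */
		rest -= seg;
	}
	return 0;
}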
@@ -315,17 +371,20 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
 	writel(lli_dst->lcsp13, &lcla[1].lcsp13);
 }
 
-void d40_log_fill_lli(struct d40_log_lli *lli,
-		      dma_addr_t data, u32 data_size,
-		      u32 reg_cfg,
-		      u32 data_width,
-		      bool addr_inc)
+static void d40_log_fill_lli(struct d40_log_lli *lli,
+			     dma_addr_t data, u32 data_size,
+			     u32 reg_cfg,
+			     u32 data_width,
+			     bool addr_inc)
 {
 	lli->lcsp13 = reg_cfg;
 
 	/* The number of elements to transfer */
 	lli->lcsp02 = ((data_size >> data_width) <<
 		       D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
+
+	BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE);
+
 	/* 16 LSBs address of the current element */
 	lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
 	/* 16 MSBs address of the current element */
@@ -348,55 +407,94 @@ int d40_log_sg_to_dev(struct scatterlist *sg,
 	int total_size = 0;
 	struct scatterlist *current_sg = sg;
 	int i;
+	struct d40_log_lli *lli_src = lli->src;
+	struct d40_log_lli *lli_dst = lli->dst;
 
 	for_each_sg(sg, current_sg, sg_len, i) {
 		total_size += sg_dma_len(current_sg);
 
 		if (direction == DMA_TO_DEVICE) {
-			d40_log_fill_lli(&lli->src[i],
-					 sg_phys(current_sg),
-					 sg_dma_len(current_sg),
-					 lcsp->lcsp1, src_data_width,
-					 true);
-			d40_log_fill_lli(&lli->dst[i],
-					 dev_addr,
-					 sg_dma_len(current_sg),
-					 lcsp->lcsp3, dst_data_width,
-					 false);
+			lli_src =
+				d40_log_buf_to_lli(lli_src,
+						   sg_phys(current_sg),
+						   sg_dma_len(current_sg),
+						   lcsp->lcsp1, src_data_width,
+						   dst_data_width,
+						   true);
+			lli_dst =
+				d40_log_buf_to_lli(lli_dst,
+						   dev_addr,
+						   sg_dma_len(current_sg),
+						   lcsp->lcsp3, dst_data_width,
+						   src_data_width,
+						   false);
 		} else {
-			d40_log_fill_lli(&lli->dst[i],
-					 sg_phys(current_sg),
-					 sg_dma_len(current_sg),
-					 lcsp->lcsp3, dst_data_width,
-					 true);
-			d40_log_fill_lli(&lli->src[i],
-					 dev_addr,
-					 sg_dma_len(current_sg),
-					 lcsp->lcsp1, src_data_width,
-					 false);
+			lli_dst =
+				d40_log_buf_to_lli(lli_dst,
+						   sg_phys(current_sg),
+						   sg_dma_len(current_sg),
+						   lcsp->lcsp3, dst_data_width,
+						   src_data_width,
+						   true);
+			lli_src =
+				d40_log_buf_to_lli(lli_src,
+						   dev_addr,
+						   sg_dma_len(current_sg),
+						   lcsp->lcsp1, src_data_width,
+						   dst_data_width,
+						   false);
 		}
 	}
 	return total_size;
 }
 
+struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+				       dma_addr_t addr,
+				       int size,
+				       u32 lcsp13, /* src or dst*/
+				       u32 data_width1,
+				       u32 data_width2,
+				       bool addr_inc)
+{
+	struct d40_log_lli *lli = lli_sg;
+	int size_rest = size;
+	int size_seg = 0;
+
+	do {
+		size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+		size_rest -= size_seg;
+
+		d40_log_fill_lli(lli,
+				 addr,
+				 size_seg,
+				 lcsp13, data_width1,
+				 addr_inc);
+		if (addr_inc)
+			addr += size_seg;
+		lli++;
+	} while (size_rest);
+
+	return lli;
+}
+
 int d40_log_sg_to_lli(struct scatterlist *sg,
 		      int sg_len,
 		      struct d40_log_lli *lli_sg,
 		      u32 lcsp13, /* src or dst*/
-		      u32 data_width)
+		      u32 data_width1, u32 data_width2)
 {
 	int total_size = 0;
 	struct scatterlist *current_sg = sg;
 	int i;
+	struct d40_log_lli *lli = lli_sg;
 
 	for_each_sg(sg, current_sg, sg_len, i) {
 		total_size += sg_dma_len(current_sg);
-
-		d40_log_fill_lli(&lli_sg[i],
-				 sg_phys(current_sg),
-				 sg_dma_len(current_sg),
-				 lcsp13, data_width,
-				 true);
+		lli = d40_log_buf_to_lli(lli,
+					 sg_phys(current_sg),
+					 sg_dma_len(current_sg),
+					 lcsp13,
+					 data_width1, data_width2, true);
 	}
 	return total_size;
 }
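
Unlike d40_phy_buf_to_lli(), the logical variant has no failure path: by the time it runs, d40_size_2_dmalen() has already rejected unaligned sizes, and every segment d40_seg_size() returns fits the 16-bit element counter (for instance, a 50000-byte segment of 32-bit words is 12500 elements, well under the limit), which the new BUG_ON() in d40_log_fill_lli() backstops.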
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 9e419b907544..9cc43495bea2 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -292,18 +292,20 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
 		      struct d40_phy_lli *lli,
 		      dma_addr_t lli_phys,
 		      u32 reg_cfg,
-		      u32 data_width,
+		      u32 data_width1,
+		      u32 data_width2,
 		      int psize);
 
-int d40_phy_fill_lli(struct d40_phy_lli *lli,
-		     dma_addr_t data,
-		     u32 data_size,
-		     int psize,
-		     dma_addr_t next_lli,
-		     u32 reg_cfg,
-		     bool term_int,
-		     u32 data_width,
-		     bool is_device);
+struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
+				       dma_addr_t data,
+				       u32 data_size,
+				       int psize,
+				       dma_addr_t next_lli,
+				       u32 reg_cfg,
+				       bool term_int,
+				       u32 data_width1,
+				       u32 data_width2,
+				       bool is_device);
 
 void d40_phy_lli_write(void __iomem *virtbase,
 		       u32 phy_chan_num,
@@ -312,12 +314,12 @@ void d40_phy_lli_write(void __iomem *virtbase,
 
 /* Logical channels */
 
-void d40_log_fill_lli(struct d40_log_lli *lli,
-		      dma_addr_t data,
-		      u32 data_size,
-		      u32 reg_cfg,
-		      u32 data_width,
-		      bool addr_inc);
+struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+				       dma_addr_t addr,
+				       int size,
+				       u32 lcsp13, /* src or dst*/
+				       u32 data_width1, u32 data_width2,
+				       bool addr_inc);
 
 int d40_log_sg_to_dev(struct scatterlist *sg,
 		      int sg_len,
@@ -332,7 +334,7 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
 		      int sg_len,
 		      struct d40_log_lli *lli_sg,
 		      u32 lcsp13, /* src or dst*/
-		      u32 data_width);
+		      u32 data_width1, u32 data_width2);
 
 void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
 			    struct d40_log_lli *lli_dst,