author		Jonas Aaberg <jonas.aberg@stericsson.com>	2010-06-20 17:25:24 -0400
committer	Dan Williams <dan.j.williams@intel.com>		2010-06-22 21:01:54 -0400
commit		2a6143407d9114a0c5d16a7eed1a0892a4ce9f19 (patch)
tree		1f3bdfaf0a679890ec449569868c74eec92e94a6 /drivers/dma
parent		ff0b12baa50390ba6a963cb6f6162a94ed4fc333 (diff)
DMAENGINE: ste_dma40: various cosmetic clean-ups
This cleans up some extra newlines, removes some code duplication and moves
the code to comply better with checkpatch.

Signed-off-by: Jonas Aaberg <jonas.aberg@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
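The "code duplication" referred to here is in d40_prep_slave_sg_log() (see the @@ -1860 hunk below): the long d40_log_sg_to_dev() call was repeated in both direction branches and is hoisted out so it appears exactly once. A minimal compilable sketch of the same refactoring pattern, with hypothetical names (do_transfer() stands in for d40_log_sg_to_dev(); rx_addr/tx_addr for the dev_rx/dev_tx lookups):

#include <errno.h>

#define FROM_DEVICE	1
#define TO_DEVICE	2

/* Trivial stand-in for the shared call that used to be duplicated. */
static int do_transfer(unsigned long dev_addr)
{
	(void)dev_addr;
	return 0;
}

static int prep(int direction, unsigned long rx_addr, unsigned long tx_addr)
{
	unsigned long dev_addr;

	/* Branch only on what actually differs: the device address... */
	if (direction == FROM_DEVICE)
		dev_addr = rx_addr;
	else if (direction == TO_DEVICE)
		dev_addr = tx_addr;
	else
		return -EINVAL;

	/* ...and make the formerly duplicated call exactly once. */
	return do_transfer(dev_addr);
}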
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/ste_dma40.c		95
-rw-r--r--	drivers/dma/ste_dma40_ll.c	24
2 files changed, 52 insertions(+), 67 deletions(-)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 8ed154779bbf..1d176642e523 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -161,7 +161,8 @@ struct d40_base;
  * @pending_tx: The number of pending transfers. Used between interrupt handler
  * and tasklet.
  * @busy: Set to true when transfer is ongoing on this channel.
- * @phy_chan: Pointer to physical channel which this instance runs on.
+ * @phy_chan: Pointer to physical channel which this instance runs on. If this
+ * point is NULL, then the channel is not allocated.
  * @chan: DMA engine handle.
  * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
  * transfer and call client callback.
@@ -1236,7 +1237,6 @@ static int d40_free_dma(struct d40_chan *d40c)
 		return -EINVAL;
 	}
 
-
 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
 	if (res) {
 		dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
@@ -1305,8 +1305,6 @@ static int d40_free_dma(struct d40_chan *d40c)
 	d40c->base->lookup_phy_chans[phy->num] = NULL;
 
 	return 0;
-
-
 }
 
 static int d40_pause(struct dma_chan *chan)
@@ -1314,7 +1312,6 @@ static int d40_pause(struct dma_chan *chan)
 	struct d40_chan *d40c =
 		container_of(chan, struct d40_chan, chan);
 	int res;
-
 	unsigned long flags;
 
 	spin_lock_irqsave(&d40c->lock, flags);
@@ -1510,25 +1507,23 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 						    struct scatterlist *sgl_dst,
 						    struct scatterlist *sgl_src,
 						    unsigned int sgl_len,
-						    unsigned long flags)
+						    unsigned long dma_flags)
 {
 	int res;
 	struct d40_desc *d40d;
 	struct d40_chan *d40c = container_of(chan, struct d40_chan,
 					     chan);
-	unsigned long flg;
-
+	unsigned long flags;
 
-	spin_lock_irqsave(&d40c->lock, flg);
+	spin_lock_irqsave(&d40c->lock, flags);
 	d40d = d40_desc_get(d40c);
 
 	if (d40d == NULL)
 		goto err;
 
-	memset(d40d, 0, sizeof(struct d40_desc));
 	d40d->lli_len = sgl_len;
 	d40d->lli_tx_len = d40d->lli_len;
-	d40d->txd.flags = flags;
+	d40d->txd.flags = dma_flags;
 
 	if (d40c->log_num != D40_PHY_CHAN) {
 		if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
@@ -1556,7 +1551,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 					 d40d->lli_log.src,
 					 d40c->log_def.lcsp1,
 					 d40c->dma_cfg.src_info.data_width,
-					 flags & DMA_PREP_INTERRUPT,
+					 dma_flags & DMA_PREP_INTERRUPT,
 					 d40d->lli_tx_len,
 					 d40c->base->plat_data->llis_per_log);
 
@@ -1566,7 +1561,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 					 d40d->lli_log.dst,
 					 d40c->log_def.lcsp3,
 					 d40c->dma_cfg.dst_info.data_width,
-					 flags & DMA_PREP_INTERRUPT,
+					 dma_flags & DMA_PREP_INTERRUPT,
 					 d40d->lli_tx_len,
 					 d40c->base->plat_data->llis_per_log);
 
@@ -1612,11 +1607,11 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 
 	d40d->txd.tx_submit = d40_tx_submit;
 
-	spin_unlock_irqrestore(&d40c->lock, flg);
+	spin_unlock_irqrestore(&d40c->lock, flags);
 
 	return &d40d->txd;
 err:
-	spin_unlock_irqrestore(&d40c->lock, flg);
+	spin_unlock_irqrestore(&d40c->lock, flags);
 	return NULL;
 }
 EXPORT_SYMBOL(stedma40_memcpy_sg);
@@ -1729,15 +1724,15 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 						       dma_addr_t dst,
 						       dma_addr_t src,
 						       size_t size,
-						       unsigned long flags)
+						       unsigned long dma_flags)
 {
 	struct d40_desc *d40d;
 	struct d40_chan *d40c = container_of(chan, struct d40_chan,
 					     chan);
-	unsigned long flg;
+	unsigned long flags;
 	int err = 0;
 
-	spin_lock_irqsave(&d40c->lock, flg);
+	spin_lock_irqsave(&d40c->lock, flags);
 	d40d = d40_desc_get(d40c);
 
 	if (d40d == NULL) {
@@ -1746,9 +1741,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 		goto err;
 	}
 
-	memset(d40d, 0, sizeof(struct d40_desc));
-
-	d40d->txd.flags = flags;
+	d40d->txd.flags = dma_flags;
 
 	dma_async_tx_descriptor_init(&d40d->txd, chan);
 
@@ -1817,7 +1810,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 			       d40d->lli_pool.size, DMA_TO_DEVICE);
 	}
 
-	spin_unlock_irqrestore(&d40c->lock, flg);
+	spin_unlock_irqrestore(&d40c->lock, flags);
 	return &d40d->txd;
 
 err_fill_lli:
@@ -1825,7 +1818,7 @@ err_fill_lli:
 		"[%s] Failed filling in PHY LLI\n", __func__);
 	d40_pool_lli_free(d40d);
 err:
-	spin_unlock_irqrestore(&d40c->lock, flg);
+	spin_unlock_irqrestore(&d40c->lock, flags);
 	return NULL;
 }
 
@@ -1834,7 +1827,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
 				 struct scatterlist *sgl,
 				 unsigned int sg_len,
 				 enum dma_data_direction direction,
-				 unsigned long flags)
+				 unsigned long dma_flags)
 {
 	dma_addr_t dev_addr = 0;
 	int total_size;
@@ -1860,32 +1853,24 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
 	if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
 		d40d->lli_tx_len = 1;
 
-	if (direction == DMA_FROM_DEVICE) {
+	if (direction == DMA_FROM_DEVICE)
 		dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
-		total_size = d40_log_sg_to_dev(&d40c->lcla,
-					       sgl, sg_len,
-					       &d40d->lli_log,
-					       &d40c->log_def,
-					       d40c->dma_cfg.src_info.data_width,
-					       d40c->dma_cfg.dst_info.data_width,
-					       direction,
-					       flags & DMA_PREP_INTERRUPT,
-					       dev_addr, d40d->lli_tx_len,
-					       d40c->base->plat_data->llis_per_log);
-	} else if (direction == DMA_TO_DEVICE) {
+	else if (direction == DMA_TO_DEVICE)
 		dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
-		total_size = d40_log_sg_to_dev(&d40c->lcla,
-					       sgl, sg_len,
-					       &d40d->lli_log,
-					       &d40c->log_def,
-					       d40c->dma_cfg.src_info.data_width,
-					       d40c->dma_cfg.dst_info.data_width,
-					       direction,
-					       flags & DMA_PREP_INTERRUPT,
-					       dev_addr, d40d->lli_tx_len,
-					       d40c->base->plat_data->llis_per_log);
-	} else
+	else
 		return -EINVAL;
+
+	total_size = d40_log_sg_to_dev(&d40c->lcla,
+				       sgl, sg_len,
+				       &d40d->lli_log,
+				       &d40c->log_def,
+				       d40c->dma_cfg.src_info.data_width,
+				       d40c->dma_cfg.dst_info.data_width,
+				       direction,
+				       dma_flags & DMA_PREP_INTERRUPT,
+				       dev_addr, d40d->lli_tx_len,
+				       d40c->base->plat_data->llis_per_log);
+
 	if (total_size < 0)
 		return -EINVAL;
 
@@ -1897,7 +1882,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
 				 struct scatterlist *sgl,
 				 unsigned int sgl_len,
 				 enum dma_data_direction direction,
-				 unsigned long flags)
+				 unsigned long dma_flags)
 {
 	dma_addr_t src_dev_addr;
 	dma_addr_t dst_dev_addr;
@@ -1954,12 +1939,12 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 					      struct scatterlist *sgl,
 					      unsigned int sg_len,
 					      enum dma_data_direction direction,
-					      unsigned long flags)
+					      unsigned long dma_flags)
 {
 	struct d40_desc *d40d;
 	struct d40_chan *d40c = container_of(chan, struct d40_chan,
 					     chan);
-	unsigned long flg;
+	unsigned long flags;
 	int err;
 
 	if (d40c->dma_cfg.pre_transfer)
@@ -1967,9 +1952,9 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 				  d40c->dma_cfg.pre_transfer_data,
 				  sg_dma_len(sgl));
 
-	spin_lock_irqsave(&d40c->lock, flg);
+	spin_lock_irqsave(&d40c->lock, flags);
 	d40d = d40_desc_get(d40c);
-	spin_unlock_irqrestore(&d40c->lock, flg);
+	spin_unlock_irqrestore(&d40c->lock, flags);
 
 	if (d40d == NULL)
 		return NULL;
@@ -1978,10 +1963,10 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 
 	if (d40c->log_num != D40_PHY_CHAN)
 		err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
-					    direction, flags);
+					    direction, dma_flags);
 	else
 		err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
-					    direction, flags);
+					    direction, dma_flags);
 	if (err) {
 		dev_err(&d40c->chan.dev->device,
 			"[%s] Failed to prepare %s slave sg job: %d\n",
@@ -1990,7 +1975,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 		return NULL;
 	}
 
-	d40d->txd.flags = flags;
+	d40d->txd.flags = dma_flags;
 
 	dma_async_tx_descriptor_init(&d40d->txd, chan);
 
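A likely motivation for the flags -> dma_flags renames above, beyond naming consistency: each of these functions also takes the channel spinlock with spin_lock_irqsave(), whose saved-IRQ-state local is conventionally named flags. With the DMA-API parameter renamed, the local can drop the awkward flg spelling. A minimal sketch of the resulting convention (struct job_chan and prep_job() are hypothetical):

#include <linux/spinlock.h>

struct job_chan {
	spinlock_t lock;
};

static void prep_job(struct job_chan *c, unsigned long dma_flags)
{
	unsigned long flags;	/* IRQ state saved by spin_lock_irqsave() */

	spin_lock_irqsave(&c->lock, flags);
	/* ... allocate a descriptor and honour dma_flags here ... */
	spin_unlock_irqrestore(&c->lock, flags);
}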
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 561fdd8a80c1..e0194e4fd86c 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -430,25 +430,25 @@ void d40_log_lli_write(struct d40_log_lli_full *lcpa,
 		       struct d40_log_lli *lli_src,
 		       int llis_per_log)
 {
-	u32 slos = 0;
-	u32 dlos = 0;
+	u32 slos;
+	u32 dlos;
 	int i;
 
-	lcpa->lcsp0 = lli_src->lcsp02;
-	lcpa->lcsp1 = lli_src->lcsp13;
-	lcpa->lcsp2 = lli_dst->lcsp02;
-	lcpa->lcsp3 = lli_dst->lcsp13;
+	writel(lli_src->lcsp02, &lcpa->lcsp0);
+	writel(lli_src->lcsp13, &lcpa->lcsp1);
+	writel(lli_dst->lcsp02, &lcpa->lcsp2);
+	writel(lli_dst->lcsp13, &lcpa->lcsp3);
 
 	slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
 	dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
 
 	for (i = 0; (i < llis_per_log) && slos && dlos; i++) {
-		writel(lli_src[i+1].lcsp02, &lcla_src[i].lcsp02);
-		writel(lli_src[i+1].lcsp13, &lcla_src[i].lcsp13);
-		writel(lli_dst[i+1].lcsp02, &lcla_dst[i].lcsp02);
-		writel(lli_dst[i+1].lcsp13, &lcla_dst[i].lcsp13);
+		writel(lli_src[i + 1].lcsp02, &lcla_src[i].lcsp02);
+		writel(lli_src[i + 1].lcsp13, &lcla_src[i].lcsp13);
+		writel(lli_dst[i + 1].lcsp02, &lcla_dst[i].lcsp02);
+		writel(lli_dst[i + 1].lcsp13, &lcla_dst[i].lcsp13);
 
-		slos = lli_src[i+1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
-		dlos = lli_dst[i+1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
+		slos = lli_src[i + 1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
+		dlos = lli_dst[i + 1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
 	}
 }
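The switch from plain assignments (lcpa->lcsp0 = ...) to writel() in the hunk above matters because the LCPA block is memory-mapped hardware rather than ordinary RAM: plain stores through a pointer may be reordered or merged by the compiler, while writel() performs an ordered MMIO store. A minimal sketch of the idiom (struct hw_regs and program_regs() are hypothetical):

#include <linux/io.h>
#include <linux/types.h>

struct hw_regs {
	u32 cfg0;
	u32 cfg1;
};

static void program_regs(struct hw_regs __iomem *r, u32 a, u32 b)
{
	/* writel(value, addr) is an ordered MMIO store, unlike a plain
	 * "r->cfg0 = a;" which the compiler is free to reorder. */
	writel(a, &r->cfg0);
	writel(b, &r->cfg1);
}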