aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma/ste_dma40.c
diff options
context:
space:
mode:
authorRabin Vincent <rabin.vincent@stericsson.com>2011-01-25 05:18:15 -0500
committerDan Williams <dan.j.williams@intel.com>2011-01-31 01:27:17 -0500
commitb00f938c8cf5ba8e7a692519548a256aa3ea1203 (patch)
treea4e9845783f335fed5fc394040dafe1330b98e78 /drivers/dma/ste_dma40.c
parent026cbc424a162e495ad29e91d354fb8fc2da2657 (diff)
dma40: fix DMA API usage for LLIs
Map and unmap the LLIs and use dma_sync_single_for_device() appropriately instead of mapping and never unmapping them.

Acked-by: Per Forlin <per.forlin@stericsson.com>
Acked-by: Jonas Aaberg <jonas.aberg@stericsson.com>
Signed-off-by: Rabin Vincent <rabin.vincent@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma/ste_dma40.c')
-rw-r--r--drivers/dma/ste_dma40.c58
1 file changed, 41 insertions, 17 deletions
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index f08e5c49c5d2..b5856864d48d 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -68,6 +68,7 @@ enum d40_command {
68 * @base: Pointer to memory area when the pre_alloc_lli's are not large 68 * @base: Pointer to memory area when the pre_alloc_lli's are not large
69 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if 69 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
70 * pre_alloc_lli is used. 70 * pre_alloc_lli is used.
71 * @dma_addr: DMA address, if mapped
71 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. 72 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
72 * @pre_alloc_lli: Pre allocated area for the most common case of transfers, 73 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
73 * one buffer to one buffer. 74 * one buffer to one buffer.
@@ -75,6 +76,7 @@ enum d40_command {
75struct d40_lli_pool { 76struct d40_lli_pool {
76 void *base; 77 void *base;
77 int size; 78 int size;
79 dma_addr_t dma_addr;
78 /* Space for dst and src, plus an extra for padding */ 80 /* Space for dst and src, plus an extra for padding */
79 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; 81 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
80}; 82};
@@ -329,7 +331,7 @@ static void __iomem *chan_base(struct d40_chan *chan)
329#define chan_err(d40c, format, arg...) \ 331#define chan_err(d40c, format, arg...) \
330 d40_err(chan2dev(d40c), format, ## arg) 332 d40_err(chan2dev(d40c), format, ## arg)
331 333
332static int d40_pool_lli_alloc(struct d40_desc *d40d, 334static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
333 int lli_len, bool is_log) 335 int lli_len, bool is_log)
334{ 336{
335 u32 align; 337 u32 align;
@@ -358,17 +360,36 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
358 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, 360 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
359 align); 361 align);
360 d40d->lli_log.dst = d40d->lli_log.src + lli_len; 362 d40d->lli_log.dst = d40d->lli_log.src + lli_len;
363
364 d40d->lli_pool.dma_addr = 0;
361 } else { 365 } else {
362 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, 366 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
363 align); 367 align);
364 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; 368 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
369
370 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
371 d40d->lli_phy.src,
372 d40d->lli_pool.size,
373 DMA_TO_DEVICE);
374
375 if (dma_mapping_error(d40c->base->dev,
376 d40d->lli_pool.dma_addr)) {
377 kfree(d40d->lli_pool.base);
378 d40d->lli_pool.base = NULL;
379 d40d->lli_pool.dma_addr = 0;
380 return -ENOMEM;
381 }
365 } 382 }
366 383
367 return 0; 384 return 0;
368} 385}
369 386
370static void d40_pool_lli_free(struct d40_desc *d40d) 387static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
371{ 388{
389 if (d40d->lli_pool.dma_addr)
390 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
391 d40d->lli_pool.size, DMA_TO_DEVICE);
392
372 kfree(d40d->lli_pool.base); 393 kfree(d40d->lli_pool.base);
373 d40d->lli_pool.base = NULL; 394 d40d->lli_pool.base = NULL;
374 d40d->lli_pool.size = 0; 395 d40d->lli_pool.size = 0;
@@ -454,7 +475,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
454 475
455 list_for_each_entry_safe(d, _d, &d40c->client, node) 476 list_for_each_entry_safe(d, _d, &d40c->client, node)
456 if (async_tx_test_ack(&d->txd)) { 477 if (async_tx_test_ack(&d->txd)) {
457 d40_pool_lli_free(d); 478 d40_pool_lli_free(d40c, d);
458 d40_desc_remove(d); 479 d40_desc_remove(d);
459 desc = d; 480 desc = d;
460 memset(desc, 0, sizeof(*desc)); 481 memset(desc, 0, sizeof(*desc));
@@ -474,6 +495,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
474static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) 495static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
475{ 496{
476 497
498 d40_pool_lli_free(d40c, d40d);
477 d40_lcla_free_all(d40c, d40d); 499 d40_lcla_free_all(d40c, d40d);
478 kmem_cache_free(d40c->base->desc_slab, d40d); 500 kmem_cache_free(d40c->base->desc_slab, d40d);
479} 501}
@@ -1063,7 +1085,7 @@ static void dma_tasklet(unsigned long data)
1063 callback_param = d40d->txd.callback_param; 1085 callback_param = d40d->txd.callback_param;
1064 1086
1065 if (async_tx_test_ack(&d40d->txd)) { 1087 if (async_tx_test_ack(&d40d->txd)) {
1066 d40_pool_lli_free(d40d); 1088 d40_pool_lli_free(d40c, d40d);
1067 d40_desc_remove(d40d); 1089 d40_desc_remove(d40d);
1068 d40_desc_free(d40c, d40d); 1090 d40_desc_free(d40c, d40d);
1069 } else { 1091 } else {
@@ -1459,7 +1481,7 @@ static int d40_free_dma(struct d40_chan *d40c)
1459 /* Release client owned descriptors */ 1481 /* Release client owned descriptors */
1460 if (!list_empty(&d40c->client)) 1482 if (!list_empty(&d40c->client))
1461 list_for_each_entry_safe(d, _d, &d40c->client, node) { 1483 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1462 d40_pool_lli_free(d); 1484 d40_pool_lli_free(d40c, d);
1463 d40_desc_remove(d); 1485 d40_desc_remove(d);
1464 d40_desc_free(d40c, d); 1486 d40_desc_free(d40c, d);
1465 } 1487 }
@@ -1633,7 +1655,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1633 1655
1634 if (chan_is_logical(d40c)) { 1656 if (chan_is_logical(d40c)) {
1635 1657
1636 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { 1658 if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) {
1637 chan_err(d40c, "Out of memory\n"); 1659 chan_err(d40c, "Out of memory\n");
1638 goto err; 1660 goto err;
1639 } 1661 }
@@ -1652,7 +1674,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1652 d40c->dma_cfg.dst_info.data_width, 1674 d40c->dma_cfg.dst_info.data_width,
1653 d40c->dma_cfg.src_info.data_width); 1675 d40c->dma_cfg.src_info.data_width);
1654 } else { 1676 } else {
1655 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { 1677 if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) {
1656 chan_err(d40c, "Out of memory\n"); 1678 chan_err(d40c, "Out of memory\n");
1657 goto err; 1679 goto err;
1658 } 1680 }
@@ -1683,8 +1705,9 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1683 if (res < 0) 1705 if (res < 0)
1684 goto err; 1706 goto err;
1685 1707
1686 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 1708 dma_sync_single_for_device(d40c->base->dev,
1687 d40d->lli_pool.size, DMA_TO_DEVICE); 1709 d40d->lli_pool.dma_addr,
1710 d40d->lli_pool.size, DMA_TO_DEVICE);
1688 } 1711 }
1689 1712
1690 dma_async_tx_descriptor_init(&d40d->txd, chan); 1713 dma_async_tx_descriptor_init(&d40d->txd, chan);
@@ -1876,7 +1899,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1876 1899
1877 if (chan_is_logical(d40c)) { 1900 if (chan_is_logical(d40c)) {
1878 1901
1879 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { 1902 if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) {
1880 chan_err(d40c, "Out of memory\n"); 1903 chan_err(d40c, "Out of memory\n");
1881 goto err; 1904 goto err;
1882 } 1905 }
@@ -1902,7 +1925,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1902 1925
1903 } else { 1926 } else {
1904 1927
1905 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { 1928 if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) {
1906 chan_err(d40c, "Out of memory\n"); 1929 chan_err(d40c, "Out of memory\n");
1907 goto err; 1930 goto err;
1908 } 1931 }
@@ -1931,8 +1954,9 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1931 false) == NULL) 1954 false) == NULL)
1932 goto err; 1955 goto err;
1933 1956
1934 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 1957 dma_sync_single_for_device(d40c->base->dev,
1935 d40d->lli_pool.size, DMA_TO_DEVICE); 1958 d40d->lli_pool.dma_addr,
1959 d40d->lli_pool.size, DMA_TO_DEVICE);
1936 } 1960 }
1937 1961
1938 spin_unlock_irqrestore(&d40c->lock, flags); 1962 spin_unlock_irqrestore(&d40c->lock, flags);
@@ -1975,7 +1999,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1975 return -EINVAL; 1999 return -EINVAL;
1976 } 2000 }
1977 2001
1978 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { 2002 if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) {
1979 chan_err(d40c, "Out of memory\n"); 2003 chan_err(d40c, "Out of memory\n");
1980 return -ENOMEM; 2004 return -ENOMEM;
1981 } 2005 }
@@ -2029,7 +2053,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
2029 return -EINVAL; 2053 return -EINVAL;
2030 } 2054 }
2031 2055
2032 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { 2056 if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) {
2033 chan_err(d40c, "Out of memory\n"); 2057 chan_err(d40c, "Out of memory\n");
2034 return -ENOMEM; 2058 return -ENOMEM;
2035 } 2059 }
@@ -2075,8 +2099,8 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
2075 if (res < 0) 2099 if (res < 0)
2076 return res; 2100 return res;
2077 2101
2078 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 2102 dma_sync_single_for_device(d40c->base->dev, d40d->lli_pool.dma_addr,
2079 d40d->lli_pool.size, DMA_TO_DEVICE); 2103 d40d->lli_pool.size, DMA_TO_DEVICE);
2080 return 0; 2104 return 0;
2081} 2105}
2082 2106