author		Jonas Aaberg <jonas.aberg@stericsson.com>	2010-08-09 08:08:56 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2010-09-22 17:53:46 -0400
commit		698e4732e7c9cf9f1f3eac2b8cdce8d4fe2b90bd (patch)
tree		9716c813accd1f8f5f5fe6d4ad389fd64396c26d /drivers/dma/ste_dma40.c
parent		69f93faa57ed6c91b32aae1dcff7282fcb2872f5 (diff)
DMAENGINE: ste_dma40: rewrote LCLA entries allocation code
LLI allocation is now done on the job level instead of the channel level.
Previously the maximum length of a linked job in HW on a logical channel
was 8, since the LLIs were evenly divided. Now only executing jobs have
allocated LLIs, which increases the maximum length to 64 links in HW.

Signed-off-by: Jonas Aaberg <jonas.aberg@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
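As an illustration only, and not part of the patch itself, the following stand-alone C sketch models the per-job LCLA slot allocation this commit introduces. The names loosely mirror d40_lcla_alloc_one()/d40_lcla_free_all() in the diff below; the spinlock, the struct d40_desc bookkeeping and the HW programming are left out, and NUM_PHY_CHANS is an arbitrary example value.

#include <stdio.h>

/* Simplified model of per-job LCLA slot allocation. In the driver,
 * alloc_map[] holds struct d40_desc pointers and is protected by
 * lcla_pool.lock; both are reduced to the bare minimum here.
 */
#define LINKS_PER_EVENT_GRP 128	/* cf. D40_LCLA_LINK_PER_EVENT_GRP */
#define NUM_PHY_CHANS 4		/* arbitrary example value */

struct job { int lcla_alloc; };

static struct job *alloc_map[NUM_PHY_CHANS * LINKS_PER_EVENT_GRP];

/* Grab one free slot for 'job' on physical channel 'phy'. Slot 0 is never
 * handed out because index 0 doubles as the end-of-link marker. Returns
 * the slot index, or -1 when the group is full (the caller then runs the
 * remaining LLIs from LCPA only, one link at a time).
 */
static int lcla_alloc_one(int phy, struct job *job)
{
	int base = phy * LINKS_PER_EVENT_GRP;
	int i;

	for (i = 1; i < LINKS_PER_EVENT_GRP / 2; i++) {
		if (!alloc_map[base + i]) {
			alloc_map[base + i] = job;
			job->lcla_alloc++;
			return i;
		}
	}
	return -1;
}

/* Return every slot owned by 'job' once its transfer has completed. */
static void lcla_free_all(int phy, struct job *job)
{
	int base = phy * LINKS_PER_EVENT_GRP;
	int i;

	for (i = 1; i < LINKS_PER_EVENT_GRP / 2 && job->lcla_alloc; i++) {
		if (alloc_map[base + i] == job) {
			alloc_map[base + i] = NULL;
			job->lcla_alloc--;
		}
	}
}

int main(void)
{
	struct job j = { 0 };
	int first = lcla_alloc_one(0, &j);
	int second = lcla_alloc_one(0, &j);

	printf("got slots %d and %d, %d allocated\n", first, second, j.lcla_alloc);
	lcla_free_all(0, &j);
	printf("%d allocated after free\n", j.lcla_alloc);
	return 0;
}

Because src and dst entries are handed out as a pair, only half of the 128 entries per event group act as link slots, in line with the commit's stated maximum of 64 links in HW.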
Diffstat (limited to 'drivers/dma/ste_dma40.c')
-rw-r--r--	drivers/dma/ste_dma40.c	315
1 file changed, 148 insertions, 167 deletions
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index ac325e918171..c9f485e3baeb 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -11,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/err.h>
 
 #include <plat/ste_dma40.h>
 
@@ -29,6 +30,11 @@
 
 /* Hardware requirement on LCLA alignment */
 #define LCLA_ALIGNMENT 0x40000
+
+/* Max number of links per event group */
+#define D40_LCLA_LINK_PER_EVENT_GRP 128
+#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
+
 /* Attempts before giving up to trying to get pages that are aligned */
 #define MAX_LCLA_ALLOC_ATTEMPTS 256
 
@@ -81,9 +87,8 @@ struct d40_lli_pool {
  * @lli_log: Same as above but for logical channels.
  * @lli_pool: The pool with two entries pre-allocated.
  * @lli_len: Number of llis of current descriptor.
- * @lli_count: Number of transfered llis.
- * @lli_tx_len: Max number of LLIs per transfer, there can be
- *		 many transfer for one descriptor.
+ * @lli_current: Number of transfered llis.
+ * @lcla_alloc: Number of LCLA entries allocated.
  * @txd: DMA engine struct. Used for among other things for communication
  *	 during a transfer.
  * @node: List entry.
@@ -93,7 +98,6 @@ struct d40_lli_pool {
  *
  * This descriptor is used for both logical and physical transfers.
  */
-
 struct d40_desc {
 	/* LLI physical */
 	struct d40_phy_lli_bidir	 lli_phy;
@@ -102,8 +106,8 @@ struct d40_desc {
 
 	struct d40_lli_pool	 lli_pool;
 	int			 lli_len;
-	int			 lli_count;
-	u32			 lli_tx_len;
+	int			 lli_current;
+	int			 lcla_alloc;
 
 	struct dma_async_tx_descriptor	 txd;
 	struct list_head	 node;
@@ -121,17 +125,14 @@ struct d40_desc {
  * @pages: The number of pages needed for all physical channels.
  *	    Only used later for clean-up on error
  * @lock: Lock to protect the content in this struct.
- * @alloc_map: Bitmap mapping between physical channel and LCLA entries.
- * @num_blocks: The number of entries of alloc_map. Equals to the
- *		number of physical channels.
+ * @alloc_map: big map over which LCLA entry is own by which job.
  */
 struct d40_lcla_pool {
 	void		*base;
 	void		*base_unaligned;
 	int		 pages;
 	spinlock_t	 lock;
-	u32		*alloc_map;
-	int		 num_blocks;
+	struct d40_desc	**alloc_map;
 };
 
 /**
@@ -202,7 +203,6 @@ struct d40_chan {
 	u32 src_def_cfg;
 	u32 dst_def_cfg;
 	struct d40_def_lcsp log_def;
-	struct d40_lcla_elem lcla;
 	struct d40_log_lli_full *lcpa;
 	/* Runtime reconfiguration */
 	dma_addr_t runtime_addr;
@@ -351,6 +351,67 @@ static void d40_pool_lli_free(struct d40_desc *d40d)
 	d40d->lli_phy.dst = NULL;
 }
 
+static int d40_lcla_alloc_one(struct d40_chan *d40c,
+			      struct d40_desc *d40d)
+{
+	unsigned long flags;
+	int i;
+	int ret = -EINVAL;
+	int p;
+
+	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
+
+	p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
+
+	/*
+	 * Allocate both src and dst at the same time, therefore the half
+	 * start on 1 since 0 can't be used since zero is used as end marker.
+	 */
+	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
+		if (!d40c->base->lcla_pool.alloc_map[p + i]) {
+			d40c->base->lcla_pool.alloc_map[p + i] = d40d;
+			d40d->lcla_alloc++;
+			ret = i;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
+
+	return ret;
+}
+
+static int d40_lcla_free_all(struct d40_chan *d40c,
+			     struct d40_desc *d40d)
+{
+	unsigned long flags;
+	int i;
+	int ret = -EINVAL;
+
+	if (d40c->log_num == D40_PHY_CHAN)
+		return 0;
+
+	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
+
+	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
+		if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
+		    D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
+			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
+			    D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
+			d40d->lcla_alloc--;
+			if (d40d->lcla_alloc == 0) {
+				ret = 0;
+				break;
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
+
+	return ret;
+
+}
+
 static void d40_desc_remove(struct d40_desc *d40d)
 {
 	list_del(&d40d->node);
@@ -380,6 +441,8 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 
 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
 {
+
+	d40_lcla_free_all(d40c, d40d);
 	kmem_cache_free(d40c->base->desc_slab, d40d);
 }
 
@@ -388,6 +451,59 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
 	list_add_tail(&desc->node, &d40c->active);
 }
 
+static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+	int curr_lcla = -EINVAL, next_lcla;
+
+	if (d40c->log_num == D40_PHY_CHAN) {
+		d40_phy_lli_write(d40c->base->virtbase,
+				  d40c->phy_chan->num,
+				  d40d->lli_phy.dst,
+				  d40d->lli_phy.src);
+		d40d->lli_current = d40d->lli_len;
+	} else {
+
+		if ((d40d->lli_len - d40d->lli_current) > 1)
+			curr_lcla = d40_lcla_alloc_one(d40c, d40d);
+
+		d40_log_lli_lcpa_write(d40c->lcpa,
+				       &d40d->lli_log.dst[d40d->lli_current],
+				       &d40d->lli_log.src[d40d->lli_current],
+				       curr_lcla);
+
+		d40d->lli_current++;
+		for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
+			struct d40_log_lli *lcla;
+
+			if (d40d->lli_current + 1 < d40d->lli_len)
+				next_lcla = d40_lcla_alloc_one(d40c, d40d);
+			else
+				next_lcla = -EINVAL;
+
+			lcla = d40c->base->lcla_pool.base +
+				d40c->phy_chan->num * 1024 +
+				8 * curr_lcla * 2;
+
+			d40_log_lli_lcla_write(lcla,
+					       &d40d->lli_log.dst[d40d->lli_current],
+					       &d40d->lli_log.src[d40d->lli_current],
+					       next_lcla);
+
+			(void) dma_map_single(d40c->base->dev, lcla,
+					      2 * sizeof(struct d40_log_lli),
+					      DMA_TO_DEVICE);
+
+			curr_lcla = next_lcla;
+
+			if (curr_lcla == -EINVAL) {
+				d40d->lli_current++;
+				break;
+			}
+
+		}
+	}
+}
+
 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
 {
 	struct d40_desc *d;
@@ -433,61 +549,6 @@ static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
 
 /* Support functions for logical channels */
 
-static int d40_lcla_id_get(struct d40_chan *d40c)
-{
-	int src_id = 0;
-	int dst_id = 0;
-	struct d40_log_lli *lcla_lidx_base =
-		d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
-	int i;
-	int lli_per_log = d40c->base->plat_data->llis_per_log;
-	unsigned long flags;
-
-	if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
-		return 0;
-
-	if (d40c->base->lcla_pool.num_blocks > 32)
-		return -EINVAL;
-
-	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
-
-	for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
-		if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
-		      (0x1 << i))) {
-			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
-				(0x1 << i);
-			break;
-		}
-	}
-	src_id = i;
-	if (src_id >= d40c->base->lcla_pool.num_blocks)
-		goto err;
-
-	for (; i < d40c->base->lcla_pool.num_blocks; i++) {
-		if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
-		      (0x1 << i))) {
-			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
-				(0x1 << i);
-			break;
-		}
-	}
-
-	dst_id = i;
-	if (dst_id == src_id)
-		goto err;
-
-	d40c->lcla.src_id = src_id;
-	d40c->lcla.dst_id = dst_id;
-	d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
-	d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
-
-	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
-	return 0;
-err:
-	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
-	return -EINVAL;
-}
-
 
 static int d40_channel_execute_command(struct d40_chan *d40c,
 				       enum d40_command command)
@@ -556,7 +617,6 @@ done:
 static void d40_term_all(struct d40_chan *d40c)
 {
 	struct d40_desc *d40d;
-	unsigned long flags;
 
 	/* Release active descriptors */
 	while ((d40d = d40_first_active_get(d40c))) {
@@ -570,17 +630,6 @@ static void d40_term_all(struct d40_chan *d40c)
 		d40_desc_free(d40c, d40d);
 	}
 
-	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
-
-	d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
-		(~(0x1 << d40c->lcla.dst_id));
-	d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
-		(~(0x1 << d40c->lcla.src_id));
-
-	d40c->lcla.src_id = -1;
-	d40c->lcla.dst_id = -1;
-
-	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
 
 	d40c->pending_tx = 0;
 	d40c->busy = false;
@@ -682,38 +731,6 @@ static void d40_config_write(struct d40_chan *d40c)
 	}
 }
 
-static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
-{
-	if (d40c->log_num == D40_PHY_CHAN) {
-		d40_phy_lli_write(d40c->base->virtbase,
-				  d40c->phy_chan->num,
-				  d40d->lli_phy.dst,
-				  d40d->lli_phy.src);
-	} else {
-		struct d40_log_lli *src = d40d->lli_log.src;
-		struct d40_log_lli *dst = d40d->lli_log.dst;
-		int s;
-
-		src += d40d->lli_count;
-		dst += d40d->lli_count;
-		s = d40_log_lli_write(d40c->lcpa,
-				      d40c->lcla.src, d40c->lcla.dst,
-				      dst, src,
-				      d40c->base->plat_data->llis_per_log);
-
-		/* If s equals to zero, the job is not linked */
-		if (s > 0) {
-			(void) dma_map_single(d40c->base->dev, d40c->lcla.src,
-					      s * sizeof(struct d40_log_lli),
-					      DMA_TO_DEVICE);
-			(void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
-					      s * sizeof(struct d40_log_lli),
-					      DMA_TO_DEVICE);
-		}
-	}
-	d40d->lli_count += d40d->lli_tx_len;
-}
-
 static u32 d40_residue(struct d40_chan *d40c)
 {
 	u32 num_elt;
@@ -942,6 +959,7 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
 	 * If this job is already linked in hw,
 	 * do not submit it.
 	 */
+
 	if (!d40d->is_hw_linked) {
 		/* Initiate DMA job */
 		d40_desc_load(d40c, d40d);
@@ -968,8 +986,9 @@ static void dma_tc_handle(struct d40_chan *d40c)
 	if (d40d == NULL)
 		return;
 
-	if (d40d->lli_count < d40d->lli_len) {
+	d40_lcla_free_all(d40c, d40d);
 
+	if (d40d->lli_current < d40d->lli_len) {
 		d40_desc_load(d40c, d40d);
 		/* Start dma job */
 		(void) d40_start(d40c);
@@ -1022,6 +1041,7 @@ static void dma_tasklet(unsigned long data)
 	} else {
 		if (!d40d->is_in_client_list) {
 			d40_desc_remove(d40d);
+			d40_lcla_free_all(d40c, d40d);
 			list_add_tail(&d40d->node, &d40c->client);
 			d40d->is_in_client_list = true;
 		}
@@ -1247,7 +1267,6 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
 
 	spin_lock_irqsave(&phy->lock, flags);
 	if (!log_event_line) {
-		/* Physical interrupts are masked per physical full channel */
 		phy->allocated_dst = D40_ALLOC_FREE;
 		phy->allocated_src = D40_ALLOC_FREE;
 		is_free = true;
@@ -1633,21 +1652,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 		goto err;
 
 	d40d->lli_len = sgl_len;
-	d40d->lli_tx_len = d40d->lli_len;
+	d40d->lli_current = 0;
 	d40d->txd.flags = dma_flags;
 
 	if (d40c->log_num != D40_PHY_CHAN) {
-		if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
-			d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
-
-		if (sgl_len > 1)
-			/*
-			 * Check if there is space available in lcla. If not,
-			 * split list into 1-length and run only in lcpa
-			 * space.
-			 */
-			if (d40_lcla_id_get(d40c) != 0)
-				d40d->lli_tx_len = 1;
 
 		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
 			dev_err(&d40c->chan.dev->device,
@@ -1655,25 +1663,17 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 			goto err;
 		}
 
-		(void) d40_log_sg_to_lli(d40c->lcla.src_id,
-					 sgl_src,
+		(void) d40_log_sg_to_lli(sgl_src,
 					 sgl_len,
 					 d40d->lli_log.src,
 					 d40c->log_def.lcsp1,
-					 d40c->dma_cfg.src_info.data_width,
-					 d40d->lli_tx_len,
-					 d40c->base->plat_data->llis_per_log);
+					 d40c->dma_cfg.src_info.data_width);
 
-		(void) d40_log_sg_to_lli(d40c->lcla.dst_id,
-					 sgl_dst,
+		(void) d40_log_sg_to_lli(sgl_dst,
 					 sgl_len,
 					 d40d->lli_log.dst,
 					 d40c->log_def.lcsp3,
-					 d40c->dma_cfg.dst_info.data_width,
-					 d40d->lli_tx_len,
-					 d40c->base->plat_data->llis_per_log);
-
-
+					 d40c->dma_cfg.dst_info.data_width);
 	} else {
 		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
 			dev_err(&d40c->chan.dev->device,
@@ -1869,23 +1869,21 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 		goto err;
 	}
 	d40d->lli_len = 1;
-	d40d->lli_tx_len = 1;
+	d40d->lli_current = 0;
 
 	d40_log_fill_lli(d40d->lli_log.src,
 			 src,
 			 size,
-			 0,
 			 d40c->log_def.lcsp1,
 			 d40c->dma_cfg.src_info.data_width,
-			 false, true);
+			 true);
 
 	d40_log_fill_lli(d40d->lli_log.dst,
 			 dst,
 			 size,
-			 0,
 			 d40c->log_def.lcsp3,
 			 d40c->dma_cfg.dst_info.data_width,
-			 true, true);
+			 true);
 
 	} else {
 
@@ -1953,19 +1951,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
 	}
 
 	d40d->lli_len = sg_len;
-	if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
-		d40d->lli_tx_len = d40d->lli_len;
-	else
-		d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
-
-	if (sg_len > 1)
-		/*
-		 * Check if there is space available in lcla.
-		 * If not, split list into 1-length and run only
-		 * in lcpa space.
-		 */
-		if (d40_lcla_id_get(d40c) != 0)
-			d40d->lli_tx_len = 1;
+	d40d->lli_current = 0;
 
 	if (direction == DMA_FROM_DEVICE)
 		if (d40c->runtime_addr)
@@ -1981,15 +1967,13 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
 	else
 		return -EINVAL;
 
-	total_size = d40_log_sg_to_dev(&d40c->lcla,
-				       sgl, sg_len,
+	total_size = d40_log_sg_to_dev(sgl, sg_len,
 				       &d40d->lli_log,
 				       &d40c->log_def,
 				       d40c->dma_cfg.src_info.data_width,
 				       d40c->dma_cfg.dst_info.data_width,
 				       direction,
-				       dev_addr, d40d->lli_tx_len,
-				       d40c->base->plat_data->llis_per_log);
+				       dev_addr);
 
 	if (total_size < 0)
 		return -EINVAL;
@@ -2015,7 +1999,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
 	}
 
 	d40d->lli_len = sgl_len;
-	d40d->lli_tx_len = sgl_len;
+	d40d->lli_current = 0;
 
 	if (direction == DMA_FROM_DEVICE) {
 		dst_dev_addr = 0;
@@ -2323,10 +2307,6 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
 		d40c->base = base;
 		d40c->chan.device = dma;
 
-		/* Invalidate lcla element */
-		d40c->lcla.src_id = -1;
-		d40c->lcla.dst_id = -1;
-
 		spin_lock_init(&d40c->lock);
 
 		d40c->log_num = D40_PHY_CHAN;
@@ -2631,7 +2611,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 		if (!base->lookup_log_chans)
 			goto failure;
 	}
-	base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
+
+	base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
+					    sizeof(struct d40_desc *) *
+					    D40_LCLA_LINK_PER_EVENT_GRP,
 					    GFP_KERNEL);
 	if (!base->lcla_pool.alloc_map)
 		goto failure;
@@ -2878,8 +2861,6 @@ static int __init d40_probe(struct platform_device *pdev)
 
 	spin_lock_init(&base->lcla_pool.lock);
 
-	base->lcla_pool.num_blocks = base->num_phy_chans;
-
 	base->irq = platform_get_irq(pdev, 0);
 
 	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);