author		Jonas Aaberg <jonas.aberg@stericsson.com>	2010-08-09 08:08:56 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2010-09-22 17:53:46 -0400
commit		698e4732e7c9cf9f1f3eac2b8cdce8d4fe2b90bd (patch)
tree		9716c813accd1f8f5f5fe6d4ad389fd64396c26d
parent		69f93faa57ed6c91b32aae1dcff7282fcb2872f5 (diff)
DMAENGINE: ste_dma40: rewrote LCLA entries allocation code
LLI allocation is now done at job level instead of channel level. Previously, the maximum length of a linked job in HW on a logical channel was 8, since the LLIs were divided evenly between the channels. Now only executing jobs have LLIs allocated, which increases the maximum length to 64 links in HW.

Signed-off-by: Jonas Aaberg <jonas.aberg@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--	drivers/dma/ste_dma40.c		315
-rw-r--r--	drivers/dma/ste_dma40_ll.c	161
-rw-r--r--	drivers/dma/ste_dma40_ll.h	51
3 files changed, 218 insertions(+), 309 deletions(-)
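For orientation before the diff: the patch replaces a per-channel bitmap of LCLA entries with a per-job ownership map. Below is a minimal standalone C sketch of that model, not driver code; NUM_PHY_CHANS and struct job are illustrative stand-ins (the driver uses the probed channel count and struct d40_desc, returns -EINVAL on failure, and holds lcla_pool.lock around the scan).

/* Standalone sketch of the per-job LCLA ownership map. */
#include <stdio.h>

#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define NUM_PHY_CHANS 8	/* illustrative; the driver probes this */

struct job { int id; };	/* stands in for struct d40_desc */

/* One slot per LCLA entry; a slot records which job owns the entry. */
static struct job *alloc_map[NUM_PHY_CHANS * D40_LCLA_LINK_PER_EVENT_GRP];

/*
 * Returns the allocated index, or -1 when the event group is full.
 * Index 0 is never handed out: zero is the hardware end-of-link
 * marker. src and dst entries are handed out as a pair, so only
 * half the range is scanned.
 */
static int lcla_alloc_one(int phy_chan, struct job *job)
{
	int base = phy_chan * D40_LCLA_LINK_PER_EVENT_GRP;
	int i;

	for (i = 1; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (!alloc_map[base + i]) {
			alloc_map[base + i] = job;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	struct job a = { 1 };

	printf("first entry on chan 0: %d\n", lcla_alloc_one(0, &a));
	printf("second entry on chan 0: %d\n", lcla_alloc_one(0, &a));
	return 0;
}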
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index ac325e918171..c9f485e3baeb 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -11,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/err.h>
 
 #include <plat/ste_dma40.h>
 
@@ -29,6 +30,11 @@
 
 /* Hardware requirement on LCLA alignment */
 #define LCLA_ALIGNMENT 0x40000
+
+/* Max number of links per event group */
+#define D40_LCLA_LINK_PER_EVENT_GRP 128
+#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
+
 /* Attempts before giving up to trying to get pages that are aligned */
 #define MAX_LCLA_ALLOC_ATTEMPTS 256
 
@@ -81,9 +87,8 @@ struct d40_lli_pool {
  * @lli_log: Same as above but for logical channels.
  * @lli_pool: The pool with two entries pre-allocated.
  * @lli_len: Number of llis of current descriptor.
- * @lli_count: Number of transfered llis.
- * @lli_tx_len: Max number of LLIs per transfer, there can be
- *              many transfer for one descriptor.
+ * @lli_current: Number of transferred llis.
+ * @lcla_alloc: Number of LCLA entries allocated.
  * @txd: DMA engine struct. Used for among other things for communication
  *       during a transfer.
  * @node: List entry.
@@ -93,7 +98,6 @@ struct d40_lli_pool {
  *
  * This descriptor is used for both logical and physical transfers.
  */
-
 struct d40_desc {
 	/* LLI physical */
 	struct d40_phy_lli_bidir lli_phy;
@@ -102,8 +106,8 @@ struct d40_desc {
 
 	struct d40_lli_pool lli_pool;
 	int lli_len;
-	int lli_count;
-	u32 lli_tx_len;
+	int lli_current;
+	int lcla_alloc;
 
 	struct dma_async_tx_descriptor txd;
 	struct list_head node;
@@ -121,17 +125,14 @@ struct d40_desc {
  * @pages: The number of pages needed for all physical channels.
  *         Only used later for clean-up on error
  * @lock: Lock to protect the content in this struct.
- * @alloc_map: Bitmap mapping between physical channel and LCLA entries.
- * @num_blocks: The number of entries of alloc_map. Equals to the
- *              number of physical channels.
+ * @alloc_map: Map of which LCLA entry is owned by which job.
  */
 struct d40_lcla_pool {
 	void *base;
 	void *base_unaligned;
 	int pages;
 	spinlock_t lock;
-	u32 *alloc_map;
-	int num_blocks;
+	struct d40_desc **alloc_map;
 };
 
 /**
@@ -202,7 +203,6 @@ struct d40_chan {
 	u32 src_def_cfg;
 	u32 dst_def_cfg;
 	struct d40_def_lcsp log_def;
-	struct d40_lcla_elem lcla;
 	struct d40_log_lli_full *lcpa;
 	/* Runtime reconfiguration */
 	dma_addr_t runtime_addr;
@@ -351,6 +351,67 @@ static void d40_pool_lli_free(struct d40_desc *d40d)
 	d40d->lli_phy.dst = NULL;
 }
 
+static int d40_lcla_alloc_one(struct d40_chan *d40c,
+			      struct d40_desc *d40d)
+{
+	unsigned long flags;
+	int i;
+	int ret = -EINVAL;
+	int p;
+
+	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
+
+	p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
+
+	/*
+	 * Allocate both src and dst at the same time. The halves
+	 * therefore start at 1, since 0 is used as the end marker.
+	 */
+	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
+		if (!d40c->base->lcla_pool.alloc_map[p + i]) {
+			d40c->base->lcla_pool.alloc_map[p + i] = d40d;
+			d40d->lcla_alloc++;
+			ret = i;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
+
+	return ret;
+}
+
+static int d40_lcla_free_all(struct d40_chan *d40c,
+			     struct d40_desc *d40d)
+{
+	unsigned long flags;
+	int i;
+	int ret = -EINVAL;
+
+	if (d40c->log_num == D40_PHY_CHAN)
+		return 0;
+
+	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
+
+	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
+		if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
+		    D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
+			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
+			    D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
+			d40d->lcla_alloc--;
+			if (d40d->lcla_alloc == 0) {
+				ret = 0;
+				break;
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
+
+	return ret;
+
+}
+
 static void d40_desc_remove(struct d40_desc *d40d)
 {
 	list_del(&d40d->node);
@@ -380,6 +441,8 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 
 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
 {
+
+	d40_lcla_free_all(d40c, d40d);
 	kmem_cache_free(d40c->base->desc_slab, d40d);
 }
 
@@ -388,6 +451,59 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
 	list_add_tail(&desc->node, &d40c->active);
 }
 
+static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+	int curr_lcla = -EINVAL, next_lcla;
+
+	if (d40c->log_num == D40_PHY_CHAN) {
+		d40_phy_lli_write(d40c->base->virtbase,
+				  d40c->phy_chan->num,
+				  d40d->lli_phy.dst,
+				  d40d->lli_phy.src);
+		d40d->lli_current = d40d->lli_len;
+	} else {
+
+		if ((d40d->lli_len - d40d->lli_current) > 1)
+			curr_lcla = d40_lcla_alloc_one(d40c, d40d);
+
+		d40_log_lli_lcpa_write(d40c->lcpa,
+				       &d40d->lli_log.dst[d40d->lli_current],
+				       &d40d->lli_log.src[d40d->lli_current],
+				       curr_lcla);
+
+		d40d->lli_current++;
+		for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
+			struct d40_log_lli *lcla;
+
+			if (d40d->lli_current + 1 < d40d->lli_len)
+				next_lcla = d40_lcla_alloc_one(d40c, d40d);
+			else
+				next_lcla = -EINVAL;
+
+			lcla = d40c->base->lcla_pool.base +
+				d40c->phy_chan->num * 1024 +
+				8 * curr_lcla * 2;
+
+			d40_log_lli_lcla_write(lcla,
+					       &d40d->lli_log.dst[d40d->lli_current],
+					       &d40d->lli_log.src[d40d->lli_current],
+					       next_lcla);
+
+			(void) dma_map_single(d40c->base->dev, lcla,
+					      2 * sizeof(struct d40_log_lli),
+					      DMA_TO_DEVICE);
+
+			curr_lcla = next_lcla;
+
+			if (curr_lcla == -EINVAL) {
+				d40d->lli_current++;
+				break;
+			}
+
+		}
+	}
+}
+
 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
 {
 	struct d40_desc *d;
@@ -433,61 +549,6 @@ static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
 
 /* Support functions for logical channels */
 
-static int d40_lcla_id_get(struct d40_chan *d40c)
-{
-	int src_id = 0;
-	int dst_id = 0;
-	struct d40_log_lli *lcla_lidx_base =
-		d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
-	int i;
-	int lli_per_log = d40c->base->plat_data->llis_per_log;
-	unsigned long flags;
-
-	if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
-		return 0;
-
-	if (d40c->base->lcla_pool.num_blocks > 32)
-		return -EINVAL;
-
-	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
-
-	for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
-		if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
-		      (0x1 << i))) {
-			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
-				(0x1 << i);
-			break;
-		}
-	}
-	src_id = i;
-	if (src_id >= d40c->base->lcla_pool.num_blocks)
-		goto err;
-
-	for (; i < d40c->base->lcla_pool.num_blocks; i++) {
-		if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
-		      (0x1 << i))) {
-			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
-				(0x1 << i);
-			break;
-		}
-	}
-
-	dst_id = i;
-	if (dst_id == src_id)
-		goto err;
-
-	d40c->lcla.src_id = src_id;
-	d40c->lcla.dst_id = dst_id;
-	d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
-	d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
-
-	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
-	return 0;
-err:
-	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
-	return -EINVAL;
-}
-
 
 static int d40_channel_execute_command(struct d40_chan *d40c,
 				       enum d40_command command)
@@ -556,7 +617,6 @@ done:
 static void d40_term_all(struct d40_chan *d40c)
 {
 	struct d40_desc *d40d;
-	unsigned long flags;
 
 	/* Release active descriptors */
 	while ((d40d = d40_first_active_get(d40c))) {
@@ -570,17 +630,6 @@ static void d40_term_all(struct d40_chan *d40c)
 		d40_desc_free(d40c, d40d);
 	}
 
-	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
-
-	d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
-		(~(0x1 << d40c->lcla.dst_id));
-	d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
-		(~(0x1 << d40c->lcla.src_id));
-
-	d40c->lcla.src_id = -1;
-	d40c->lcla.dst_id = -1;
-
-	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
 
 	d40c->pending_tx = 0;
 	d40c->busy = false;
@@ -682,38 +731,6 @@ static void d40_config_write(struct d40_chan *d40c)
 	}
 }
 
-static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
-{
-	if (d40c->log_num == D40_PHY_CHAN) {
-		d40_phy_lli_write(d40c->base->virtbase,
-				  d40c->phy_chan->num,
-				  d40d->lli_phy.dst,
-				  d40d->lli_phy.src);
-	} else {
-		struct d40_log_lli *src = d40d->lli_log.src;
-		struct d40_log_lli *dst = d40d->lli_log.dst;
-		int s;
-
-		src += d40d->lli_count;
-		dst += d40d->lli_count;
-		s = d40_log_lli_write(d40c->lcpa,
-				      d40c->lcla.src, d40c->lcla.dst,
-				      dst, src,
-				      d40c->base->plat_data->llis_per_log);
-
-		/* If s equals to zero, the job is not linked */
-		if (s > 0) {
-			(void) dma_map_single(d40c->base->dev, d40c->lcla.src,
-					      s * sizeof(struct d40_log_lli),
-					      DMA_TO_DEVICE);
-			(void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
-					      s * sizeof(struct d40_log_lli),
-					      DMA_TO_DEVICE);
-		}
-	}
-	d40d->lli_count += d40d->lli_tx_len;
-}
-
 static u32 d40_residue(struct d40_chan *d40c)
 {
 	u32 num_elt;
@@ -942,6 +959,7 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
 	 * If this job is already linked in hw,
 	 * do not submit it.
 	 */
+
 	if (!d40d->is_hw_linked) {
 		/* Initiate DMA job */
 		d40_desc_load(d40c, d40d);
@@ -968,8 +986,9 @@ static void dma_tc_handle(struct d40_chan *d40c)
 	if (d40d == NULL)
 		return;
 
-	if (d40d->lli_count < d40d->lli_len) {
+	d40_lcla_free_all(d40c, d40d);
 
+	if (d40d->lli_current < d40d->lli_len) {
 		d40_desc_load(d40c, d40d);
 		/* Start dma job */
 		(void) d40_start(d40c);
@@ -1022,6 +1041,7 @@ static void dma_tasklet(unsigned long data)
 	} else {
 		if (!d40d->is_in_client_list) {
 			d40_desc_remove(d40d);
+			d40_lcla_free_all(d40c, d40d);
 			list_add_tail(&d40d->node, &d40c->client);
 			d40d->is_in_client_list = true;
 		}
@@ -1247,7 +1267,6 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
 
 	spin_lock_irqsave(&phy->lock, flags);
 	if (!log_event_line) {
-		/* Physical interrupts are masked per physical full channel */
 		phy->allocated_dst = D40_ALLOC_FREE;
 		phy->allocated_src = D40_ALLOC_FREE;
 		is_free = true;
@@ -1633,21 +1652,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 		goto err;
 
 	d40d->lli_len = sgl_len;
-	d40d->lli_tx_len = d40d->lli_len;
+	d40d->lli_current = 0;
 	d40d->txd.flags = dma_flags;
 
 	if (d40c->log_num != D40_PHY_CHAN) {
-		if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
-			d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
-
-		if (sgl_len > 1)
-			/*
-			 * Check if there is space available in lcla. If not,
-			 * split list into 1-length and run only in lcpa
-			 * space.
-			 */
-			if (d40_lcla_id_get(d40c) != 0)
-				d40d->lli_tx_len = 1;
 
 		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
 			dev_err(&d40c->chan.dev->device,
@@ -1655,25 +1663,17 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 			goto err;
 		}
 
-		(void) d40_log_sg_to_lli(d40c->lcla.src_id,
-					 sgl_src,
+		(void) d40_log_sg_to_lli(sgl_src,
 					 sgl_len,
 					 d40d->lli_log.src,
 					 d40c->log_def.lcsp1,
-					 d40c->dma_cfg.src_info.data_width,
-					 d40d->lli_tx_len,
-					 d40c->base->plat_data->llis_per_log);
+					 d40c->dma_cfg.src_info.data_width);
 
-		(void) d40_log_sg_to_lli(d40c->lcla.dst_id,
-					 sgl_dst,
+		(void) d40_log_sg_to_lli(sgl_dst,
 					 sgl_len,
 					 d40d->lli_log.dst,
 					 d40c->log_def.lcsp3,
-					 d40c->dma_cfg.dst_info.data_width,
-					 d40d->lli_tx_len,
-					 d40c->base->plat_data->llis_per_log);
-
-
+					 d40c->dma_cfg.dst_info.data_width);
 	} else {
 		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
 			dev_err(&d40c->chan.dev->device,
@@ -1869,23 +1869,21 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 			goto err;
 		}
 		d40d->lli_len = 1;
-		d40d->lli_tx_len = 1;
+		d40d->lli_current = 0;
 
 		d40_log_fill_lli(d40d->lli_log.src,
 				 src,
 				 size,
-				 0,
 				 d40c->log_def.lcsp1,
 				 d40c->dma_cfg.src_info.data_width,
-				 false, true);
+				 true);
 
 		d40_log_fill_lli(d40d->lli_log.dst,
 				 dst,
 				 size,
-				 0,
 				 d40c->log_def.lcsp3,
 				 d40c->dma_cfg.dst_info.data_width,
-				 true, true);
+				 true);
 
 	} else {
 
@@ -1953,19 +1951,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
 	}
 
 	d40d->lli_len = sg_len;
-	if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
-		d40d->lli_tx_len = d40d->lli_len;
-	else
-		d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
-
-	if (sg_len > 1)
-		/*
-		 * Check if there is space available in lcla.
-		 * If not, split list into 1-length and run only
-		 * in lcpa space.
-		 */
-		if (d40_lcla_id_get(d40c) != 0)
-			d40d->lli_tx_len = 1;
+	d40d->lli_current = 0;
 
 	if (direction == DMA_FROM_DEVICE)
 		if (d40c->runtime_addr)
@@ -1981,15 +1967,13 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
 	else
 		return -EINVAL;
 
-	total_size = d40_log_sg_to_dev(&d40c->lcla,
-				       sgl, sg_len,
+	total_size = d40_log_sg_to_dev(sgl, sg_len,
 				       &d40d->lli_log,
 				       &d40c->log_def,
 				       d40c->dma_cfg.src_info.data_width,
 				       d40c->dma_cfg.dst_info.data_width,
 				       direction,
-				       dev_addr, d40d->lli_tx_len,
-				       d40c->base->plat_data->llis_per_log);
+				       dev_addr);
 
 	if (total_size < 0)
 		return -EINVAL;
@@ -2015,7 +1999,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
 	}
 
 	d40d->lli_len = sgl_len;
-	d40d->lli_tx_len = sgl_len;
+	d40d->lli_current = 0;
 
 	if (direction == DMA_FROM_DEVICE) {
 		dst_dev_addr = 0;
@@ -2323,10 +2307,6 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
 		d40c->base = base;
 		d40c->chan.device = dma;
 
-		/* Invalidate lcla element */
-		d40c->lcla.src_id = -1;
-		d40c->lcla.dst_id = -1;
-
 		spin_lock_init(&d40c->lock);
 
 		d40c->log_num = D40_PHY_CHAN;
@@ -2631,7 +2611,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 		if (!base->lookup_log_chans)
 			goto failure;
 	}
-	base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
+
+	base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
+					    sizeof(struct d40_desc *) *
+					    D40_LCLA_LINK_PER_EVENT_GRP,
 					    GFP_KERNEL);
 	if (!base->lcla_pool.alloc_map)
 		goto failure;
@@ -2878,8 +2861,6 @@ static int __init d40_probe(struct platform_device *pdev)
 
 	spin_lock_init(&base->lcla_pool.lock);
 
-	base->lcla_pool.num_blocks = base->num_phy_chans;
-
 	base->irq = platform_get_irq(pdev, 0);
 
 	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
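A note on the load path before the next file: d40_desc_load() above writes a job's first LLI pair into LCPA and chains the rest through freshly allocated LCLA entries, cutting the chain short when the pool runs dry. The following compilable sketch mirrors only that control flow; the stub types and printf-backed writes are illustrative stand-ins for the real driver state and register accesses.

/* Control-flow sketch of d40_desc_load() for a logical channel. */
#include <stdio.h>

struct lli { int id; };
struct job { struct lli *lli; int len; int cur; };

/* Stubs for the LCLA allocator and the LCPA/LCLA writes; the real
 * versions are in the ste_dma40.c diff above. */
static int next_free = 1;

static int lcla_alloc_one(void)
{
	return next_free < 64 ? next_free++ : -1;
}

static void write_lcpa(struct lli *l, int next)
{
	printf("LCPA: lli %d, next lcla %d\n", l->id, next);
}

static void write_lcla(int at, struct lli *l, int next)
{
	printf("LCLA[%d]: lli %d, next lcla %d\n", at, l->id, next);
}

/* First link in LCPA, later links in LCLA, chain cut when the
 * allocator fails; the leftover links are loaded from the
 * terminal-count interrupt, as in dma_tc_handle(). */
static void load_job(struct job *j)
{
	int curr = (j->len - j->cur > 1) ? lcla_alloc_one() : -1;

	write_lcpa(&j->lli[j->cur], curr);
	j->cur++;

	if (curr < 0)
		return;	/* single link, or no LCLA space at all */

	for (; j->cur < j->len; j->cur++) {
		int next = (j->cur + 1 < j->len) ? lcla_alloc_one() : -1;

		write_lcla(curr, &j->lli[j->cur], next);
		curr = next;
		if (curr < 0) {	/* ran out of entries mid-chain */
			j->cur++;
			break;
		}
	}
}

int main(void)
{
	struct lli llis[4] = { {0}, {1}, {2}, {3} };
	struct job j = { llis, 4, 0 };

	load_job(&j);
	return 0;
}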
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 92a0960fba08..86a306dbe1b4 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -37,16 +37,13 @@ void d40_log_cfg(struct stedma40_chan_cfg *cfg,
 	    cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
 		l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS;
 
-	l3 |= 1 << D40_MEM_LCSP3_DCFG_TIM_POS;
 	l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS;
 	l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
 	l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS;
-	l3 |= 1 << D40_MEM_LCSP3_DTCP_POS;
 
 	l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS;
 	l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
 	l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS;
-	l1 |= 1 << D40_MEM_LCSP1_STCP_POS;
 
 	*lcsp1 = l1;
 	*lcsp3 = l3;
@@ -235,7 +232,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
 	}
 
 	return total_size;
- err:
+err:
 	return err;
 }
 
@@ -268,11 +265,59 @@ void d40_phy_lli_write(void __iomem *virtbase,
 
 /* DMA logical lli operations */
 
+static void d40_log_lli_link(struct d40_log_lli *lli_dst,
+			     struct d40_log_lli *lli_src,
+			     int next)
+{
+	u32 slos = 0;
+	u32 dlos = 0;
+
+	if (next != -EINVAL) {
+		slos = next * 2;
+		dlos = next * 2 + 1;
+	} else {
+		lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
+		lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
+	}
+
+	lli_src->lcsp13 = (lli_src->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
+		(slos << D40_MEM_LCSP1_SLOS_POS);
+
+	lli_dst->lcsp13 = (lli_dst->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
+		(dlos << D40_MEM_LCSP1_SLOS_POS);
+}
+
+void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
+			    struct d40_log_lli *lli_dst,
+			    struct d40_log_lli *lli_src,
+			    int next)
+{
+	d40_log_lli_link(lli_dst, lli_src, next);
+
+	writel(lli_src->lcsp02, &lcpa[0].lcsp0);
+	writel(lli_src->lcsp13, &lcpa[0].lcsp1);
+	writel(lli_dst->lcsp02, &lcpa[0].lcsp2);
+	writel(lli_dst->lcsp13, &lcpa[0].lcsp3);
+}
+
+void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
+			    struct d40_log_lli *lli_dst,
+			    struct d40_log_lli *lli_src,
+			    int next)
+{
+	d40_log_lli_link(lli_dst, lli_src, next);
+
+	writel(lli_src->lcsp02, &lcla[0].lcsp02);
+	writel(lli_src->lcsp13, &lcla[0].lcsp13);
+	writel(lli_dst->lcsp02, &lcla[1].lcsp02);
+	writel(lli_dst->lcsp13, &lcla[1].lcsp13);
+}
+
 void d40_log_fill_lli(struct d40_log_lli *lli,
 		      dma_addr_t data, u32 data_size,
-		      u32 lli_next_off, u32 reg_cfg,
+		      u32 reg_cfg,
 		      u32 data_width,
-		      bool term_int, bool addr_inc)
+		      bool addr_inc)
 {
 	lli->lcsp13 = reg_cfg;
 
@@ -287,165 +332,69 @@ void d40_log_fill_lli(struct d40_log_lli *lli,
 	if (addr_inc)
 		lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;
 
-	lli->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
-	/* If this scatter list entry is the last one, no next link */
-	lli->lcsp13 |= (lli_next_off << D40_MEM_LCSP1_SLOS_POS) &
-		D40_MEM_LCSP1_SLOS_MASK;
-
-	if (term_int)
-		lli->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
-	else
-		lli->lcsp13 &= ~D40_MEM_LCSP1_SCFG_TIM_MASK;
 }
 
-int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
-		      struct scatterlist *sg,
+int d40_log_sg_to_dev(struct scatterlist *sg,
 		      int sg_len,
 		      struct d40_log_lli_bidir *lli,
 		      struct d40_def_lcsp *lcsp,
 		      u32 src_data_width,
 		      u32 dst_data_width,
 		      enum dma_data_direction direction,
-		      dma_addr_t dev_addr, int max_len,
-		      int llis_per_log)
+		      dma_addr_t dev_addr)
 {
 	int total_size = 0;
 	struct scatterlist *current_sg = sg;
 	int i;
-	u32 next_lli_off_dst = 0;
-	u32 next_lli_off_src = 0;
 
 	for_each_sg(sg, current_sg, sg_len, i) {
 		total_size += sg_dma_len(current_sg);
 
-		/*
-		 * If this scatter list entry is the last one or
-		 * max length, terminate link.
-		 */
-		if (sg_len - 1 == i || ((i+1) % max_len == 0)) {
-			next_lli_off_src = 0;
-			next_lli_off_dst = 0;
-		} else {
-			if (next_lli_off_dst == 0 &&
-			    next_lli_off_src == 0) {
-				/* The first lli will be at next_lli_off */
-				next_lli_off_dst = (lcla->dst_id *
-						    llis_per_log + 1);
-				next_lli_off_src = (lcla->src_id *
-						    llis_per_log + 1);
-			} else {
-				next_lli_off_dst++;
-				next_lli_off_src++;
-			}
-		}
-
 		if (direction == DMA_TO_DEVICE) {
 			d40_log_fill_lli(&lli->src[i],
 					 sg_phys(current_sg),
 					 sg_dma_len(current_sg),
-					 next_lli_off_src,
 					 lcsp->lcsp1, src_data_width,
-					 false,
 					 true);
 			d40_log_fill_lli(&lli->dst[i],
 					 dev_addr,
 					 sg_dma_len(current_sg),
-					 next_lli_off_dst,
 					 lcsp->lcsp3, dst_data_width,
-					 /* No next == terminal interrupt */
-					 !next_lli_off_dst,
 					 false);
 		} else {
 			d40_log_fill_lli(&lli->dst[i],
 					 sg_phys(current_sg),
 					 sg_dma_len(current_sg),
-					 next_lli_off_dst,
 					 lcsp->lcsp3, dst_data_width,
-					 /* No next == terminal interrupt */
-					 !next_lli_off_dst,
 					 true);
 			d40_log_fill_lli(&lli->src[i],
 					 dev_addr,
 					 sg_dma_len(current_sg),
-					 next_lli_off_src,
 					 lcsp->lcsp1, src_data_width,
-					 false,
 					 false);
 		}
 	}
 	return total_size;
 }
 
-int d40_log_sg_to_lli(int lcla_id,
-		      struct scatterlist *sg,
+int d40_log_sg_to_lli(struct scatterlist *sg,
 		      int sg_len,
 		      struct d40_log_lli *lli_sg,
 		      u32 lcsp13, /* src or dst*/
-		      u32 data_width,
-		      int max_len, int llis_per_log)
+		      u32 data_width)
 {
 	int total_size = 0;
 	struct scatterlist *current_sg = sg;
 	int i;
-	u32 next_lli_off = 0;
 
 	for_each_sg(sg, current_sg, sg_len, i) {
 		total_size += sg_dma_len(current_sg);
 
-		/*
-		 * If this scatter list entry is the last one or
-		 * max length, terminate link.
-		 */
-		if (sg_len - 1 == i || ((i+1) % max_len == 0))
-			next_lli_off = 0;
-		else {
-			if (next_lli_off == 0)
-				/* The first lli will be at next_lli_off */
-				next_lli_off = lcla_id * llis_per_log + 1;
-			else
-				next_lli_off++;
-		}
-
 		d40_log_fill_lli(&lli_sg[i],
 				 sg_phys(current_sg),
 				 sg_dma_len(current_sg),
-				 next_lli_off,
 				 lcsp13, data_width,
-				 !next_lli_off,
 				 true);
 	}
 	return total_size;
 }
-
-int d40_log_lli_write(struct d40_log_lli_full *lcpa,
-		      struct d40_log_lli *lcla_src,
-		      struct d40_log_lli *lcla_dst,
-		      struct d40_log_lli *lli_dst,
-		      struct d40_log_lli *lli_src,
-		      int llis_per_log)
-{
-	u32 slos;
-	u32 dlos;
-	int i;
-
-	writel(lli_src->lcsp02, &lcpa->lcsp0);
-	writel(lli_src->lcsp13, &lcpa->lcsp1);
-	writel(lli_dst->lcsp02, &lcpa->lcsp2);
-	writel(lli_dst->lcsp13, &lcpa->lcsp3);
-
-	slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
-	dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
-
-	for (i = 0; (i < llis_per_log) && slos && dlos; i++) {
-		writel(lli_src[i + 1].lcsp02, &lcla_src[i].lcsp02);
-		writel(lli_src[i + 1].lcsp13, &lcla_src[i].lcsp13);
-		writel(lli_dst[i + 1].lcsp02, &lcla_dst[i].lcsp02);
-		writel(lli_dst[i + 1].lcsp13, &lcla_dst[i].lcsp13);
-
-		slos = lli_src[i + 1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
-		dlos = lli_dst[i + 1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
-	}
-
-	return i;
-
-}
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index a51ec187b5cf..37f81e84cd13 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -268,22 +268,6 @@ struct d40_def_lcsp {
 	u32 lcsp1;
 };
 
-/**
- * struct d40_lcla_elem - Info for one LCA element.
- *
- * @src_id: logical channel src id
- * @dst_id: logical channel dst id
- * @src: LCPA formated src parameters
- * @dst: LCPA formated dst parameters
- *
- */
-struct d40_lcla_elem {
-	int src_id;
-	int dst_id;
-	struct d40_log_lli *src;
-	struct d40_log_lli *dst;
-};
-
 /* Physical channels */
 
 void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
@@ -324,38 +308,33 @@ void d40_phy_lli_write(void __iomem *virtbase,
 void d40_log_fill_lli(struct d40_log_lli *lli,
 		      dma_addr_t data,
 		      u32 data_size,
-		      u32 lli_next_off,
 		      u32 reg_cfg,
 		      u32 data_width,
-		      bool term_int,
 		      bool addr_inc);
 
-int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
-		      struct scatterlist *sg,
+int d40_log_sg_to_dev(struct scatterlist *sg,
 		      int sg_len,
 		      struct d40_log_lli_bidir *lli,
 		      struct d40_def_lcsp *lcsp,
 		      u32 src_data_width,
 		      u32 dst_data_width,
 		      enum dma_data_direction direction,
-		      dma_addr_t dev_addr,
-		      int max_len,
-		      int llis_per_log);
-
-int d40_log_lli_write(struct d40_log_lli_full *lcpa,
-		      struct d40_log_lli *lcla_src,
-		      struct d40_log_lli *lcla_dst,
-		      struct d40_log_lli *lli_dst,
-		      struct d40_log_lli *lli_src,
-		      int llis_per_log);
-
-int d40_log_sg_to_lli(int lcla_id,
-		      struct scatterlist *sg,
+		      dma_addr_t dev_addr);
+
+int d40_log_sg_to_lli(struct scatterlist *sg,
 		      int sg_len,
 		      struct d40_log_lli *lli_sg,
 		      u32 lcsp13, /* src or dst*/
-		      u32 data_width,
-		      int max_len,
-		      int llis_per_log);
+		      u32 data_width);
+
+void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
+			    struct d40_log_lli *lli_dst,
+			    struct d40_log_lli *lli_src,
+			    int next);
+
+void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
+			    struct d40_log_lli *lli_dst,
+			    struct d40_log_lli *lli_src,
+			    int next);
 
 #endif /* STE_DMA40_LLI_H */
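A closing note on the linking scheme: d40_log_lli_link() in the ste_dma40_ll.c diff encodes a next-entry index into the src and dst link offset fields as next * 2 and next * 2 + 1 (each LCLA index covers a src/dst pair), and a terminal entry instead raises the terminal-count/interrupt bits. The sketch below shows only that arithmetic; the mask and position values are made up for illustration, the real ones being the D40_MEM_LCSP* definitions in ste_dma40_ll.h.

/* Sketch of the SLOS/DLOS link encoding, with invented field layout. */
#include <stdint.h>
#include <stdio.h>

#define SLOS_POS  1
#define SLOS_MASK (0x7Fu << SLOS_POS)
#define TIM_MASK  0x1u	/* raise terminal-count interrupt */

/* The src half links to slot 2 * next, the dst half to 2 * next + 1.
 * next < 0 means no further link, so the terminal bit is set instead. */
static uint32_t link_lcsp13(uint32_t lcsp13, int next, int is_dst)
{
	uint32_t los = 0;

	if (next >= 0)
		los = 2u * next + (is_dst ? 1 : 0);
	else
		lcsp13 |= TIM_MASK;

	return (lcsp13 & ~SLOS_MASK) | (los << SLOS_POS);
}

int main(void)
{
	printf("src link to 5: 0x%x\n", link_lcsp13(0, 5, 0));
	printf("dst terminal:  0x%x\n", link_lcsp13(0, -1, 1));
	return 0;
}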