author     Rabin Vincent <rabin.vincent@stericsson.com>   2011-01-25 05:18:35 -0500
committer  Dan Williams <dan.j.williams@intel.com>        2011-01-31 01:27:21 -0500
commit     0c842b551063c5f7382ac9b457992f3b34972801
tree       da606f382493e6cf3540b4245b86a784a78e46b4
parent     86eb5fb61125e4646c9447a1f2ce130817dab34e
dma40: cyclic xfer support
Support cyclic transfers, which are useful for ALSA drivers.
Acked-by: Per Forlin <per.forlin@stericsson.com>
Acked-by: Jonas Aaberg <jonas.aberg@stericsson.com>
Signed-off-by: Rabin Vincent <rabin.vincent@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
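
The new operation is reached through the standard dmaengine slave API. As a minimal sketch of client usage, assuming a channel already obtained with dma_request_channel() and a DMA-mapped ring buffer, an ALSA-style driver would do something like the following; my_buf_phys, my_period_done, my_ctx and the sizes are illustrative, not part of the patch:

	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;
	dma_addr_t buf = my_buf_phys;	/* bus address of the mapped ring */
	size_t buf_len = 64 * 1024;	/* whole buffer: 16 periods */
	size_t period_len = 4 * 1024;	/* interrupt granularity */

	/* same signature as dma40_prep_dma_cyclic() below */
	txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						   period_len, DMA_TO_DEVICE);
	if (!txd)
		return -EBUSY;

	txd->callback = my_period_done;	/* runs once per period */
	txd->callback_param = my_ctx;
	cookie = txd->tx_submit(txd);
	dma_async_issue_pending(chan);

Because every link of a cyclic descriptor is flagged LLI_TERM_INT (see the hunks below), the terminal-count interrupt and hence the callback fire once per period, which is what ALSA's pointer-advancing model needs.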
Diffstat (limited to 'drivers/dma')

 drivers/dma/ste_dma40.c    | 172
 drivers/dma/ste_dma40_ll.c |  35
 drivers/dma/ste_dma40_ll.h |   9
 3 files changed, 167 insertions(+), 49 deletions(-)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 8fd0bb94e777..af955de035f4 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -115,6 +115,7 @@ struct d40_desc {
 	struct list_head node;
 
 	bool is_in_client_list;
+	bool cyclic;
 };
 
 /**
@@ -527,17 +528,45 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
 	struct d40_log_lli_bidir *lli = &desc->lli_log;
 	int lli_current = desc->lli_current;
 	int lli_len = desc->lli_len;
+	bool cyclic = desc->cyclic;
 	int curr_lcla = -EINVAL;
+	int first_lcla = 0;
+	bool linkback;
 
-	if (lli_len - lli_current > 1)
+	/*
+	 * We may have partially running cyclic transfers, in case we didn't
+	 * get enough LCLA entries.
+	 */
+	linkback = cyclic && lli_current == 0;
+
+	/*
+	 * For linkback, we need one LCLA even with only one link, because we
+	 * can't link back to the one in LCPA space
+	 */
+	if (linkback || (lli_len - lli_current > 1)) {
 		curr_lcla = d40_lcla_alloc_one(chan, desc);
+		first_lcla = curr_lcla;
+	}
+
+	/*
+	 * For linkback, we normally load the LCPA in the loop since we need
+	 * to link it to the second LCLA and not the first. However, if we
+	 * couldn't even get a first LCLA, then we have to run in LCPA and
+	 * reload manually.
+	 */
+	if (!linkback || curr_lcla == -EINVAL) {
+		unsigned int flags = 0;
 
-	d40_log_lli_lcpa_write(chan->lcpa,
-			       &lli->dst[lli_current],
-			       &lli->src[lli_current],
-			       curr_lcla);
+		if (curr_lcla == -EINVAL)
+			flags |= LLI_TERM_INT;
 
-	lli_current++;
+		d40_log_lli_lcpa_write(chan->lcpa,
+				       &lli->dst[lli_current],
+				       &lli->src[lli_current],
+				       curr_lcla,
+				       flags);
+		lli_current++;
+	}
 
 	if (curr_lcla < 0)
 		goto out;
@@ -546,17 +575,33 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
 		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
 					   8 * curr_lcla * 2;
 		struct d40_log_lli *lcla = pool->base + lcla_offset;
+		unsigned int flags = 0;
 		int next_lcla;
 
 		if (lli_current + 1 < lli_len)
 			next_lcla = d40_lcla_alloc_one(chan, desc);
 		else
-			next_lcla = -EINVAL;
+			next_lcla = linkback ? first_lcla : -EINVAL;
+
+		if (cyclic || next_lcla == -EINVAL)
+			flags |= LLI_TERM_INT;
 
+		if (linkback && curr_lcla == first_lcla) {
+			/* First link goes in both LCPA and LCLA */
+			d40_log_lli_lcpa_write(chan->lcpa,
+					       &lli->dst[lli_current],
+					       &lli->src[lli_current],
+					       next_lcla, flags);
+		}
+
+		/*
+		 * One unused LCLA in the cyclic case if the very first
+		 * next_lcla fails...
+		 */
 		d40_log_lli_lcla_write(lcla,
 				       &lli->dst[lli_current],
 				       &lli->src[lli_current],
-				       next_lcla);
+				       next_lcla, flags);
 
 		dma_sync_single_range_for_device(chan->base->dev,
 						 pool->dma_addr, lcla_offset,
@@ -565,7 +610,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
 
 		curr_lcla = next_lcla;
 
-		if (curr_lcla == -EINVAL) {
+		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
 			lli_current++;
 			break;
 		}
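
A note on the LCLA arithmetic in this function: each physical channel owns 1024 bytes of LCLA space, and one logical link is a source/destination pair of 8-byte d40_log_lli entries, which is where lcla_offset = phy * 1024 + 8 * curr_lcla * 2 comes from (the same pairing shows up as slos = next * 2 and dlos = next * 2 + 1 in ste_dma40_ll.c). A stand-alone model of the addressing, illustrative only:

	#include <stdio.h>

	/* Link slot "lcla" of physical channel "phy": the channel's 1 KiB
	 * region, plus one 16-byte src/dst pair per preceding slot. */
	static unsigned int lcla_offset(unsigned int phy, unsigned int lcla)
	{
		return phy * 1024 + 8 * lcla * 2;
	}

	int main(void)
	{
		printf("%u\n", lcla_offset(3, 1));	/* 3 * 1024 + 16 = 3088 */
		return 0;
	}

For a cyclic descriptor the chain is closed inside this space: the last link's next index is first_lcla rather than -EINVAL, since the entry living in LCPA cannot be a link target (hence the "linkback" handling above).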
@@ -1074,17 +1119,36 @@ static void dma_tc_handle(struct d40_chan *d40c)
 	if (d40d == NULL)
 		return;
 
-	d40_lcla_free_all(d40c, d40d);
+	if (d40d->cyclic) {
+		/*
+		 * If this was a partially loaded list, we need to reload
+		 * it, and only when the list is completed. We need to check
+		 * for done because the interrupt will hit for every link,
+		 * and not just the last one.
+		 */
+		if (d40d->lli_current < d40d->lli_len
+		    && !d40_tx_is_linked(d40c)
+		    && !d40_residue(d40c)) {
+			d40_lcla_free_all(d40c, d40d);
+			d40_desc_load(d40c, d40d);
+			(void) d40_start(d40c);
 
-	if (d40d->lli_current < d40d->lli_len) {
-		d40_desc_load(d40c, d40d);
-		/* Start dma job */
-		(void) d40_start(d40c);
-		return;
-	}
+			if (d40d->lli_current == d40d->lli_len)
+				d40d->lli_current = 0;
+		}
+	} else {
+		d40_lcla_free_all(d40c, d40d);
 
-	if (d40_queue_start(d40c) == NULL)
-		d40c->busy = false;
+		if (d40d->lli_current < d40d->lli_len) {
+			d40_desc_load(d40c, d40d);
+			/* Start dma job */
+			(void) d40_start(d40c);
+			return;
+		}
+
+		if (d40_queue_start(d40c) == NULL)
+			d40c->busy = false;
+	}
 
 	d40c->pending_tx++;
 	tasklet_schedule(&d40c->tasklet);
@@ -1103,11 +1167,11 @@ static void dma_tasklet(unsigned long data)
 
 	/* Get first active entry from list */
 	d40d = d40_first_active_get(d40c);
-
 	if (d40d == NULL)
 		goto err;
 
-	d40c->completed = d40d->txd.cookie;
+	if (!d40d->cyclic)
+		d40c->completed = d40d->txd.cookie;
 
 	/*
 	 * If terminating a channel pending_tx is set to zero.
@@ -1122,16 +1186,18 @@ static void dma_tasklet(unsigned long data)
 	callback = d40d->txd.callback;
 	callback_param = d40d->txd.callback_param;
 
-	if (async_tx_test_ack(&d40d->txd)) {
-		d40_pool_lli_free(d40c, d40d);
-		d40_desc_remove(d40d);
-		d40_desc_free(d40c, d40d);
-	} else {
-		if (!d40d->is_in_client_list) {
+	if (!d40d->cyclic) {
+		if (async_tx_test_ack(&d40d->txd)) {
+			d40_pool_lli_free(d40c, d40d);
 			d40_desc_remove(d40d);
-			d40_lcla_free_all(d40c, d40d);
-			list_add_tail(&d40d->node, &d40c->client);
-			d40d->is_in_client_list = true;
+			d40_desc_free(d40c, d40d);
+		} else {
+			if (!d40d->is_in_client_list) {
+				d40_desc_remove(d40d);
+				d40_lcla_free_all(d40c, d40d);
+				list_add_tail(&d40d->node, &d40c->client);
+				d40d->is_in_client_list = true;
+			}
 		}
 	}
 
@@ -1694,19 +1760,23 @@ d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
 	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
 	struct stedma40_half_channel_info *src_info = &cfg->src_info;
 	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
+	unsigned long flags = 0;
 	int ret;
 
+	if (desc->cyclic)
+		flags |= LLI_CYCLIC | LLI_TERM_INT;
+
 	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
 				desc->lli_phy.src,
 				virt_to_phys(desc->lli_phy.src),
 				chan->src_def_cfg,
-				src_info, dst_info);
+				src_info, dst_info, flags);
 
 	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
 				desc->lli_phy.dst,
 				virt_to_phys(desc->lli_phy.dst),
 				chan->dst_def_cfg,
-				dst_info, src_info);
+				dst_info, src_info, flags);
 
 	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
 				   desc->lli_pool.size, DMA_TO_DEVICE);
@@ -1789,12 +1859,16 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 		return NULL;
 	}
 
+
 	spin_lock_irqsave(&chan->lock, flags);
 
 	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
 	if (desc == NULL)
 		goto err;
 
+	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
+		desc->cyclic = true;
+
 	if (direction != DMA_NONE) {
 		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
 
@@ -2007,6 +2081,36 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
 }
 
+static struct dma_async_tx_descriptor *
+dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
+		      size_t buf_len, size_t period_len,
+		      enum dma_data_direction direction)
+{
+	unsigned int periods = buf_len / period_len;
+	struct dma_async_tx_descriptor *txd;
+	struct scatterlist *sg;
+	int i;
+
+	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL);
+	for (i = 0; i < periods; i++) {
+		sg_dma_address(&sg[i]) = dma_addr;
+		sg_dma_len(&sg[i]) = period_len;
+		dma_addr += period_len;
+	}
+
+	sg[periods].offset = 0;
+	sg[periods].length = 0;
+	sg[periods].page_link =
+		((unsigned long)sg | 0x01) & ~0x02;
+
+	txd = d40_prep_sg(chan, sg, sg, periods, direction,
+			  DMA_PREP_INTERRUPT);
+
+	kfree(sg);
+
+	return txd;
+}
+
 static enum dma_status d40_tx_status(struct dma_chan *chan,
 				     dma_cookie_t cookie,
 				     struct dma_tx_state *txstate)
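
dma40_prep_dma_cyclic() builds one scatterlist entry per period plus a terminating entry whose page_link chains back to the start: bit 0 of page_link marks a chain entry and bit 1 marks the end of a list, so ((unsigned long)sg | 0x01) & ~0x02 makes sg_next() of the last real entry wrap to sg[0]. That is precisely what the sg_next(&sg_src[sg_len - 1]) == sg_src test in d40_prep_sg() detects. A user-space model of the trick, with the chaining rules reduced to the two bits involved (illustrative, not the kernel's actual scatterlist implementation):

	#include <assert.h>
	#include <stdio.h>

	struct sg {
		unsigned long page_link;	/* bit 0: chain, bit 1: end */
	};

	static struct sg *sg_next_model(struct sg *s)
	{
		if (s->page_link & 0x02)	/* end mark: no next */
			return NULL;
		s++;
		if (s->page_link & 0x01)	/* chain entry: follow it */
			s = (struct sg *)(s->page_link & ~0x03UL);
		return s;
	}

	int main(void)
	{
		enum { PERIODS = 4 };
		struct sg ring[PERIODS + 1] = { { 0 } };

		/* same expression as in dma40_prep_dma_cyclic() */
		ring[PERIODS].page_link = ((unsigned long)ring | 0x01) & ~0x02;

		/* the last real entry now wraps to the first */
		assert(sg_next_model(&ring[PERIODS - 1]) == &ring[0]);
		printf("cyclic detected\n");
		return 0;
	}

Note that periods = buf_len / period_len truncates, so buf_len is expected to be a whole multiple of period_len.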
@@ -2264,6 +2368,9 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
 	if (dma_has_cap(DMA_SG, dev->cap_mask))
 		dev->device_prep_dma_sg = d40_prep_memcpy_sg;
 
+	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
+		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
+
 	dev->device_alloc_chan_resources = d40_alloc_chan_resources;
 	dev->device_free_chan_resources = d40_free_chan_resources;
 	dev->device_issue_pending = d40_issue_pending;
@@ -2282,6 +2389,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 
 	dma_cap_zero(base->dma_slave.cap_mask);
 	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
+	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
 
 	d40_ops_init(base, &base->dma_slave);
 
@@ -2316,9 +2424,9 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
 	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
 	dma_cap_set(DMA_SG, base->dma_both.cap_mask);
+	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
 
 	d40_ops_init(base, &base->dma_both);
-
 	err = dma_async_device_register(&base->dma_both);
 
 	if (err) {
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 88b9e371be2f..cad9e1daedff 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -202,13 +202,15 @@ static int d40_seg_size(int size, int data_width1, int data_width2)
 
 static struct d40_phy_lli *
 d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
-		   dma_addr_t lli_phys, u32 reg_cfg,
+		   dma_addr_t lli_phys, dma_addr_t first_phys, u32 reg_cfg,
 		   struct stedma40_half_channel_info *info,
 		   struct stedma40_half_channel_info *otherinfo,
 		   unsigned long flags)
 {
+	bool lastlink = flags & LLI_LAST_LINK;
 	bool addr_inc = flags & LLI_ADDR_INC;
 	bool term_int = flags & LLI_TERM_INT;
+	bool cyclic = flags & LLI_CYCLIC;
 	int err;
 	dma_addr_t next = lli_phys;
 	int size_rest = size;
@@ -226,10 +228,12 @@ d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
 				   otherinfo->data_width);
 		size_rest -= size_seg;
 
-		if (term_int && size_rest == 0) {
-			next = 0;
+		if (size_rest == 0 && term_int)
 			flags |= LLI_TERM_INT;
-		} else
+
+		if (size_rest == 0 && lastlink)
+			next = cyclic ? first_phys : 0;
+		else
 			next = ALIGN(next + sizeof(struct d40_phy_lli),
 				     D40_LLI_ALIGN);
 
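
For physical channels the loop is closed in the hardware chain itself: each d40_phy_lli carries the bus address of its successor, and the hunk above makes the last link point back at first_phys instead of 0, so the controller cycles without CPU involvement. A small model of that termination choice (illustrative field names; the real descriptor carries more than a next pointer):

	#include <stdio.h>

	struct lli {
		unsigned long next;	/* bus address of successor, 0 = stop */
	};

	static void link_chain(struct lli *lli, unsigned long first_phys,
			       int n, int cyclic)
	{
		for (int i = 0; i < n; i++)
			/* mirrors: next = cyclic ? first_phys : 0; */
			lli[i].next = (i == n - 1)
				? (cyclic ? first_phys : 0)
				: first_phys + (i + 1) * sizeof(struct lli);
	}

	int main(void)
	{
		struct lli chain[3];

		link_chain(chain, 0x1000, 3, 1);
		printf("last->next = %#lx\n", chain[2].next);	/* 0x1000 */
		return 0;
	}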
@@ -257,14 +261,14 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
 		      dma_addr_t lli_phys,
 		      u32 reg_cfg,
 		      struct stedma40_half_channel_info *info,
-		      struct stedma40_half_channel_info *otherinfo)
+		      struct stedma40_half_channel_info *otherinfo,
+		      unsigned long flags)
 {
 	int total_size = 0;
 	int i;
 	struct scatterlist *current_sg = sg;
 	struct d40_phy_lli *lli = lli_sg;
 	dma_addr_t l_phys = lli_phys;
-	unsigned long flags = 0;
 
 	if (!target)
 		flags |= LLI_ADDR_INC;
@@ -277,12 +281,12 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
 		total_size += sg_dma_len(current_sg);
 
 		if (i == sg_len - 1)
-			flags |= LLI_TERM_INT;
+			flags |= LLI_TERM_INT | LLI_LAST_LINK;
 
 		l_phys = ALIGN(lli_phys + (lli - lli_sg) *
 			       sizeof(struct d40_phy_lli), D40_LLI_ALIGN);
 
-		lli = d40_phy_buf_to_lli(lli, dst, len, l_phys,
+		lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, lli_phys,
 					 reg_cfg, info, otherinfo, flags);
 
 		if (lli == NULL)
@@ -297,15 +301,18 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
 
 static void d40_log_lli_link(struct d40_log_lli *lli_dst,
 			     struct d40_log_lli *lli_src,
-			     int next)
+			     int next, unsigned int flags)
 {
+	bool interrupt = flags & LLI_TERM_INT;
 	u32 slos = 0;
 	u32 dlos = 0;
 
 	if (next != -EINVAL) {
 		slos = next * 2;
 		dlos = next * 2 + 1;
-	} else {
+	}
+
+	if (interrupt) {
 		lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
 		lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
 	}
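
The d40_log_lli_link() change above decouples the interrupt decision from list termination: previously the TIM/DTCP bits were set only in the else branch, i.e. when there was no next link, whereas now LLI_TERM_INT drives them on its own, so a cyclic chain can both link onward and interrupt on every period. A before/after model of just that predicate (illustrative):

	#include <stdio.h>

	#define LLI_TERM_INT	(1 << 1)	/* as in ste_dma40_ll.h */

	static int irq_before(int next)		 { return next == -22; /* -EINVAL */ }
	static int irq_after(unsigned int flags) { return !!(flags & LLI_TERM_INT); }

	int main(void)
	{
		/* a middle link of a cyclic transfer: has a next, wants an irq */
		printf("before=%d after=%d\n",
		       irq_before(5), irq_after(LLI_TERM_INT));	/* 0 1 */
		return 0;
	}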
@@ -320,9 +327,9 @@ static void d40_log_lli_link(struct d40_log_lli *lli_dst,
 void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
 			    struct d40_log_lli *lli_dst,
 			    struct d40_log_lli *lli_src,
-			    int next)
+			    int next, unsigned int flags)
 {
-	d40_log_lli_link(lli_dst, lli_src, next);
+	d40_log_lli_link(lli_dst, lli_src, next, flags);
 
 	writel(lli_src->lcsp02, &lcpa[0].lcsp0);
 	writel(lli_src->lcsp13, &lcpa[0].lcsp1);
@@ -333,9 +340,9 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
 void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
 			    struct d40_log_lli *lli_dst,
 			    struct d40_log_lli *lli_src,
-			    int next)
+			    int next, unsigned int flags)
 {
-	d40_log_lli_link(lli_dst, lli_src, next);
+	d40_log_lli_link(lli_dst, lli_src, next, flags);
 
 	writel(lli_src->lcsp02, &lcla[0].lcsp02);
 	writel(lli_src->lcsp13, &lcla[0].lcsp13);
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 59e72f0cc901..195ee65ee7f3 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -296,6 +296,8 @@ struct d40_def_lcsp {
 enum d40_lli_flags {
 	LLI_ADDR_INC	= 1 << 0,
 	LLI_TERM_INT	= 1 << 1,
+	LLI_CYCLIC	= 1 << 2,
+	LLI_LAST_LINK	= 1 << 3,
 };
 
 void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
@@ -314,7 +316,8 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
 		      dma_addr_t lli_phys,
 		      u32 reg_cfg,
 		      struct stedma40_half_channel_info *info,
-		      struct stedma40_half_channel_info *otherinfo);
+		      struct stedma40_half_channel_info *otherinfo,
+		      unsigned long flags);
 
 /* Logical channels */
 
@@ -328,11 +331,11 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
 void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
 			    struct d40_log_lli *lli_dst,
 			    struct d40_log_lli *lli_src,
-			    int next);
+			    int next, unsigned int flags);
 
 void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
 			    struct d40_log_lli *lli_dst,
 			    struct d40_log_lli *lli_src,
-			    int next);
+			    int next, unsigned int flags);
 
 #endif /* STE_DMA40_LLI_H */