author		Alexandre Bounine <alexandre.bounine@idt.com>	2014-08-08 17:22:12 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-08 18:57:24 -0400
commit		50835e977b69c3278bd5e4264737138346df133f (patch)
tree		da891568dd1e4c7f9dae7e440d1ea2a6726aab93
parent		4aff1ce7add1c432fe5ea3ae0231155f33e5ef38 (diff)
rapidio/tsi721_dma: rework scatter-gather list handling
Rework Tsi721 RapidIO DMA engine support to allow handling data scatter/gather
lists longer than the number of hardware buffer descriptors in the DMA
channel's descriptor list.

The current implementation of Tsi721 DMA transfers requires that the number of
entries in a scatter/gather list provided by a caller of
dmaengine_prep_rio_sg() not exceed the number of allocated hardware buffer
descriptors. This patch removes that limitation by processing long
scatter/gather lists in sections that can be transferred using a hardware
descriptor ring of the configured size. It also introduces a module parameter,
"dma_desc_per_channel", to allow run-time configuration of the Tsi721 hardware
buffer descriptor rings.

Signed-off-by: Alexandre Bounine <alexandre.bounine@idt.com>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Andre van Herk <andre.van.herk@prodrive-technologies.com>
Cc: Stef van Os <stef.van.os@prodrive-technologies.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	Documentation/rapidio/tsi721.txt	19
-rw-r--r--	drivers/rapidio/devices/tsi721.h	12
-rw-r--r--	drivers/rapidio/devices/tsi721_dma.c	718
3 files changed, 394 insertions(+), 355 deletions(-)
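The core of the rework described in the commit message is that a transfer's
scatter/gather list is no longer bounded by the descriptor ring: the list is
consumed one ring-sized section at a time, with a monotonically increasing
write counter that wraps through the ring. A minimal standalone sketch of that
idea (illustrative names only, not driver code):

    /*
     * Standalone model (NOT driver code) of consuming an arbitrarily
     * long segment list through a fixed-size descriptor ring, one
     * ring-sized section at a time. All names are illustrative.
     */
    #include <stdio.h>

    #define RING_SZ 4                    /* hardware descriptors available */

    static unsigned int wr_count;        /* total descriptors ever written */

    /* Fill up to RING_SZ descriptors starting at segment *pos */
    static void submit_section(int nr_segs, int *pos)
    {
            int filled = 0;

            while (*pos < nr_segs && filled < RING_SZ) {
                    printf("desc %u <- segment %d\n",
                           wr_count % RING_SZ, *pos);
                    wr_count++;
                    filled++;
                    (*pos)++;
            }
            /* real hardware would now run the section to completion */
    }

    int main(void)
    {
            int nr_segs = 10;            /* longer than the ring */
            int pos = 0;

            while (pos < nr_segs)
                    submit_section(nr_segs, &pos);
            return 0;
    }

In the driver itself the equivalent resume point is kept in the transaction
descriptor (the sg/sg_len fields added below), and the interrupt tasklet
restarts the next section.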
diff --git a/Documentation/rapidio/tsi721.txt b/Documentation/rapidio/tsi721.txt
index 335f3c6087dc..626052f403bb 100644
--- a/Documentation/rapidio/tsi721.txt
+++ b/Documentation/rapidio/tsi721.txt
@@ -20,13 +20,26 @@ II. Known problems
 
 None.
 
-III. To do
+III. DMA Engine Support
 
-Add DMA data transfers (non-messaging).
-Add inbound region (SRIO-to-PCIe) mapping.
+Tsi721 mport driver supports DMA data transfers between local system memory and
+remote RapidIO devices. This functionality is implemented according to SLAVE
+mode API defined by common Linux kernel DMA Engine framework.
+
+Depending on system requirements RapidIO DMA operations can be included/excluded
+by setting CONFIG_RAPIDIO_DMA_ENGINE option. Tsi721 miniport driver uses seven
+out of eight available BDMA channels to support DMA data transfers.
+One BDMA channel is reserved for generation of maintenance read/write requests.
+
+If Tsi721 mport driver have been built with RAPIDIO_DMA_ENGINE support included,
+this driver will accept DMA-specific module parameter:
+  "dma_desc_per_channel" - defines number of hardware buffer descriptors used by
+                           each BDMA channel of Tsi721 (by default - 128).
 
 IV. Version History
 
+  1.1.0 - DMA operations re-worked to support data scatter/gather lists larger
+          than hardware buffer descriptors ring.
   1.0.0 - Initial driver release.
 
 V. License
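As a usage note for the parameter documented above: the ring size can be chosen
at module load time (e.g. "modprobe <module> dma_desc_per_channel=256"; the
exact module name depends on the kernel build, so it is left unspecified here).
The declaration in tsi721_dma.c follows the standard kernel pattern, mirrored
here for reference:

    /*
     * Mirrors the lines this patch adds to tsi721_dma.c: a uint
     * parameter, readable by everyone and writable by root through
     * /sys/module/<module>/parameters/dma_desc_per_channel.
     */
    #include <linux/module.h>
    #include <linux/moduleparam.h>
    #include <linux/stat.h>

    static unsigned int dma_desc_per_channel = 128;
    module_param(dma_desc_per_channel, uint, S_IWUSR | S_IRUGO);
    MODULE_PARM_DESC(dma_desc_per_channel,
                     "Number of DMA descriptors per channel (default: 128)");

Because tsi721_alloc_chan_resources() passes the current value to
tsi721_bdma_ch_init() (see the tsi721_dma.c diff below), a value written
through sysfs takes effect the next time a channel's resources are allocated,
not retroactively.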
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 0305675270ee..a7b42680a06a 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -644,27 +644,26 @@ enum tsi721_smsg_int_flag {
 
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 
-#define TSI721_BDMA_BD_RING_SZ	128
 #define TSI721_BDMA_MAX_BCOUNT	(TSI721_DMAD_BCOUNT1 + 1)
 
 struct tsi721_tx_desc {
 	struct dma_async_tx_descriptor txd;
-	struct tsi721_dma_desc *hw_desc;
 	u16 destid;
 	/* low 64-bits of 66-bit RIO address */
 	u64 rio_addr;
 	/* upper 2-bits of 66-bit RIO address */
 	u8  rio_addr_u;
-	u32 bcount;
-	bool interrupt;
+	enum dma_rtype rtype;
 	struct list_head desc_node;
-	struct list_head tx_list;
+	struct scatterlist *sg;
+	unsigned int sg_len;
+	enum dma_status status;
 };
 
 struct tsi721_bdma_chan {
 	int		id;
 	void __iomem	*regs;
-	int		bd_num;		/* number of buffer descriptors */
+	int		bd_num;		/* number of HW buffer descriptors */
 	void		*bd_base;	/* start of DMA descriptors */
 	dma_addr_t	bd_phys;
 	void		*sts_base;	/* start of DMA BD status FIFO */
@@ -680,7 +679,6 @@ struct tsi721_bdma_chan {
 	struct list_head	active_list;
 	struct list_head	queue;
 	struct list_head	free_list;
-	dma_cookie_t		completed_cookie;
 	struct tasklet_struct	tasklet;
 	bool			active;
 };
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 44341dc5b148..f64c5decb747 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -1,7 +1,7 @@
 /*
  * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
  *
- * Copyright 2011 Integrated Device Technology, Inc.
+ * Copyright (c) 2011-2014 Integrated Device Technology, Inc.
  * Alexandre Bounine <alexandre.bounine@idt.com>
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -14,9 +14,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
  */
 
 #include <linux/io.h>
@@ -32,9 +31,22 @@
 #include <linux/interrupt.h>
 #include <linux/kfifo.h>
 #include <linux/delay.h>
+#include "../../dma/dmaengine.h"
 
 #include "tsi721.h"
 
+#define TSI721_DMA_TX_QUEUE_SZ	16	/* number of transaction descriptors */
+
+#ifdef CONFIG_PCI_MSI
+static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
+#endif
+static int tsi721_submit_sg(struct tsi721_tx_desc *desc);
+
+static unsigned int dma_desc_per_channel = 128;
+module_param(dma_desc_per_channel, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(dma_desc_per_channel,
+		 "Number of DMA descriptors per channel (default: 128)");
+
 static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
 {
 	return container_of(chan, struct tsi721_bdma_chan, dchan);
@@ -59,7 +71,7 @@ struct tsi721_tx_desc *tsi721_dma_first_active(
 			struct tsi721_tx_desc, desc_node);
 }
 
-static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
+static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
 {
 	struct tsi721_dma_desc *bd_ptr;
 	struct device *dev = bdma_chan->dchan.device->dev;
@@ -67,17 +79,23 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
 	dma_addr_t	bd_phys;
 	dma_addr_t	sts_phys;
 	int		sts_size;
-	int		bd_num = bdma_chan->bd_num;
+#ifdef CONFIG_PCI_MSI
+	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
+#endif
 
 	dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);
 
-	/* Allocate space for DMA descriptors */
+	/*
+	 * Allocate space for DMA descriptors
+	 * (add an extra element for link descriptor)
+	 */
 	bd_ptr = dma_zalloc_coherent(dev,
-			bd_num * sizeof(struct tsi721_dma_desc),
+			(bd_num + 1) * sizeof(struct tsi721_dma_desc),
 			&bd_phys, GFP_KERNEL);
 	if (!bd_ptr)
 		return -ENOMEM;
 
+	bdma_chan->bd_num = bd_num;
 	bdma_chan->bd_phys = bd_phys;
 	bdma_chan->bd_base = bd_ptr;
 
@@ -85,8 +103,8 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
 		bd_ptr, (unsigned long long)bd_phys);
 
 	/* Allocate space for descriptor status FIFO */
-	sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
-					bd_num : TSI721_DMA_MINSTSSZ;
+	sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
+					(bd_num + 1) : TSI721_DMA_MINSTSSZ;
 	sts_size = roundup_pow_of_two(sts_size);
 	sts_ptr = dma_zalloc_coherent(dev,
 				     sts_size * sizeof(struct tsi721_dma_sts),
@@ -94,7 +112,7 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
 	if (!sts_ptr) {
 		/* Free space allocated for DMA descriptors */
 		dma_free_coherent(dev,
-				  bd_num * sizeof(struct tsi721_dma_desc),
+				  (bd_num + 1) * sizeof(struct tsi721_dma_desc),
 				  bd_ptr, bd_phys);
 		bdma_chan->bd_base = NULL;
 		return -ENOMEM;
@@ -108,11 +126,11 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
 		"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
 		sts_ptr, (unsigned long long)sts_phys, sts_size);
 
-	/* Initialize DMA descriptors ring */
-	bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
-	bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
+	/* Initialize DMA descriptors ring using added link descriptor */
+	bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
+	bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
 						TSI721_DMAC_DPTRL_MASK);
-	bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
+	bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);
 
 	/* Setup DMA descriptor pointers */
 	iowrite32(((u64)bd_phys >> 32),
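The hunk above is the layout change behind the whole patch: the ring now holds
bd_num data descriptors plus one extra DTYPE3 "link" descriptor whose next
pointer aims back at the ring base, so the hardware walks a circle. A
standalone model of that wiring (illustrative, not driver code):

    /* Model of a bd_num-entry ring closed by one extra link element. */
    #include <stdio.h>

    #define BD_NUM 4

    struct bd { int is_link; int next; };

    int main(void)
    {
            struct bd ring[BD_NUM + 1];     /* +1 for the link descriptor */
            int idx = 0;

            for (int i = 0; i < BD_NUM; i++)
                    ring[i] = (struct bd){ .is_link = 0, .next = i + 1 };
            /* last element plays the DTYPE3 role: point back to index 0 */
            ring[BD_NUM] = (struct bd){ .is_link = 1, .next = 0 };

            /* walk two laps to show the wrap through the link element */
            for (int step = 0; step < 2 * (BD_NUM + 1); step++) {
                    printf("%d%s ", idx, ring[idx].is_link ? "(link)" : "");
                    idx = ring[idx].next;
            }
            printf("\n");
            return 0;
    }

This is also why the allocation and index arithmetic throughout the patch use
(bd_num + 1) rather than bd_num.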
@@ -134,6 +152,55 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
 
 	ioread32(bdma_chan->regs + TSI721_DMAC_INT);
 
+#ifdef CONFIG_PCI_MSI
+	/* Request interrupt service if we are in MSI-X mode */
+	if (priv->flags & TSI721_USING_MSIX) {
+		int rc, idx;
+
+		idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;
+
+		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
+				 priv->msix[idx].irq_name, (void *)bdma_chan);
+
+		if (rc) {
+			dev_dbg(dev, "Unable to get MSI-X for BDMA%d-DONE\n",
+				bdma_chan->id);
+			goto err_out;
+		}
+
+		idx = TSI721_VECT_DMA0_INT + bdma_chan->id;
+
+		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
+				 priv->msix[idx].irq_name, (void *)bdma_chan);
+
+		if (rc) {
+			dev_dbg(dev, "Unable to get MSI-X for BDMA%d-INT\n",
+				bdma_chan->id);
+			free_irq(
+				priv->msix[TSI721_VECT_DMA0_DONE +
+					   bdma_chan->id].vector,
+				(void *)bdma_chan);
+		}
+
+err_out:
+		if (rc) {
+			/* Free space allocated for DMA descriptors */
+			dma_free_coherent(dev,
+				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
+				bd_ptr, bd_phys);
+			bdma_chan->bd_base = NULL;
+
+			/* Free space allocated for status descriptors */
+			dma_free_coherent(dev,
+				sts_size * sizeof(struct tsi721_dma_sts),
+				sts_ptr, sts_phys);
+			bdma_chan->sts_base = NULL;
+
+			return -EIO;
+		}
+	}
+#endif /* CONFIG_PCI_MSI */
+
 	/* Toggle DMA channel initialization */
 	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
 	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
@@ -147,6 +214,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
 static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
 {
 	u32 ch_stat;
+#ifdef CONFIG_PCI_MSI
+	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
+#endif
 
 	if (bdma_chan->bd_base == NULL)
 		return 0;
@@ -159,9 +229,18 @@ static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
 	/* Put DMA channel into init state */
 	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
 
+#ifdef CONFIG_PCI_MSI
+	if (priv->flags & TSI721_USING_MSIX) {
+		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+				    bdma_chan->id].vector, (void *)bdma_chan);
+		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
+				    bdma_chan->id].vector, (void *)bdma_chan);
+	}
+#endif /* CONFIG_PCI_MSI */
+
 	/* Free space allocated for DMA descriptors */
 	dma_free_coherent(bdma_chan->dchan.device->dev,
-		bdma_chan->bd_num * sizeof(struct tsi721_dma_desc),
+		(bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
 		bdma_chan->bd_base, bdma_chan->bd_phys);
 	bdma_chan->bd_base = NULL;
 
@@ -243,8 +322,8 @@ static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
 	}
 
 	dev_dbg(bdma_chan->dchan.device->dev,
-		"tx_chan: %p, chan: %d, regs: %p\n",
-		bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs);
+		"%s: chan_%d (wrc=%d)\n", __func__, bdma_chan->id,
+		bdma_chan->wr_count_next);
 
 	iowrite32(bdma_chan->wr_count_next,
 		bdma_chan->regs + TSI721_DMAC_DWRCNT);
@@ -253,72 +332,19 @@ static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
 	bdma_chan->wr_count = bdma_chan->wr_count_next;
 }
 
-static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan,
-			    struct tsi721_tx_desc *desc)
-{
-	dev_dbg(bdma_chan->dchan.device->dev,
-		"Put desc: %p into free list\n", desc);
-
-	if (desc) {
-		spin_lock_bh(&bdma_chan->lock);
-		list_splice_init(&desc->tx_list, &bdma_chan->free_list);
-		list_add(&desc->desc_node, &bdma_chan->free_list);
-		bdma_chan->wr_count_next = bdma_chan->wr_count;
-		spin_unlock_bh(&bdma_chan->lock);
-	}
-}
-
-static
-struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
-{
-	struct tsi721_tx_desc *tx_desc, *_tx_desc;
-	struct tsi721_tx_desc *ret = NULL;
-	int i;
-
-	spin_lock_bh(&bdma_chan->lock);
-	list_for_each_entry_safe(tx_desc, _tx_desc,
-				 &bdma_chan->free_list, desc_node) {
-		if (async_tx_test_ack(&tx_desc->txd)) {
-			list_del(&tx_desc->desc_node);
-			ret = tx_desc;
-			break;
-		}
-		dev_dbg(bdma_chan->dchan.device->dev,
-			"desc %p not ACKed\n", tx_desc);
-	}
-
-	if (ret == NULL) {
-		dev_dbg(bdma_chan->dchan.device->dev,
-			"%s: unable to obtain tx descriptor\n", __func__);
-		goto err_out;
-	}
-
-	i = bdma_chan->wr_count_next % bdma_chan->bd_num;
-	if (i == bdma_chan->bd_num - 1) {
-		i = 0;
-		bdma_chan->wr_count_next++; /* skip link descriptor */
-	}
-
-	bdma_chan->wr_count_next++;
-	tx_desc->txd.phys = bdma_chan->bd_phys +
-				i * sizeof(struct tsi721_dma_desc);
-	tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
-err_out:
-	spin_unlock_bh(&bdma_chan->lock);
-
-	return ret;
-}
-
 static int
-tsi721_desc_fill_init(struct tsi721_tx_desc *desc, struct scatterlist *sg,
-		      enum dma_rtype rtype, u32 sys_size)
+tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
+		      struct tsi721_dma_desc *bd_ptr,
+		      struct scatterlist *sg, u32 sys_size)
 {
-	struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
 	u64 rio_addr;
 
+	if (bd_ptr == NULL)
+		return -EINVAL;
+
 	/* Initialize DMA descriptor */
 	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
-				      (rtype << 19) | desc->destid);
+				      (desc->rtype << 19) | desc->destid);
 	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
 				     (sys_size << 26));
 	rio_addr = (desc->rio_addr >> 2) |
@@ -335,51 +361,32 @@ tsi721_desc_fill_init(struct tsi721_tx_desc *desc, struct scatterlist *sg,
 }
 
 static int
-tsi721_desc_fill_end(struct tsi721_tx_desc *desc)
+tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
 {
-	struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
+	if (bd_ptr == NULL)
+		return -EINVAL;
 
 	/* Update DMA descriptor */
-	if (desc->interrupt)
+	if (interrupt)
 		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
-	bd_ptr->bcount |= cpu_to_le32(desc->bcount & TSI721_DMAD_BCOUNT1);
+	bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);
 
 	return 0;
 }
 
-
-static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
-				      struct tsi721_tx_desc *desc)
+static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
+			      struct tsi721_tx_desc *desc)
 {
 	struct dma_async_tx_descriptor *txd = &desc->txd;
 	dma_async_tx_callback callback = txd->callback;
 	void *param = txd->callback_param;
 
-	list_splice_init(&desc->tx_list, &bdma_chan->free_list);
 	list_move(&desc->desc_node, &bdma_chan->free_list);
-	bdma_chan->completed_cookie = txd->cookie;
 
 	if (callback)
 		callback(param);
 }
 
-static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan)
-{
-	struct tsi721_tx_desc *desc, *_d;
-	LIST_HEAD(list);
-
-	BUG_ON(!tsi721_dma_is_idle(bdma_chan));
-
-	if (!list_empty(&bdma_chan->queue))
-		tsi721_start_dma(bdma_chan);
-
-	list_splice_init(&bdma_chan->active_list, &list);
-	list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);
-
-	list_for_each_entry_safe(desc, _d, &list, desc_node)
-		tsi721_dma_chain_complete(bdma_chan, desc);
-}
-
 static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
 {
 	u32 srd_ptr;
@@ -403,20 +410,159 @@ static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
 	bdma_chan->sts_rdptr = srd_ptr;
 }
 
+/* Must be called with the channel spinlock held */
+static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
+{
+	struct dma_chan *dchan = desc->txd.chan;
+	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+	u32 sys_size;
+	u64 rio_addr;
+	dma_addr_t next_addr;
+	u32 bcount;
+	struct scatterlist *sg;
+	unsigned int i;
+	int err = 0;
+	struct tsi721_dma_desc *bd_ptr = NULL;
+	u32 idx, rd_idx;
+	u32 add_count = 0;
+
+	if (!tsi721_dma_is_idle(bdma_chan)) {
+		dev_err(bdma_chan->dchan.device->dev,
+			"BUG: Attempt to use non-idle channel\n");
+		return -EIO;
+	}
+
+	/*
+	 * Fill DMA channel's hardware buffer descriptors.
+	 * (NOTE: RapidIO destination address is limited to 64 bits for now)
+	 */
+	rio_addr = desc->rio_addr;
+	next_addr = -1;
+	bcount = 0;
+	sys_size = dma_to_mport(bdma_chan->dchan.device)->sys_size;
+
+	rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
+	rd_idx %= (bdma_chan->bd_num + 1);
+
+	idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
+	if (idx == bdma_chan->bd_num) {
+		/* wrap around link descriptor */
+		idx = 0;
+		add_count++;
+	}
+
+	dev_dbg(dchan->device->dev, "%s: BD ring status: rdi=%d wri=%d\n",
+		__func__, rd_idx, idx);
+
+	for_each_sg(desc->sg, sg, desc->sg_len, i) {
+
+		dev_dbg(dchan->device->dev, "sg%d/%d addr: 0x%llx len: %d\n",
+			i, desc->sg_len,
+			(unsigned long long)sg_dma_address(sg), sg_dma_len(sg));
+
+		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
+			dev_err(dchan->device->dev,
+				"%s: SG entry %d is too large\n", __func__, i);
+			err = -EINVAL;
+			break;
+		}
+
+		/*
+		 * If this sg entry forms contiguous block with previous one,
+		 * try to merge it into existing DMA descriptor
+		 */
+		if (next_addr == sg_dma_address(sg) &&
+		    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
+			/* Adjust byte count of the descriptor */
+			bcount += sg_dma_len(sg);
+			goto entry_done;
+		} else if (next_addr != -1) {
+			/* Finalize descriptor using total byte count value */
+			tsi721_desc_fill_end(bd_ptr, bcount, 0);
+			dev_dbg(dchan->device->dev,
+				"%s: prev desc final len: %d\n",
+				__func__, bcount);
+		}
+
+		desc->rio_addr = rio_addr;
+
+		if (i && idx == rd_idx) {
+			dev_dbg(dchan->device->dev,
+				"%s: HW descriptor ring is full @ %d\n",
+				__func__, i);
+			desc->sg = sg;
+			desc->sg_len -= i;
+			break;
+		}
+
+		bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
+		err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
+		if (err) {
+			dev_err(dchan->device->dev,
+				"Failed to build desc: err=%d\n", err);
+			break;
+		}
+
+		dev_dbg(dchan->device->dev, "bd_ptr = %p did=%d raddr=0x%llx\n",
+			bd_ptr, desc->destid, desc->rio_addr);
+
+		next_addr = sg_dma_address(sg);
+		bcount = sg_dma_len(sg);
+
+		add_count++;
+		if (++idx == bdma_chan->bd_num) {
+			/* wrap around link descriptor */
+			idx = 0;
+			add_count++;
+		}
+
+entry_done:
+		if (sg_is_last(sg)) {
+			tsi721_desc_fill_end(bd_ptr, bcount, 0);
+			dev_dbg(dchan->device->dev, "%s: last desc final len: %d\n",
+				__func__, bcount);
+			desc->sg_len = 0;
+		} else {
+			rio_addr += sg_dma_len(sg);
+			next_addr += sg_dma_len(sg);
+		}
+	}
+
+	if (!err)
+		bdma_chan->wr_count_next += add_count;
+
+	return err;
+}
+
 static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
 {
-	if (list_empty(&bdma_chan->active_list) ||
-	    list_is_singular(&bdma_chan->active_list)) {
-		dev_dbg(bdma_chan->dchan.device->dev,
-			"%s: Active_list empty\n", __func__);
-		tsi721_dma_complete_all(bdma_chan);
-	} else {
-		dev_dbg(bdma_chan->dchan.device->dev,
-			"%s: Active_list NOT empty\n", __func__);
-		tsi721_dma_chain_complete(bdma_chan,
-					tsi721_dma_first_active(bdma_chan));
-		tsi721_start_dma(bdma_chan);
+	struct tsi721_tx_desc *desc;
+	int err;
+
+	dev_dbg(bdma_chan->dchan.device->dev, "%s: Enter\n", __func__);
+
+	/*
+	 * If there are any new transactions in the queue add them
+	 * into the processing list
+	 */
+	if (!list_empty(&bdma_chan->queue))
+		list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);
+
+	/* Start new transaction (if available) */
+	if (!list_empty(&bdma_chan->active_list)) {
+		desc = tsi721_dma_first_active(bdma_chan);
+		err = tsi721_submit_sg(desc);
+		if (!err)
+			tsi721_start_dma(bdma_chan);
+		else {
+			tsi721_dma_tx_err(bdma_chan, desc);
+			dev_dbg(bdma_chan->dchan.device->dev,
+				"ERR: tsi721_submit_sg failed with err=%d\n",
+				err);
+		}
 	}
+
+	dev_dbg(bdma_chan->dchan.device->dev, "%s: Exit\n", __func__);
 }
 
 static void tsi721_dma_tasklet(unsigned long data)
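Besides the ring-full resume logic (desc->sg / desc->sg_len record where to
continue), tsi721_submit_sg() above merges scatter/gather entries that happen
to be physically contiguous into a single hardware descriptor via the
next_addr/bcount pair. A standalone sketch of that merge rule, with made-up
segment data:

    /* Model (not driver code) of coalescing contiguous segments. */
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_BCOUNT (16 * 1024 * 1024)   /* per-descriptor byte limit */

    struct seg { uint64_t addr; uint32_t len; };

    int main(void)
    {
            struct seg segs[] = {
                    { 0x1000, 0x1000 }, { 0x2000, 0x1000 }, /* contiguous */
                    { 0x8000, 0x2000 },                     /* separate   */
            };
            uint64_t next_addr = (uint64_t)-1;
            uint32_t bcount = 0;
            int descs = 0;

            for (int i = 0; i < 3; i++) {
                    if (segs[i].addr == next_addr &&
                        bcount + segs[i].len <= MAX_BCOUNT) {
                            bcount += segs[i].len;  /* extend descriptor */
                    } else {
                            if (bcount)
                                    printf("desc %d: %u bytes\n",
                                           descs++, (unsigned)bcount);
                            bcount = segs[i].len;   /* start a new one */
                    }
                    next_addr = segs[i].addr + segs[i].len;
            }
            printf("desc %d: %u bytes\n", descs++, (unsigned)bcount);
            printf("%d descriptors for 3 segments\n", descs);
            return 0;
    }

With the data above, the first two segments collapse into one 8 KiB
descriptor, so three segments need only two hardware descriptors.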
@@ -444,8 +590,29 @@ static void tsi721_dma_tasklet(unsigned long data)
 	}
 
 	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
+		struct tsi721_tx_desc *desc;
+
 		tsi721_clr_stat(bdma_chan);
 		spin_lock(&bdma_chan->lock);
+		desc = tsi721_dma_first_active(bdma_chan);
+
+		if (desc->sg_len == 0) {
+			dma_async_tx_callback callback = NULL;
+			void *param = NULL;
+
+			desc->status = DMA_COMPLETE;
+			dma_cookie_complete(&desc->txd);
+			if (desc->txd.flags & DMA_PREP_INTERRUPT) {
+				callback = desc->txd.callback;
+				param = desc->txd.callback_param;
+			}
+			list_move(&desc->desc_node, &bdma_chan->free_list);
+			spin_unlock(&bdma_chan->lock);
+			if (callback)
+				callback(param);
+			spin_lock(&bdma_chan->lock);
+		}
+
 		tsi721_advance_work(bdma_chan);
 		spin_unlock(&bdma_chan->lock);
 	}
@@ -460,21 +627,24 @@ static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
 	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
 	dma_cookie_t cookie;
 
-	spin_lock_bh(&bdma_chan->lock);
+	/* Check if the descriptor is detached from any lists */
+	if (!list_empty(&desc->desc_node)) {
+		dev_err(bdma_chan->dchan.device->dev,
+			"%s: wrong state of descriptor %p\n", __func__, txd);
+		return -EIO;
+	}
 
-	cookie = txd->chan->cookie;
-	if (++cookie < 0)
-		cookie = 1;
-	txd->chan->cookie = cookie;
-	txd->cookie = cookie;
+	spin_lock_bh(&bdma_chan->lock);
 
-	if (list_empty(&bdma_chan->active_list)) {
-		list_add_tail(&desc->desc_node, &bdma_chan->active_list);
-		tsi721_start_dma(bdma_chan);
-	} else {
-		list_add_tail(&desc->desc_node, &bdma_chan->queue);
+	if (!bdma_chan->active) {
+		spin_unlock_bh(&bdma_chan->lock);
+		return -ENODEV;
 	}
 
+	cookie = dma_cookie_assign(txd);
+	desc->status = DMA_IN_PROGRESS;
+	list_add_tail(&desc->desc_node, &bdma_chan->queue);
+
 	spin_unlock_bh(&bdma_chan->lock);
 	return cookie;
 }
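The hunk above drops the driver's hand-rolled cookie arithmetic in favor of
the DMA engine framework helpers (dma_cookie_assign() here, with
dma_cookie_complete() in the tasklet and dma_cookie_status() in
tsi721_tx_status() further down). A simplified userspace model of the cookie
window those helpers maintain; it deliberately ignores the framework's
wrap-around handling:

    /* Simplified model of dmaengine cookie bookkeeping (no wrap). */
    #include <stdio.h>

    typedef int cookie_t;

    static cookie_t chan_cookie;        /* last cookie handed out */
    static cookie_t completed_cookie;   /* last cookie finished */

    static cookie_t cookie_assign(void)
    {
            if (++chan_cookie < 0)      /* skip reserved non-positive values */
                    chan_cookie = 1;
            return chan_cookie;
    }

    static const char *cookie_status(cookie_t c)
    {
            /* complete iff it is at or before the completion watermark */
            return (completed_cookie >= c) ? "complete" : "in progress";
    }

    int main(void)
    {
            cookie_t a = cookie_assign();
            cookie_t b = cookie_assign();

            completed_cookie = a;       /* models dma_cookie_complete() */
            printf("a: %s\n", cookie_status(a));    /* complete */
            printf("b: %s\n", cookie_status(b));    /* in progress */
            return 0;
    }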
@@ -482,115 +652,52 @@ static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
 static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
 {
 	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
-#ifdef CONFIG_PCI_MSI
-	struct tsi721_device *priv = to_tsi721(dchan->device);
-#endif
 	struct tsi721_tx_desc *desc = NULL;
-	LIST_HEAD(tmp_list);
 	int i;
-	int rc;
+
+	dev_dbg(dchan->device->dev, "%s: for channel %d\n",
+		__func__, bdma_chan->id);
 
 	if (bdma_chan->bd_base)
-		return bdma_chan->bd_num - 1;
+		return TSI721_DMA_TX_QUEUE_SZ;
 
 	/* Initialize BDMA channel */
-	if (tsi721_bdma_ch_init(bdma_chan)) {
+	if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
 		dev_err(dchan->device->dev, "Unable to initialize data DMA"
 			" channel %d, aborting\n", bdma_chan->id);
-		return -ENOMEM;
+		return -ENODEV;
 	}
 
-	/* Alocate matching number of logical descriptors */
-	desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc),
+	/* Allocate queue of transaction descriptors */
+	desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc),
 			GFP_KERNEL);
 	if (!desc) {
 		dev_err(dchan->device->dev,
 			"Failed to allocate logical descriptors\n");
-		rc = -ENOMEM;
-		goto err_out;
+		tsi721_bdma_ch_free(bdma_chan);
+		return -ENOMEM;
 	}
 
 	bdma_chan->tx_desc = desc;
 
-	for (i = 0; i < bdma_chan->bd_num - 1; i++) {
+	for (i = 0; i < TSI721_DMA_TX_QUEUE_SZ; i++) {
 		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
 		desc[i].txd.tx_submit = tsi721_tx_submit;
 		desc[i].txd.flags = DMA_CTRL_ACK;
-		INIT_LIST_HEAD(&desc[i].tx_list);
-		list_add_tail(&desc[i].desc_node, &tmp_list);
+		list_add(&desc[i].desc_node, &bdma_chan->free_list);
 	}
 
-	spin_lock_bh(&bdma_chan->lock);
-	list_splice(&tmp_list, &bdma_chan->free_list);
-	bdma_chan->completed_cookie = dchan->cookie = 1;
-	spin_unlock_bh(&bdma_chan->lock);
-
-#ifdef CONFIG_PCI_MSI
-	if (priv->flags & TSI721_USING_MSIX) {
-		/* Request interrupt service if we are in MSI-X mode */
-		rc = request_irq(
-			priv->msix[TSI721_VECT_DMA0_DONE +
-				   bdma_chan->id].vector,
-			tsi721_bdma_msix, 0,
-			priv->msix[TSI721_VECT_DMA0_DONE +
-				   bdma_chan->id].irq_name,
-			(void *)bdma_chan);
-
-		if (rc) {
-			dev_dbg(dchan->device->dev,
-				"Unable to allocate MSI-X interrupt for "
-				"BDMA%d-DONE\n", bdma_chan->id);
-			goto err_out;
-		}
-
-		rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT +
-					    bdma_chan->id].vector,
-				tsi721_bdma_msix, 0,
-				priv->msix[TSI721_VECT_DMA0_INT +
-					   bdma_chan->id].irq_name,
-				(void *)bdma_chan);
-
-		if (rc) {
-			dev_dbg(dchan->device->dev,
-				"Unable to allocate MSI-X interrupt for "
-				"BDMA%d-INT\n", bdma_chan->id);
-			free_irq(
-				priv->msix[TSI721_VECT_DMA0_DONE +
-					   bdma_chan->id].vector,
-				(void *)bdma_chan);
-			rc = -EIO;
-			goto err_out;
-		}
-	}
-#endif /* CONFIG_PCI_MSI */
+	dma_cookie_init(dchan);
 
 	bdma_chan->active = true;
 	tsi721_bdma_interrupt_enable(bdma_chan, 1);
 
-	return bdma_chan->bd_num - 1;
-
-err_out:
-	kfree(desc);
-	tsi721_bdma_ch_free(bdma_chan);
-	return rc;
+	return TSI721_DMA_TX_QUEUE_SZ;
 }
 
-static void tsi721_free_chan_resources(struct dma_chan *dchan)
+static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
 {
-	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
-	struct tsi721_device *priv = to_tsi721(dchan->device);
-	LIST_HEAD(list);
-
-	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
-
-	if (bdma_chan->bd_base == NULL)
-		return;
-
-	BUG_ON(!list_empty(&bdma_chan->active_list));
-	BUG_ON(!list_empty(&bdma_chan->queue));
-
-	tsi721_bdma_interrupt_enable(bdma_chan, 0);
-	bdma_chan->active = false;
+	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
 
 #ifdef CONFIG_PCI_MSI
 	if (priv->flags & TSI721_USING_MSIX) {
@@ -601,64 +708,48 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan)
 	} else
 #endif
 	synchronize_irq(priv->pdev->irq);
+}
 
-	tasklet_kill(&bdma_chan->tasklet);
+static void tsi721_free_chan_resources(struct dma_chan *dchan)
+{
+	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
 
-	spin_lock_bh(&bdma_chan->lock);
-	list_splice_init(&bdma_chan->free_list, &list);
-	spin_unlock_bh(&bdma_chan->lock);
+	dev_dbg(dchan->device->dev, "%s: for channel %d\n",
+		__func__, bdma_chan->id);
 
-#ifdef CONFIG_PCI_MSI
-	if (priv->flags & TSI721_USING_MSIX) {
-		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
-				    bdma_chan->id].vector, (void *)bdma_chan);
-		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
-				    bdma_chan->id].vector, (void *)bdma_chan);
-	}
-#endif /* CONFIG_PCI_MSI */
+	if (bdma_chan->bd_base == NULL)
+		return;
 
-	tsi721_bdma_ch_free(bdma_chan);
+	BUG_ON(!list_empty(&bdma_chan->active_list));
+	BUG_ON(!list_empty(&bdma_chan->queue));
+
+	tsi721_bdma_interrupt_enable(bdma_chan, 0);
+	bdma_chan->active = false;
+	tsi721_sync_dma_irq(bdma_chan);
+	tasklet_kill(&bdma_chan->tasklet);
+	INIT_LIST_HEAD(&bdma_chan->free_list);
 	kfree(bdma_chan->tx_desc);
+	tsi721_bdma_ch_free(bdma_chan);
 }
 
 static
 enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
 			 struct dma_tx_state *txstate)
 {
-	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_completed;
-	int ret;
-
-	spin_lock_bh(&bdma_chan->lock);
-	last_completed = bdma_chan->completed_cookie;
-	last_used = dchan->cookie;
-	spin_unlock_bh(&bdma_chan->lock);
-
-	ret = dma_async_is_complete(cookie, last_completed, last_used);
-
-	dma_set_tx_state(txstate, last_completed, last_used, 0);
-
-	dev_dbg(dchan->device->dev,
-		"%s: exit, ret: %d, last_completed: %d, last_used: %d\n",
-		__func__, ret, last_completed, last_used);
-
-	return ret;
+	return dma_cookie_status(dchan, cookie, txstate);
 }
 
 static void tsi721_issue_pending(struct dma_chan *dchan)
 {
 	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
 
-	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+	dev_dbg(dchan->device->dev, "%s: Enter\n", __func__);
 
-	if (tsi721_dma_is_idle(bdma_chan)) {
+	if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
 		spin_lock_bh(&bdma_chan->lock);
 		tsi721_advance_work(bdma_chan);
 		spin_unlock_bh(&bdma_chan->lock);
-	} else
-		dev_dbg(dchan->device->dev,
-			"%s: DMA channel still busy\n", __func__);
+	}
 }
 
@@ -668,21 +759,19 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
 			void *tinfo)
 {
 	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
-	struct tsi721_tx_desc *desc = NULL;
-	struct tsi721_tx_desc *first = NULL;
-	struct scatterlist *sg;
+	struct tsi721_tx_desc *desc, *_d;
 	struct rio_dma_ext *rext = tinfo;
-	u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */
-	unsigned int i;
-	u32 sys_size = dma_to_mport(dchan->device)->sys_size;
 	enum dma_rtype rtype;
-	dma_addr_t next_addr = -1;
+	struct dma_async_tx_descriptor *txd = NULL;
 
 	if (!sgl || !sg_len) {
 		dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
 		return NULL;
 	}
 
+	dev_dbg(dchan->device->dev, "%s: %s\n", __func__,
+		(dir == DMA_DEV_TO_MEM)?"READ":"WRITE");
+
 	if (dir == DMA_DEV_TO_MEM)
 		rtype = NREAD;
 	else if (dir == DMA_MEM_TO_DEV) {
@@ -704,97 +793,26 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
 		return NULL;
 	}
 
-	for_each_sg(sgl, sg, sg_len, i) {
-		int err;
-
-		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
-			dev_err(dchan->device->dev,
-				"%s: SG entry %d is too large\n", __func__, i);
-			goto err_desc_put;
-		}
-
-		/*
-		 * If this sg entry forms contiguous block with previous one,
-		 * try to merge it into existing DMA descriptor
-		 */
-		if (desc) {
-			if (next_addr == sg_dma_address(sg) &&
-			    desc->bcount + sg_dma_len(sg) <=
-						TSI721_BDMA_MAX_BCOUNT) {
-				/* Adjust byte count of the descriptor */
-				desc->bcount += sg_dma_len(sg);
-				goto entry_done;
-			}
-
-			/*
-			 * Finalize this descriptor using total
-			 * byte count value.
-			 */
-			tsi721_desc_fill_end(desc);
-			dev_dbg(dchan->device->dev, "%s: desc final len: %d\n",
-				__func__, desc->bcount);
-		}
-
-		/*
-		 * Obtain and initialize a new descriptor
-		 */
-		desc = tsi721_desc_get(bdma_chan);
-		if (!desc) {
-			dev_err(dchan->device->dev,
-				"%s: Failed to get new descriptor for SG %d\n",
-				__func__, i);
-			goto err_desc_put;
-		}
-
-		desc->destid = rext->destid;
-		desc->rio_addr = rio_addr;
-		desc->rio_addr_u = 0;
-		desc->bcount = sg_dma_len(sg);
-
-		dev_dbg(dchan->device->dev,
-			"sg%d desc: 0x%llx, addr: 0x%llx len: %d\n",
-			i, (u64)desc->txd.phys,
-			(unsigned long long)sg_dma_address(sg),
-			sg_dma_len(sg));
-
-		dev_dbg(dchan->device->dev,
-			"bd_ptr = %p did=%d raddr=0x%llx\n",
-			desc->hw_desc, desc->destid, desc->rio_addr);
-
-		err = tsi721_desc_fill_init(desc, sg, rtype, sys_size);
-		if (err) {
-			dev_err(dchan->device->dev,
-				"Failed to build desc: %d\n", err);
-			goto err_desc_put;
-		}
-
-		next_addr = sg_dma_address(sg);
-
-		if (!first)
-			first = desc;
-		else
-			list_add_tail(&desc->desc_node, &first->tx_list);
-
-entry_done:
-		if (sg_is_last(sg)) {
-			desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
-			tsi721_desc_fill_end(desc);
-			dev_dbg(dchan->device->dev, "%s: desc final len: %d\n",
-				__func__, desc->bcount);
-		} else {
-			rio_addr += sg_dma_len(sg);
-			next_addr += sg_dma_len(sg);
-		}
-	}
-
-	first->txd.cookie = -EBUSY;
-	desc->txd.flags = flags;
-
-	return &first->txd;
-
-err_desc_put:
-	tsi721_desc_put(bdma_chan, first);
-	return NULL;
+	spin_lock_bh(&bdma_chan->lock);
+
+	list_for_each_entry_safe(desc, _d, &bdma_chan->free_list, desc_node) {
+		if (async_tx_test_ack(&desc->txd)) {
+			list_del_init(&desc->desc_node);
+			desc->destid = rext->destid;
+			desc->rio_addr = rext->rio_addr;
+			desc->rio_addr_u = 0;
+			desc->rtype = rtype;
+			desc->sg_len	= sg_len;
+			desc->sg	= sgl;
+			txd		= &desc->txd;
+			txd->flags	= flags;
+			break;
+		}
+	}
+
+	spin_unlock_bh(&bdma_chan->lock);
+
+	return txd;
 }
 
 static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
@@ -802,23 +820,34 @@ static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
 {
 	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
 	struct tsi721_tx_desc *desc, *_d;
+	u32 dmac_int;
 	LIST_HEAD(list);
 
 	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
 
 	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
+		return -ENOSYS;
 
 	spin_lock_bh(&bdma_chan->lock);
 
-	/* make sure to stop the transfer */
-	iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL);
+	bdma_chan->active = false;
+
+	if (!tsi721_dma_is_idle(bdma_chan)) {
+		/* make sure to stop the transfer */
+		iowrite32(TSI721_DMAC_CTL_SUSP,
+			  bdma_chan->regs + TSI721_DMAC_CTL);
+
+		/* Wait until DMA channel stops */
+		do {
+			dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+		} while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
+	}
 
 	list_splice_init(&bdma_chan->active_list, &list);
 	list_splice_init(&bdma_chan->queue, &list);
 
 	list_for_each_entry_safe(desc, _d, &list, desc_node)
-		tsi721_dma_chain_complete(bdma_chan, desc);
+		tsi721_dma_tx_err(bdma_chan, desc);
 
 	spin_unlock_bh(&bdma_chan->lock);
 
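The DMA_TERMINATE_ALL path above now suspends a busy channel and polls
TSI721_DMAC_INT until the suspend bit appears; note that the patch's do/while
has no timeout, so it relies on the hardware always reporting suspension. A
userspace model of that handshake, using a fake register and an illustrative
bounded loop (the bound is not something this patch adds):

    /* Model of a poll-until-suspended handshake, with a fake register. */
    #include <stdbool.h>
    #include <stdio.h>

    #define INT_SUSP 0x2        /* assumed bit position, model only */

    static unsigned int fake_reg;
    static int polls;

    static unsigned int read_int_reg(void)
    {
            /* pretend the hardware raises SUSP on the third poll */
            return (++polls >= 3) ? (fake_reg | INT_SUSP) : fake_reg;
    }

    int main(void)
    {
            bool stopped = false;

            for (int tries = 0; tries < 1000; tries++) {
                    if (read_int_reg() & INT_SUSP) {
                            stopped = true;
                            break;
                    }
            }
            printf("channel %s after %d polls\n",
                   stopped ? "suspended" : "still busy", polls);
            return 0;
    }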
@@ -828,22 +857,18 @@ static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
 int tsi721_register_dma(struct tsi721_device *priv)
 {
 	int i;
-	int nr_channels = TSI721_DMA_MAXCH;
+	int nr_channels = 0;
 	int err;
 	struct rio_mport *mport = priv->mport;
 
-	mport->dma.dev = &priv->pdev->dev;
-	mport->dma.chancnt = nr_channels;
-
 	INIT_LIST_HEAD(&mport->dma.channels);
 
-	for (i = 0; i < nr_channels; i++) {
+	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
 		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
 
 		if (i == TSI721_DMACH_MAINT)
 			continue;
 
-		bdma_chan->bd_num = TSI721_BDMA_BD_RING_SZ;
 		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
 
 		bdma_chan->dchan.device = &mport->dma;
@@ -862,12 +887,15 @@ int tsi721_register_dma(struct tsi721_device *priv)
 			     (unsigned long)bdma_chan);
 		list_add_tail(&bdma_chan->dchan.device_node,
 			      &mport->dma.channels);
+		nr_channels++;
 	}
 
+	mport->dma.chancnt = nr_channels;
 	dma_cap_zero(mport->dma.cap_mask);
 	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
 	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);
 
+	mport->dma.dev = &priv->pdev->dev;
 	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
 	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
 	mport->dma.device_tx_status = tsi721_tx_status;