author		Marek Vasut <marex@denx.de>	2012-09-03 22:40:15 -0400
committer	Mark Brown <broonie@opensource.wolfsonmicro.com>	2012-09-05 19:42:04 -0400
commit		010b481834b2b60f7d8543263a63e69396019f7b (patch)
tree		2fe727feba034543951c3488ea7d186565d0557d
parent		7d520d28dd5287d14b5ec6cf4405a1220ca57d42 (diff)
mxs/spi: Fix issues when doing long continuous transfer
When doing a long continuous transfer, e.g. from SPI flash via /dev/mtd,
the driver dies. This is caused by a bug in the DMA chaining. Rework the
DMA transfer code so that this issue no longer happens.

This involves proper allocation of the correct amount of sg-list members
and proper creation of the DMA descriptors. There is an important catch
here: the data transfer descriptors must be interleaved with PIO register
write descriptors, otherwise the transfer stalls. In principle this could
be done within a single descriptor, but due to a limitation of the DMA
API it is not possible.

It turns out that in order for the SPI DMA to properly support continuous
transfers longer than 65280 bytes, there are some very important parts
that were left out of the documentation about the PIO transfer that is
used.

Firstly, the XFER_SIZE register is not written with the whole length of
the transfer, but is written by each and every chained descriptor with
the length of that descriptor's data buffer.

Next, unlike the demo code supplied by FSL, which only writes one PIO
word per descriptor, this does not apply if the descriptors are chained,
since the XFER_SIZE register must be written. Therefore it is essential
to use four PIO words: CTRL0, CMD0, CMD1 and XFER_SIZE. CMD0 and CMD1
are written with zero, since they do not apply here. The DMA engine
programs the PIO words in incrementing register order, hence all four
words.

Finally, unlike the demo code supplied by FSL, SSP_CTRL0_IGNORE_CRC must
not be set during the whole transfer; it must be set only on the last
descriptor in the chain.

Lastly, this patch borrows code from drivers/mtd/nand/omap2.c, which
solves trouble when the buffer supplied to the DMA transfer is
vmalloc()'d. With this patch, it is safe to use the /dev/mtdblockX
interface again.

Signed-off-by: Marek Vasut <marex@denx.de>
Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
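To make the descriptor layout described above concrete, here is a minimal
sketch, not part of the patch, of the four PIO words each chained
descriptor must program; the struct name and comments are illustrative
only (the patch itself stores them as a plain uint32_t pio[4] array):

/*
 * Illustrative sketch only: the per-descriptor PIO word layout the
 * commit message describes. The DMA engine programs PIO words in
 * incrementing register order, so all four must be supplied whenever
 * descriptors are chained.
 */
#include <linux/types.h>

struct mxs_pio_words_sketch {	/* hypothetical name */
	uint32_t ctrl0;		/* CTRL0 flags; IGNORE_CRC only on the last descriptor */
	uint32_t cmd0;		/* written with zero, unused for plain SPI transfers */
	uint32_t cmd1;		/* written with zero, unused for plain SPI transfers */
	uint32_t xfer_size;	/* length of THIS descriptor's buffer, not the total */
};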
-rw-r--r--	drivers/spi/spi-mxs.c	141
1 file changed, 88 insertions(+), 53 deletions(-)
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 10d34ebe9ca3..bcba098e97c5 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -53,9 +53,9 @@
 
 #define DRIVER_NAME		"mxs-spi"
 
-#define SSP_TIMEOUT		1000	/* 1000 ms */
+/* Use 10S timeout for very long transfers, it should suffice. */
+#define SSP_TIMEOUT		10000
 
-#define SG_NUM			4
 #define SG_MAXLEN		0xff00
 
 struct mxs_spi {
@@ -219,61 +219,94 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
 			int *first, int *last, int write)
 {
 	struct mxs_ssp *ssp = &spi->ssp;
-	struct dma_async_tx_descriptor *desc;
-	struct scatterlist sg[SG_NUM];
+	struct dma_async_tx_descriptor *desc = NULL;
+	const bool vmalloced_buf = is_vmalloc_addr(buf);
+	const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN;
+	const int sgs = DIV_ROUND_UP(len, desc_len);
 	int sg_count;
-	uint32_t pio = BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs);
-	int ret;
-
-	if (len > SG_NUM * SG_MAXLEN) {
-		dev_err(ssp->dev, "Data chunk too big for DMA\n");
+	int min, ret;
+	uint32_t ctrl0;
+	struct page *vm_page;
+	void *sg_buf;
+	struct {
+		uint32_t pio[4];
+		struct scatterlist sg;
+	} *dma_xfer;
+
+	if (!len)
 		return -EINVAL;
-	}
+
+	dma_xfer = kzalloc(sizeof(*dma_xfer) * sgs, GFP_KERNEL);
+	if (!dma_xfer)
+		return -ENOMEM;
 
 	INIT_COMPLETION(spi->c);
 
+	ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
+	ctrl0 |= BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs);
+
 	if (*first)
-		pio |= BM_SSP_CTRL0_LOCK_CS;
-	if (*last)
-		pio |= BM_SSP_CTRL0_IGNORE_CRC;
+		ctrl0 |= BM_SSP_CTRL0_LOCK_CS;
 	if (!write)
-		pio |= BM_SSP_CTRL0_READ;
-
-	if (ssp->devid == IMX23_SSP)
-		pio |= len;
-	else
-		writel(len, ssp->base + HW_SSP_XFER_SIZE);
-
-	/* Queue the PIO register write transfer. */
-	desc = dmaengine_prep_slave_sg(ssp->dmach,
-			(struct scatterlist *)&pio,
-			1, DMA_TRANS_NONE, 0);
-	if (!desc) {
-		dev_err(ssp->dev,
-			"Failed to get PIO reg. write descriptor.\n");
-		return -EINVAL;
-	}
+		ctrl0 |= BM_SSP_CTRL0_READ;
 
 	/* Queue the DMA data transfer. */
-	sg_init_table(sg, (len / SG_MAXLEN) + 1);
-	sg_count = 0;
-	while (len) {
-		sg_set_buf(&sg[sg_count++], buf, min(len, SG_MAXLEN));
-		len -= min(len, SG_MAXLEN);
-		buf += min(len, SG_MAXLEN);
-	}
-	dma_map_sg(ssp->dev, sg, sg_count,
-		write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
-	desc = dmaengine_prep_slave_sg(ssp->dmach, sg, sg_count,
-			write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
-			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
-	if (!desc) {
-		dev_err(ssp->dev,
-			"Failed to get DMA data write descriptor.\n");
-		ret = -EINVAL;
-		goto err;
+	for (sg_count = 0; sg_count < sgs; sg_count++) {
+		min = min(len, desc_len);
+
+		/* Prepare the transfer descriptor. */
+		if ((sg_count + 1 == sgs) && *last)
+			ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;
+
+		if (ssp->devid == IMX23_SSP)
+			ctrl0 |= min;
+
+		dma_xfer[sg_count].pio[0] = ctrl0;
+		dma_xfer[sg_count].pio[3] = min;
+
+		if (vmalloced_buf) {
+			vm_page = vmalloc_to_page(buf);
+			if (!vm_page) {
+				ret = -ENOMEM;
+				goto err_vmalloc;
+			}
+			sg_buf = page_address(vm_page) +
+				((size_t)buf & ~PAGE_MASK);
+		} else {
+			sg_buf = buf;
+		}
+
+		sg_init_one(&dma_xfer[sg_count].sg, sg_buf, min);
+		ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
+			write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+		len -= min;
+		buf += min;
+
+		/* Queue the PIO register write transfer. */
+		desc = dmaengine_prep_slave_sg(ssp->dmach,
+				(struct scatterlist *)dma_xfer[sg_count].pio,
+				(ssp->devid == IMX23_SSP) ? 1 : 4,
+				DMA_TRANS_NONE,
+				sg_count ? DMA_PREP_INTERRUPT : 0);
+		if (!desc) {
+			dev_err(ssp->dev,
+				"Failed to get PIO reg. write descriptor.\n");
+			ret = -EINVAL;
+			goto err_mapped;
+		}
+
+		desc = dmaengine_prep_slave_sg(ssp->dmach,
+				&dma_xfer[sg_count].sg, 1,
+				write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+		if (!desc) {
+			dev_err(ssp->dev,
+				"Failed to get DMA data write descriptor.\n");
+			ret = -EINVAL;
+			goto err_mapped;
+		}
 	}
 
 	/*
@@ -289,21 +322,23 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
 
 	ret = wait_for_completion_timeout(&spi->c,
 				msecs_to_jiffies(SSP_TIMEOUT));
-
 	if (!ret) {
 		dev_err(ssp->dev, "DMA transfer timeout\n");
 		ret = -ETIMEDOUT;
-		goto err;
+		goto err_vmalloc;
 	}
 
 	ret = 0;
 
-err:
-	for (--sg_count; sg_count >= 0; sg_count--) {
-		dma_unmap_sg(ssp->dev, &sg[sg_count], 1,
+err_vmalloc:
+	while (--sg_count >= 0) {
+err_mapped:
+		dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
 			write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	}
 
+	kfree(dma_xfer);
+
 	return ret;
 }
 
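For reference, a minimal sketch of the vmalloc-buffer handling the patch
borrows from drivers/mtd/nand/omap2.c. This is not part of the patch and
the function name is hypothetical; it only illustrates why each page of
a vmalloc() area must be translated individually before DMA mapping: the
area is virtually contiguous but physically scattered.

/*
 * Illustrative sketch only: resolve a kernel virtual address to an
 * address usable on an sg-list. Lowmem/kmalloc buffers map directly;
 * vmalloc()'d buffers go through vmalloc_to_page() one page at a time.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *sketch_dma_safe_addr(void *buf)	/* hypothetical helper */
{
	struct page *page;

	if (!is_vmalloc_addr(buf))
		return buf;

	page = vmalloc_to_page(buf);
	if (!page)
		return NULL;

	/* Linear-map address of the page plus the offset within it. */
	return page_address(page) + ((size_t)buf & ~PAGE_MASK);
}

Because a page boundary may fall anywhere in the buffer, this is also why
the patch caps each descriptor at PAGE_SIZE for vmalloc()'d buffers
(desc_len) instead of the usual SG_MAXLEN.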