author	Marek Vasut <marex@denx.de>	2012-08-03 11:26:13 -0400
committer	Mark Brown <broonie@opensource.wolfsonmicro.com>	2012-08-17 17:54:16 -0400
commit	474afc042fb9db8f88b68243f78a38cb764692fc (patch)
tree	bb1c187786042aa5d72393a9e4272388a63c64f6	/drivers/spi/spi-mxs.c
parent	65defb9b3ba67c1d6f88ac62c24644eb23a7b676 (diff)
spi/mxs: Add DMA support into SPI driver
Signed-off-by: Marek Vasut <marex@denx.de>
Acked-by: Chris Ball <cjb@laptop.org>
Acked-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
Diffstat (limited to 'drivers/spi/spi-mxs.c')
-rw-r--r--	drivers/spi/spi-mxs.c	231
1 files changed, 216 insertions, 15 deletions
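
Before reading the diff itself, a quick orientation: the new mxs_spi_txrx_dma() added below follows the standard dmaengine slave pattern — map a scatterlist, prepare a slave_sg descriptor, attach a completion callback, submit, issue pending, then wait with a timeout. The sketch below shows that generic pattern in isolation; struct my_dev, my_dma_complete() and my_dma_xfer() are illustrative names only, not part of this driver.

```c
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>

/* Placeholder context; the driver keeps the same pieces in struct mxs_spi/mxs_ssp. */
struct my_dev {
	struct device		*dev;
	struct dma_chan		*chan;
	struct completion	done;
};

static void my_dma_complete(void *param)
{
	complete(&((struct my_dev *)param)->done);
}

/* Map one buffer, push it through the slave channel, wait for the callback. */
static int my_dma_xfer(struct my_dev *d, void *buf, size_t len, bool to_device)
{
	enum dma_data_direction map_dir = to_device ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;
	int ret;

	init_completion(&d->done);

	sg_init_one(&sg, buf, len);
	if (!dma_map_sg(d->dev, &sg, 1, map_dir))
		return -ENOMEM;

	desc = dmaengine_prep_slave_sg(d->chan, &sg, 1,
				       to_device ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EINVAL;
		goto unmap;
	}

	desc->callback = my_dma_complete;	/* fires when the transfer finishes */
	desc->callback_param = d;

	dmaengine_submit(desc);			/* queue the descriptor */
	dma_async_issue_pending(d->chan);	/* kick the channel */

	ret = wait_for_completion_timeout(&d->done, msecs_to_jiffies(1000));
	ret = ret ? 0 : -ETIMEDOUT;
unmap:
	dma_unmap_sg(d->dev, &sg, 1, map_dir);
	return ret;
}
```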
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 0f28afb80310..130a43688352 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -55,8 +55,12 @@
 
 #define SSP_TIMEOUT		1000	/* 1000 ms */
 
+#define SG_NUM		4
+#define SG_MAXLEN	0xff00
+
 struct mxs_spi {
 	struct mxs_ssp		ssp;
+	struct completion	c;
 };
 
 static int mxs_spi_setup_transfer(struct spi_device *dev,
@@ -194,6 +198,115 @@ static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
 	return 0;
 }
 
+static void mxs_ssp_dma_irq_callback(void *param)
+{
+	struct mxs_spi *spi = param;
+	complete(&spi->c);
+}
+
+static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
+{
+	struct mxs_ssp *ssp = dev_id;
+	dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n",
+		__func__, __LINE__,
+		readl(ssp->base + HW_SSP_CTRL1(ssp)),
+		readl(ssp->base + HW_SSP_STATUS(ssp)));
+	return IRQ_HANDLED;
+}
+
+static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
+			    unsigned char *buf, int len,
+			    int *first, int *last, int write)
+{
+	struct mxs_ssp *ssp = &spi->ssp;
+	struct dma_async_tx_descriptor *desc;
+	struct scatterlist sg[SG_NUM];
+	int sg_count;
+	uint32_t pio = BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs);
+	int ret;
+
+	if (len > SG_NUM * SG_MAXLEN) {
+		dev_err(ssp->dev, "Data chunk too big for DMA\n");
+		return -EINVAL;
+	}
+
+	init_completion(&spi->c);
+
+	if (*first)
+		pio |= BM_SSP_CTRL0_LOCK_CS;
+	if (*last)
+		pio |= BM_SSP_CTRL0_IGNORE_CRC;
+	if (!write)
+		pio |= BM_SSP_CTRL0_READ;
+
+	if (ssp->devid == IMX23_SSP)
+		pio |= len;
+	else
+		writel(len, ssp->base + HW_SSP_XFER_SIZE);
+
+	/* Queue the PIO register write transfer. */
+	desc = dmaengine_prep_slave_sg(ssp->dmach,
+			(struct scatterlist *)&pio,
+			1, DMA_TRANS_NONE, 0);
+	if (!desc) {
+		dev_err(ssp->dev,
+			"Failed to get PIO reg. write descriptor.\n");
+		return -EINVAL;
+	}
+
+	/* Queue the DMA data transfer. */
+	sg_init_table(sg, (len / SG_MAXLEN) + 1);
+	sg_count = 0;
+	while (len) {
+		sg_set_buf(&sg[sg_count++], buf, min(len, SG_MAXLEN));
+		buf += min(len, SG_MAXLEN);
+		len -= min(len, SG_MAXLEN);
+	}
+	dma_map_sg(ssp->dev, sg, sg_count,
+		   write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+	desc = dmaengine_prep_slave_sg(ssp->dmach, sg, sg_count,
+			write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+	if (!desc) {
+		dev_err(ssp->dev,
+			"Failed to get DMA data write descriptor.\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/*
+	 * The last descriptor must have this callback,
+	 * to finish the DMA transaction.
+	 */
+	desc->callback = mxs_ssp_dma_irq_callback;
+	desc->callback_param = spi;
+
+	/* Start the transfer. */
+	dmaengine_submit(desc);
+	dma_async_issue_pending(ssp->dmach);
+
+	ret = wait_for_completion_timeout(&spi->c,
+			msecs_to_jiffies(SSP_TIMEOUT));
+
+	if (!ret) {
+		dev_err(ssp->dev, "DMA transfer timeout\n");
+		ret = -ETIMEDOUT;
+		goto err;
+	}
+
+	ret = 0;
+
+err:
+	for (--sg_count; sg_count >= 0; sg_count--) {
+		dma_unmap_sg(ssp->dev, &sg[sg_count], 1,
+			     write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	}
+
+	return ret;
+}
+
 static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs,
 			    unsigned char *buf, int len,
 			    int *first, int *last, int write)
@@ -281,19 +394,49 @@ static int mxs_spi_transfer_one(struct spi_master *master,
 			first = 1;
 		if (&t->transfer_list == m->transfers.prev)
 			last = 1;
-		if (t->rx_buf && t->tx_buf) {
+		if ((t->rx_buf && t->tx_buf) || (t->rx_dma && t->tx_dma)) {
 			dev_err(ssp->dev,
 				"Cannot send and receive simultaneously\n");
 			status = -EINVAL;
 			break;
 		}
 
-		if (t->tx_buf)
-			status = mxs_spi_txrx_pio(spi, cs, (void *)t->tx_buf,
-						  t->len, &first, &last, 1);
-		if (t->rx_buf)
-			status = mxs_spi_txrx_pio(spi, cs, t->rx_buf,
-						  t->len, &first, &last, 0);
+		/*
+		 * Small blocks can be transferred via PIO.
+		 * Measured by empirical means:
+		 *
+		 * dd if=/dev/mtdblock0 of=/dev/null bs=1024k count=1
+		 *
+		 * DMA only: 2.164808 seconds, 473.0KB/s
+		 * Combined: 1.676276 seconds, 610.9KB/s
+		 */
+		if (t->len <= 256) {
+			writel(BM_SSP_CTRL1_DMA_ENABLE,
+				ssp->base + HW_SSP_CTRL1(ssp) +
+				STMP_OFFSET_REG_CLR);
+
+			if (t->tx_buf)
+				status = mxs_spi_txrx_pio(spi, cs,
+						(void *)t->tx_buf,
+						t->len, &first, &last, 1);
+			if (t->rx_buf)
+				status = mxs_spi_txrx_pio(spi, cs,
+						t->rx_buf, t->len,
+						&first, &last, 0);
+		} else {
+			writel(BM_SSP_CTRL1_DMA_ENABLE,
+				ssp->base + HW_SSP_CTRL1(ssp) +
+				STMP_OFFSET_REG_SET);
+
+			if (t->tx_buf)
+				status = mxs_spi_txrx_dma(spi, cs,
+						(void *)t->tx_buf, t->len,
+						&first, &last, 1);
+			if (t->rx_buf)
+				status = mxs_spi_txrx_dma(spi, cs,
+						t->rx_buf, t->len,
+						&first, &last, 0);
+		}
 
 		m->actual_length += t->len;
 		if (status)
@@ -308,6 +451,21 @@ static int mxs_spi_transfer_one(struct spi_master *master,
 	return status;
 }
 
+static bool mxs_ssp_dma_filter(struct dma_chan *chan, void *param)
+{
+	struct mxs_ssp *ssp = param;
+
+	if (!mxs_dma_is_apbh(chan))
+		return false;
+
+	if (chan->chan_id != ssp->dma_channel)
+		return false;
+
+	chan->private = &ssp->dma_data;
+
+	return true;
+}
+
 static const struct of_device_id mxs_spi_dt_ids[] = {
 	{ .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
 	{ .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
@@ -323,15 +481,18 @@ static int __devinit mxs_spi_probe(struct platform_device *pdev)
 	struct spi_master *master;
 	struct mxs_spi *spi;
 	struct mxs_ssp *ssp;
-	struct resource *iores;
+	struct resource *iores, *dmares;
 	struct pinctrl *pinctrl;
 	struct clk *clk;
 	void __iomem *base;
-	int devid;
-	int ret = 0;
+	int devid, dma_channel;
+	int ret = 0, irq_err, irq_dma;
+	dma_cap_mask_t mask;
 
 	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!iores)
+	irq_err = platform_get_irq(pdev, 0);
+	irq_dma = platform_get_irq(pdev, 1);
+	if (!iores || irq_err < 0 || irq_dma < 0)
 		return -EINVAL;
 
 	base = devm_request_and_ioremap(&pdev->dev, iores);
@@ -346,10 +507,26 @@ static int __devinit mxs_spi_probe(struct platform_device *pdev)
 	if (IS_ERR(clk))
 		return PTR_ERR(clk);
 
-	if (np)
+	if (np) {
 		devid = (enum mxs_ssp_id) of_id->data;
-	else
+		/*
+		 * TODO: This is a temporary solution and should be changed
+		 * to use generic DMA binding later when the helpers get in.
+		 */
+		ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
+					   &dma_channel);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Failed to get DMA channel\n");
+			return -EINVAL;
+		}
+	} else {
+		dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+		if (!dmares)
+			return -EINVAL;
 		devid = pdev->id_entry->driver_data;
+		dma_channel = dmares->start;
+	}
 
 	master = spi_alloc_master(&pdev->dev, sizeof(*spi));
 	if (!master)
@@ -368,8 +545,28 @@ static int __devinit mxs_spi_probe(struct platform_device *pdev)
 	ssp->clk = clk;
 	ssp->base = base;
 	ssp->devid = devid;
+	ssp->dma_channel = dma_channel;
+
+	ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0,
+			       DRIVER_NAME, ssp);
+	if (ret)
+		goto out_master_free;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	ssp->dma_data.chan_irq = irq_dma;
+	ssp->dmach = dma_request_channel(mask, mxs_ssp_dma_filter, ssp);
+	if (!ssp->dmach) {
+		dev_err(ssp->dev, "Failed to request DMA\n");
+		goto out_master_free;
+	}
 
+	/*
+	 * Crank up the clock to 120MHz, this will be further divided onto a
+	 * proper speed.
+	 */
 	clk_prepare_enable(ssp->clk);
+	clk_set_rate(ssp->clk, 120 * 1000 * 1000);
 	ssp->clk_rate = clk_get_rate(ssp->clk) / 1000;
 
 	stmp_reset_block(ssp->base);
@@ -379,14 +576,16 @@ static int __devinit mxs_spi_probe(struct platform_device *pdev)
 	ret = spi_register_master(master);
 	if (ret) {
 		dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
-		goto out_master_free;
+		goto out_free_dma;
 	}
 
 	return 0;
 
-out_master_free:
+out_free_dma:
 	platform_set_drvdata(pdev, NULL);
+	dma_release_channel(ssp->dmach);
 	clk_disable_unprepare(ssp->clk);
+out_master_free:
 	spi_master_put(master);
 	return ret;
 }
@@ -405,6 +604,8 @@ static int __devexit mxs_spi_remove(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, NULL);
 
+	dma_release_channel(ssp->dmach);
+
 	clk_disable_unprepare(ssp->clk);
 
 	spi_master_put(master);
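
Worth noting from the probe hunk above: because the generic device-tree DMA helpers were not available yet (see the TODO comment in the diff), the driver picks its APBH channel with dma_request_channel() plus a filter callback that matches on chan_id and passes controller data through chan->private. A minimal, self-contained sketch of that filter pattern follows; my_filter_param, my_dma_filter() and my_request_channel() are hypothetical names, not the driver's.

```c
#include <linux/dmaengine.h>

/* Hypothetical filter parameter; the driver passes its struct mxs_ssp instead. */
struct my_filter_param {
	int	wanted_chan_id;
	void	*chan_private;	/* e.g. per-channel data such as a DMA IRQ number */
};

static bool my_dma_filter(struct dma_chan *chan, void *param)
{
	struct my_filter_param *p = param;

	/* Reject every channel except the one named in platform data / DT. */
	if (chan->chan_id != p->wanted_chan_id)
		return false;

	/* Hand controller-specific data to the channel before accepting it. */
	chan->private = p->chan_private;
	return true;
}

static struct dma_chan *my_request_channel(struct my_filter_param *p)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);	/* a slave-capable channel is required */

	/* dmaengine walks all channels and calls the filter on each candidate. */
	return dma_request_channel(mask, my_dma_filter, p);
}
```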