author	Geert Uytterhoeven <geert+renesas@glider.be>	2014-06-02 09:38:15 -0400
committer	Mark Brown <broonie@linaro.org>	2014-06-02 10:49:35 -0400
commit	c52fb6d63425248bd4152451a2cc74b7df8fa989 (patch)
tree	369084deb2c83d773f84b6a320186094244e5b56 /drivers/spi/spi-rspi.c
parent	e4b52dc4625ee739195189d40c6ddc7d55ddf312 (diff)
spi: rspi: Merge rspi_*_dma() into rspi_dma_transfer()
rspi_send_dma() and rspi_send_receive_dma() are very similar.
Consolidate them into a single function rspi_dma_transfer(), and add
missing checks for dmaengine_submit() failures.

Both sg_table pointer parameters can be NULL, as RSPI supports TX-only
mode, and unidirectional DMA transfers will also be needed later for
Dual/Quad DMA support.

Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Mark Brown <broonie@linaro.org>
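For reference, dmaengine_submit() only queues the descriptor and returns
a dma_cookie_t, which is negative on failure; dma_submit_error() checks
for that, and dma_async_issue_pending() actually starts the queued work.
A minimal sketch of the submit-and-check pattern this patch adds (the
helper name submit_and_start() and its chan/desc parameters are
illustrative, not part of spi-rspi.c):

	#include <linux/dmaengine.h>

	/* Hypothetical helper showing the pattern; not from the driver. */
	static int submit_and_start(struct dma_chan *chan,
				    struct dma_async_tx_descriptor *desc)
	{
		dma_cookie_t cookie;

		cookie = dmaengine_submit(desc);	/* queue the descriptor */
		if (dma_submit_error(cookie))		/* negative cookie = error */
			return cookie;			/* propagate as an errno */

		dma_async_issue_pending(chan);		/* start the queued transfer */
		return 0;
	}

Without the dma_submit_error() check, a failed submission would leave
the caller stuck in wait_event_interruptible_timeout() until the
one-second timeout, since the completion callback would never run.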
Diffstat (limited to 'drivers/spi/spi-rspi.c')
-rw-r--r--	drivers/spi/spi-rspi.c	139
1 file changed, 61 insertions(+), 78 deletions(-)
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index bfa5e7e5df5a..c77cfe654b0e 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -463,30 +463,67 @@ static void rspi_dma_complete(void *arg)
 	wake_up_interruptible(&rspi->wait);
 }
 
-static int rspi_send_dma(struct rspi_data *rspi, struct sg_table *tx)
+static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
+			     struct sg_table *rx)
 {
-	struct dma_async_tx_descriptor *desc;
+	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
+	u8 irq_mask = 0;
+	unsigned int other_irq = 0;
+	dma_cookie_t cookie;
 	int ret;
 
-	desc = dmaengine_prep_slave_sg(rspi->master->dma_tx, tx->sgl,
-				       tx->nents, DMA_TO_DEVICE,
-				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	if (!desc)
-		return -EIO;
+	if (tx) {
+		desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
+					tx->sgl, tx->nents, DMA_TO_DEVICE,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!desc_tx)
+			return -EIO;
+
+		irq_mask |= SPCR_SPTIE;
+	}
+	if (rx) {
+		desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx,
+					rx->sgl, rx->nents, DMA_FROM_DEVICE,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!desc_rx)
+			return -EIO;
+
+		irq_mask |= SPCR_SPRIE;
+	}
 
 	/*
-	 * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
+	 * DMAC needs SPxIE, but if SPxIE is set, the IRQ routine will be
 	 * called. So, this driver disables the IRQ while DMA transfer.
 	 */
-	disable_irq(rspi->tx_irq);
+	if (tx)
+		disable_irq(other_irq = rspi->tx_irq);
+	if (rx && rspi->rx_irq != other_irq)
+		disable_irq(rspi->rx_irq);
 
-	rspi_enable_irq(rspi, SPCR_SPTIE);
+	rspi_enable_irq(rspi, irq_mask);
 	rspi->dma_callbacked = 0;
 
-	desc->callback = rspi_dma_complete;
-	desc->callback_param = rspi;
-	dmaengine_submit(desc);
-	dma_async_issue_pending(rspi->master->dma_tx);
+	if (rx) {
+		desc_rx->callback = rspi_dma_complete;
+		desc_rx->callback_param = rspi;
+		cookie = dmaengine_submit(desc_rx);
+		if (dma_submit_error(cookie))
+			return cookie;
+		dma_async_issue_pending(rspi->master->dma_rx);
+	}
+	if (tx) {
+		if (rx) {
+			/* No callback */
+			desc_tx->callback = NULL;
+		} else {
+			desc_tx->callback = rspi_dma_complete;
+			desc_tx->callback_param = rspi;
+		}
+		cookie = dmaengine_submit(desc_tx);
+		if (dma_submit_error(cookie))
+			return cookie;
+		dma_async_issue_pending(rspi->master->dma_tx);
+	}
 
 	ret = wait_event_interruptible_timeout(rspi->wait,
 					       rspi->dma_callbacked, HZ);
@@ -494,9 +531,14 @@ static int rspi_send_dma(struct rspi_data *rspi, struct sg_table *tx)
 		ret = 0;
 	else if (!ret)
 		ret = -ETIMEDOUT;
-	rspi_disable_irq(rspi, SPCR_SPTIE);
 
-	enable_irq(rspi->tx_irq);
+	rspi_disable_irq(rspi, irq_mask);
+
+	if (tx)
+		enable_irq(rspi->tx_irq);
+	if (rx && rspi->rx_irq != other_irq)
+		enable_irq(rspi->rx_irq);
+
 	return ret;
 }
 
502 544
@@ -530,61 +572,6 @@ static void qspi_receive_init(const struct rspi_data *rspi)
 	rspi_write8(rspi, 0, QSPI_SPBFCR);
 }
 
-static int rspi_send_receive_dma(struct rspi_data *rspi, struct sg_table *tx,
-				 struct sg_table *rx)
-{
-	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
-	int ret;
-
-	/* prepare transmit transfer */
-	desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx, tx->sgl,
-					  tx->nents, DMA_TO_DEVICE,
-					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	if (!desc_tx)
-		return -EIO;
-
-	/* prepare receive transfer */
-	desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx, rx->sgl,
-					  rx->nents, DMA_FROM_DEVICE,
-					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	if (!desc_rx)
-		return -EIO;
-
-	/*
-	 * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
-	 * called. So, this driver disables the IRQ while DMA transfer.
-	 */
-	disable_irq(rspi->tx_irq);
-	if (rspi->rx_irq != rspi->tx_irq)
-		disable_irq(rspi->rx_irq);
-
-	rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
-	rspi->dma_callbacked = 0;
-
-	desc_rx->callback = rspi_dma_complete;
-	desc_rx->callback_param = rspi;
-	dmaengine_submit(desc_rx);
-	dma_async_issue_pending(rspi->master->dma_rx);
-
-	desc_tx->callback = NULL; /* No callback */
-	dmaengine_submit(desc_tx);
-	dma_async_issue_pending(rspi->master->dma_tx);
-
-	ret = wait_event_interruptible_timeout(rspi->wait,
-					       rspi->dma_callbacked, HZ);
-	if (ret > 0 && rspi->dma_callbacked)
-		ret = 0;
-	else if (!ret)
-		ret = -ETIMEDOUT;
-	rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
-
-	enable_irq(rspi->tx_irq);
-	if (rspi->rx_irq != rspi->tx_irq)
-		enable_irq(rspi->rx_irq);
-
-	return ret;
-}
-
 static bool __rspi_can_dma(const struct rspi_data *rspi,
 			   const struct spi_transfer *xfer)
 {
@@ -615,13 +602,9 @@ static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
 	}
 	rspi_write8(rspi, spcr, RSPI_SPCR);
 
-	if (master->can_dma && __rspi_can_dma(rspi, xfer)) {
-		if (xfer->rx_buf)
-			return rspi_send_receive_dma(rspi, &xfer->tx_sg,
-						     &xfer->rx_sg);
-		else
-			return rspi_send_dma(rspi, &xfer->tx_sg);
-	}
+	if (master->can_dma && __rspi_can_dma(rspi, xfer))
+		return rspi_dma_transfer(rspi, &xfer->tx_sg,
+					 xfer->rx_buf ? &xfer->rx_sg : NULL);
 
 	ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
 	if (ret < 0)