author     Geert Uytterhoeven <geert+renesas@glider.be>   2014-07-09 06:26:22 -0400
committer  Mark Brown <broonie@linaro.org>                2014-07-16 17:41:12 -0400
commit     85912a88c1ebcad04a5cfec971771195ce8d6691 (patch)
tree       c52411154cd1c2443f9c51cc43fbdd324853c6f6
parent     533465a8f8a969bac17550fc6f42cea1dfaa095f (diff)
spi: rspi: Handle dmaengine_prep_slave_sg() failures gracefully
As a shmobile SoC typically has fewer DMA channels than devices that can use
DMA, we may want to prioritize access to the DMA channels in the future.
This means that dmaengine_prep_slave_sg() may start failing arbitrarily.
Handle dmaengine_prep_slave_sg() failures gracefully by falling back to
PIO.
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Mark Brown <broonie@linaro.org>
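
For context, here is a minimal, compilable userspace sketch of the fallback
pattern this patch introduces. All names below (prep_dma_descriptor,
dma_transfer, pio_transfer, common_transfer) are hypothetical stand-ins for
dmaengine_prep_slave_sg(), rspi_dma_transfer() and rspi_pio_transfer(); only
the "-EAGAIN means retry via PIO" convention is taken from the patch itself.

/*
 * Minimal sketch of the fallback pattern (hypothetical names, userspace C):
 * the DMA path returns -EAGAIN when no descriptor can be prepared, and the
 * caller silently retries the same transfer via PIO.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for dmaengine_prep_slave_sg(): pretend descriptors can run out. */
static bool prep_dma_descriptor(void)
{
        static int available = 1;   /* first call succeeds, later ones fail */

        return available-- > 0;
}

/* DMA path, analogous to rspi_dma_transfer(): -EAGAIN asks for PIO fallback. */
static int dma_transfer(const char *buf, size_t len)
{
        if (!prep_dma_descriptor()) {
                fprintf(stderr, "DMA not available, falling back to PIO\n");
                return -EAGAIN;
        }
        printf("DMA transfer of %zu bytes: \"%s\"\n", len, buf);
        return 0;
}

/* PIO path, analogous to rspi_pio_transfer(): always available, just slower. */
static int pio_transfer(const char *buf, size_t len)
{
        printf("PIO transfer of %zu bytes: \"%s\"\n", len, buf);
        return 0;
}

/* Caller mirrors rspi_common_transfer(): only -EAGAIN falls through to PIO. */
static int common_transfer(const char *buf, size_t len)
{
        int ret = dma_transfer(buf, len);

        if (ret != -EAGAIN)
                return ret;   /* success, or a hard DMA error: do not retry */

        return pio_transfer(buf, len);
}

int main(void)
{
        common_transfer("first", 5);    /* descriptor available: uses DMA  */
        common_transfer("second", 6);   /* descriptors exhausted: uses PIO */
        return 0;
}

The key point mirrored from the patch is that only -EAGAIN triggers the PIO
retry; genuine DMA errors still propagate to the caller unchanged.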
-rw-r--r--  drivers/spi/spi-rspi.c | 30
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 38fd938d6360..c850dfdfa9e3 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -477,7 +477,7 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
                                 tx->sgl, tx->nents, DMA_TO_DEVICE,
                                 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                 if (!desc_tx)
-                        return -EIO;
+                        goto no_dma;
 
                 irq_mask |= SPCR_SPTIE;
         }
@@ -486,7 +486,7 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
                                 rx->sgl, rx->nents, DMA_FROM_DEVICE,
                                 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                 if (!desc_rx)
-                        return -EIO;
+                        goto no_dma;
 
                 irq_mask |= SPCR_SPRIE;
         }
@@ -540,6 +540,12 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
                 enable_irq(rspi->rx_irq);
 
         return ret;
+
+no_dma:
+        pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
+                     dev_driver_string(&rspi->master->dev),
+                     dev_name(&rspi->master->dev));
+        return -EAGAIN;
 }
 
 static void rspi_receive_init(const struct rspi_data *rspi)
@@ -593,8 +599,10 @@ static int rspi_common_transfer(struct rspi_data *rspi,
 
         if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
                 /* rx_buf can be NULL on RSPI on SH in TX-only Mode */
-                return rspi_dma_transfer(rspi, &xfer->tx_sg,
-                                         xfer->rx_buf ? &xfer->rx_sg : NULL);
+                ret = rspi_dma_transfer(rspi, &xfer->tx_sg,
+                                        xfer->rx_buf ? &xfer->rx_sg : NULL);
+                if (ret != -EAGAIN)
+                        return ret;
         }
 
         ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
@@ -648,8 +656,11 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
 {
         int ret;
 
-        if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer))
-                return rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
+        if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+                ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
+                if (ret != -EAGAIN)
+                        return ret;
+        }
 
         ret = rspi_pio_transfer(rspi, xfer->tx_buf, NULL, xfer->len);
         if (ret < 0)
@@ -663,8 +674,11 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
 
 static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
 {
-        if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer))
-                return rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
+        if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+                int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
+                if (ret != -EAGAIN)
+                        return ret;
+        }
 
         return rspi_pio_transfer(rspi, NULL, xfer->rx_buf, xfer->len);
 }