author		Radu Pirea <radu.pirea@microchip.com>	2017-12-19 10:17:59 -0500
committer	Mark Brown <broonie@kernel.org>	2018-01-05 07:42:09 -0500
commit		a9889ed62d06ec76f41492ebdc6cc6538e761e3e (patch)
tree		5357fff9e7ec62c63165946b4fcfb951ac8ca778
parent		66e900a3d225575c8b48b59ae1fe74bb6e5a65cc (diff)
spi: atmel: Implements transfers with bounce buffer
This patch enables SPI DMA transfers for Atmel SAM9 SoCs and implements a
bounce buffer for transfers whose buffers were allocated with vmalloc(). Those
buffers are not cache coherent even after they have been turned into sg lists.
UBIFS is affected by this cache coherency issue.

This patch also reverts "spi: atmel: fix corrupted data issue on SAM9 family
SoCs" (7094576ccdc3acfe1e06a1e2ab547add375baf7f).

Signed-off-by: Radu Pirea <radu.pirea@microchip.com>
Acked-by: Nicolas Ferre <nicolas.ferre@microchip.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
 drivers/spi/spi-atmel.c | 113
 1 file changed, 84 insertions(+), 29 deletions(-)
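The core of the change is the classic bounce-buffer pattern: when a transfer's
tx_buf or rx_buf lives in vmalloc space (not DMA-safe on the VIVT-cache SAM9
cores), the data is staged through a dma_alloc_coherent() buffer instead of
being mapped directly. The sketch below illustrates that pattern in isolation;
it is a simplified illustration, not the driver code itself, and the helper
names (bounce_needed, bounce_stage_tx, bounce_unstage_rx) and the
coherent_tx/coherent_rx parameters are hypothetical.

#include <linux/mm.h>		/* is_vmalloc_addr() */
#include <linux/string.h>	/* memcpy() */
#include <linux/spi/spi.h>	/* struct spi_transfer */

/* A transfer needs the bounce path if either buffer lives in vmalloc space. */
static bool bounce_needed(const struct spi_transfer *xfer)
{
	return is_vmalloc_addr(xfer->tx_buf) || is_vmalloc_addr(xfer->rx_buf);
}

/* Before starting DMA: stage TX data into the coherent bounce buffer. */
static void bounce_stage_tx(void *coherent_tx, const struct spi_transfer *xfer)
{
	if (xfer->tx_buf)
		memcpy(coherent_tx, xfer->tx_buf, xfer->len);
}

/* After DMA completes: copy RX data back to the caller's vmalloc'ed buffer. */
static void bounce_unstage_rx(struct spi_transfer *xfer, const void *coherent_rx)
{
	if (xfer->rx_buf)
		memcpy(xfer->rx_buf, coherent_rx, xfer->len);
}

In the actual hunks below, the staging copy happens in
atmel_spi_next_xfer_dma_submit() before dmaengine_prep_slave_single() is called
on the coherent buffer's DMA address, and the copy back to the caller happens
in dma_callback().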
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 669470971023..4a11fc0d4136 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -291,6 +291,10 @@ struct atmel_spi {
 	struct spi_transfer	*current_transfer;
 	int			current_remaining_bytes;
 	int			done_status;
+	dma_addr_t		dma_addr_rx_bbuf;
+	dma_addr_t		dma_addr_tx_bbuf;
+	void			*addr_rx_bbuf;
+	void			*addr_tx_bbuf;
 
 	struct completion	xfer_completion;
 
@@ -436,6 +440,11 @@ static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
 	spin_unlock_irqrestore(&as->lock, as->flags);
 }
 
+static inline bool atmel_spi_is_vmalloc_xfer(struct spi_transfer *xfer)
+{
+	return is_vmalloc_addr(xfer->tx_buf) || is_vmalloc_addr(xfer->rx_buf);
+}
+
 static inline bool atmel_spi_use_dma(struct atmel_spi *as,
 				     struct spi_transfer *xfer)
 {
@@ -448,7 +457,12 @@ static bool atmel_spi_can_dma(struct spi_master *master,
 {
 	struct atmel_spi *as = spi_master_get_devdata(master);
 
-	return atmel_spi_use_dma(as, xfer);
+	if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5))
+		return atmel_spi_use_dma(as, xfer) &&
+			!atmel_spi_is_vmalloc_xfer(xfer);
+	else
+		return atmel_spi_use_dma(as, xfer);
+
 }
 
 static int atmel_spi_dma_slave_config(struct atmel_spi *as,
@@ -594,6 +608,11 @@ static void dma_callback(void *data)
 	struct spi_master	*master = data;
 	struct atmel_spi	*as = spi_master_get_devdata(master);
 
+	if (is_vmalloc_addr(as->current_transfer->rx_buf) &&
+	    IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+		memcpy(as->current_transfer->rx_buf, as->addr_rx_bbuf,
+		       as->current_transfer->len);
+	}
 	complete(&as->xfer_completion);
 }
 
@@ -744,17 +763,41 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
 		goto err_exit;
 
 	/* Send both scatterlists */
-	rxdesc = dmaengine_prep_slave_sg(rxchan,
-					 xfer->rx_sg.sgl, xfer->rx_sg.nents,
-					 DMA_FROM_DEVICE,
-					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (atmel_spi_is_vmalloc_xfer(xfer) &&
+	    IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+		rxdesc = dmaengine_prep_slave_single(rxchan,
+						     as->dma_addr_rx_bbuf,
+						     xfer->len,
+						     DMA_FROM_DEVICE,
+						     DMA_PREP_INTERRUPT |
+						     DMA_CTRL_ACK);
+	} else {
+		rxdesc = dmaengine_prep_slave_sg(rxchan,
+						 xfer->rx_sg.sgl,
+						 xfer->rx_sg.nents,
+						 DMA_FROM_DEVICE,
+						 DMA_PREP_INTERRUPT |
+						 DMA_CTRL_ACK);
+	}
 	if (!rxdesc)
 		goto err_dma;
 
-	txdesc = dmaengine_prep_slave_sg(txchan,
-					 xfer->tx_sg.sgl, xfer->tx_sg.nents,
-					 DMA_TO_DEVICE,
-					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (atmel_spi_is_vmalloc_xfer(xfer) &&
+	    IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+		memcpy(as->addr_tx_bbuf, xfer->tx_buf, xfer->len);
+		txdesc = dmaengine_prep_slave_single(txchan,
+						     as->dma_addr_tx_bbuf,
+						     xfer->len, DMA_TO_DEVICE,
+						     DMA_PREP_INTERRUPT |
+						     DMA_CTRL_ACK);
+	} else {
+		txdesc = dmaengine_prep_slave_sg(txchan,
+						 xfer->tx_sg.sgl,
+						 xfer->tx_sg.nents,
+						 DMA_TO_DEVICE,
+						 DMA_PREP_INTERRUPT |
+						 DMA_CTRL_ACK);
+	}
 	if (!txdesc)
 		goto err_dma;
 
@@ -1426,27 +1469,7 @@ static void atmel_get_caps(struct atmel_spi *as)
 
 	as->caps.is_spi2 = version > 0x121;
 	as->caps.has_wdrbt = version >= 0x210;
-#ifdef CONFIG_SOC_SAM_V4_V5
-	/*
-	 * Atmel SoCs based on ARM9 (SAM9x) cores should not use spi_map_buf()
-	 * since this later function tries to map buffers with dma_map_sg()
-	 * even if they have not been allocated inside DMA-safe areas.
-	 * On SoCs based on Cortex A5 (SAMA5Dx), it works anyway because for
-	 * those ARM cores, the data cache follows the PIPT model.
-	 * Also the L2 cache controller of SAMA5D2 uses the PIPT model too.
-	 * In case of PIPT caches, there cannot be cache aliases.
-	 * However on ARM9 cores, the data cache follows the VIVT model, hence
-	 * the cache aliases issue can occur when buffers are allocated from
-	 * DMA-unsafe areas, by vmalloc() for instance, where cache coherency is
-	 * not taken into account or at least not handled completely (cache
-	 * lines of aliases are not invalidated).
-	 * This is not a theorical issue: it was reproduced when trying to mount
-	 * a UBI file-system on a at91sam9g35ek board.
-	 */
-	as->caps.has_dma_support = false;
-#else
 	as->caps.has_dma_support = version >= 0x212;
-#endif
 	as->caps.has_pdc_support = version < 0x212;
 }
 
@@ -1592,6 +1615,30 @@ static int atmel_spi_probe(struct platform_device *pdev)
 		as->use_pdc = true;
 	}
 
+	if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+		as->addr_rx_bbuf = dma_alloc_coherent(&pdev->dev,
+						      SPI_MAX_DMA_XFER,
+						      &as->dma_addr_rx_bbuf,
+						      GFP_KERNEL | GFP_DMA);
+		if (!as->addr_rx_bbuf) {
+			as->use_dma = false;
+		} else {
+			as->addr_tx_bbuf = dma_alloc_coherent(&pdev->dev,
+					SPI_MAX_DMA_XFER,
+					&as->dma_addr_tx_bbuf,
+					GFP_KERNEL | GFP_DMA);
+			if (!as->addr_tx_bbuf) {
+				as->use_dma = false;
+				dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+						  as->addr_rx_bbuf,
+						  as->dma_addr_rx_bbuf);
+			}
+		}
+		if (!as->use_dma)
+			dev_info(master->dev.parent,
+				 " can not allocate dma coherent memory\n");
+	}
+
 	if (as->caps.has_dma_support && !as->use_dma)
 		dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");
 
@@ -1664,6 +1711,14 @@ static int atmel_spi_remove(struct platform_device *pdev)
 	if (as->use_dma) {
 		atmel_spi_stop_dma(master);
 		atmel_spi_release_dma(master);
+		if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+			dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+					  as->addr_tx_bbuf,
+					  as->dma_addr_tx_bbuf);
+			dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+					  as->addr_rx_bbuf,
+					  as->dma_addr_rx_bbuf);
+		}
 	}
 
 	spin_lock_irq(&as->lock);