diff options
author | Brian Niebuhr <bniebuhr@efjohnson.com> | 2010-10-01 00:59:29 -0400 |
---|---|---|
committer | Sekhar Nori <nsekhar@ti.com> | 2010-11-18 08:08:34 -0500 |
commit | e91c659bbcf2f47519260182a75f64ede34df3ca (patch) | |
tree | f72c8d87f1b28b2119b1a3f17b85e38e5e4cc13f /drivers | |
parent | d3f7141cbf4580b2f18f93940df29cf0c15e7ef5 (diff) |
spi: davinci: always start receive DMA
In keeping with the full duplex nature of the SPI bus, always
start receive DMA along with transmit DMA.
If there is no receive buffer provided with the transfer, use
a temporary buffer to receive the data to be thrown away.
[michael.williamson@criticallink.com: receive DMA size should
be same as transfer length to avoid hang-up when transfer length
is smaller than temporary rx buffer size (rx buffer not provided)]
Signed-off-by: Brian Niebuhr <bniebuhr@efjohnson.com>
Tested-By: Michael Williamson <michael.williamson@criticallink.com>
Signed-off-by: Sekhar Nori <nsekhar@ti.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/spi/davinci_spi.c | 70 |
1 files changed, 42 insertions, 28 deletions
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c index f5129390bc2d..705d0069bce0 100644 --- a/drivers/spi/davinci_spi.c +++ b/drivers/spi/davinci_spi.c | |||
@@ -138,6 +138,8 @@ struct davinci_spi { | |||
138 | 138 | ||
139 | const void *tx; | 139 | const void *tx; |
140 | void *rx; | 140 | void *rx; |
141 | #define SPI_TMP_BUFSZ (SMP_CACHE_BYTES + 1) | ||
142 | u8 rx_tmp_buf[SPI_TMP_BUFSZ]; | ||
141 | int rcount; | 143 | int rcount; |
142 | int wcount; | 144 | int wcount; |
143 | struct davinci_spi_dma dma_channels; | 145 | struct davinci_spi_dma dma_channels; |
@@ -716,10 +718,12 @@ static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t) | |||
716 | struct davinci_spi *davinci_spi; | 718 | struct davinci_spi *davinci_spi; |
717 | int int_status = 0; | 719 | int int_status = 0; |
718 | int count; | 720 | int count; |
721 | unsigned rx_buf_count; | ||
719 | struct davinci_spi_dma *davinci_spi_dma; | 722 | struct davinci_spi_dma *davinci_spi_dma; |
720 | int data_type, ret; | 723 | int data_type, ret; |
721 | unsigned long tx_reg, rx_reg; | 724 | unsigned long tx_reg, rx_reg; |
722 | struct davinci_spi_platform_data *pdata; | 725 | struct davinci_spi_platform_data *pdata; |
726 | void *rx_buf; | ||
723 | struct device *sdev; | 727 | struct device *sdev; |
724 | 728 | ||
725 | davinci_spi = spi_master_get_devdata(spi->master); | 729 | davinci_spi = spi_master_get_devdata(spi->master); |
@@ -778,50 +782,60 @@ static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t) | |||
778 | t->tx_buf ? data_type : 0, 0); | 782 | t->tx_buf ? data_type : 0, 0); |
779 | edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0); | 783 | edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0); |
780 | 784 | ||
785 | /* | ||
786 | * Receive DMA setup | ||
787 | * | ||
788 | * If there is receive buffer, use it to receive data. If there | ||
789 | * is none provided, use a temporary receive buffer. Set the | ||
790 | * destination B index to 0 so effectively only one byte is used | ||
791 | * in the temporary buffer (address does not increment). | ||
792 | * | ||
793 | * The source of receive data is the receive data register. The | ||
794 | * source address never increments. | ||
795 | */ | ||
796 | |||
781 | if (t->rx_buf) { | 797 | if (t->rx_buf) { |
782 | t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count, | 798 | rx_buf = t->rx_buf; |
783 | DMA_FROM_DEVICE); | 799 | rx_buf_count = count; |
784 | if (dma_mapping_error(&spi->dev, t->rx_dma)) { | 800 | } else { |
785 | dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", | 801 | rx_buf = davinci_spi->rx_tmp_buf; |
786 | count); | 802 | rx_buf_count = sizeof(davinci_spi->rx_tmp_buf); |
787 | if (t->tx_buf != NULL) | 803 | } |
788 | dma_unmap_single(NULL, t->tx_dma, | 804 | |
789 | count, DMA_TO_DEVICE); | 805 | t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count, |
790 | return -ENOMEM; | 806 | DMA_FROM_DEVICE); |
791 | } | 807 | if (dma_mapping_error(&spi->dev, t->rx_dma)) { |
792 | edma_set_transfer_params(davinci_spi_dma->dma_rx_channel, | 808 | dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", |
793 | data_type, count, 1, 0, ASYNC); | 809 | rx_buf_count); |
794 | edma_set_src(davinci_spi_dma->dma_rx_channel, | 810 | if (t->tx_buf) |
795 | rx_reg, INCR, W8BIT); | 811 | dma_unmap_single(NULL, t->tx_dma, count, DMA_TO_DEVICE); |
796 | edma_set_dest(davinci_spi_dma->dma_rx_channel, | 812 | return -ENOMEM; |
797 | t->rx_dma, INCR, W8BIT); | ||
798 | edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0); | ||
799 | edma_set_dest_index(davinci_spi_dma->dma_rx_channel, | ||
800 | data_type, 0); | ||
801 | } | 813 | } |
802 | 814 | ||
815 | edma_set_transfer_params(davinci_spi_dma->dma_rx_channel, data_type, | ||
816 | count, 1, 0, ASYNC); | ||
817 | edma_set_src(davinci_spi_dma->dma_rx_channel, rx_reg, INCR, W8BIT); | ||
818 | edma_set_dest(davinci_spi_dma->dma_rx_channel, t->rx_dma, INCR, W8BIT); | ||
819 | edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0); | ||
820 | edma_set_dest_index(davinci_spi_dma->dma_rx_channel, | ||
821 | t->rx_buf ? data_type : 0, 0); | ||
822 | |||
803 | if (pdata->cshold_bug) { | 823 | if (pdata->cshold_bug) { |
804 | u16 spidat1 = ioread16(davinci_spi->base + SPIDAT1 + 2); | 824 | u16 spidat1 = ioread16(davinci_spi->base + SPIDAT1 + 2); |
805 | iowrite16(spidat1, davinci_spi->base + SPIDAT1 + 2); | 825 | iowrite16(spidat1, davinci_spi->base + SPIDAT1 + 2); |
806 | } | 826 | } |
807 | 827 | ||
808 | if (t->rx_buf) | 828 | edma_start(davinci_spi_dma->dma_rx_channel); |
809 | edma_start(davinci_spi_dma->dma_rx_channel); | ||
810 | |||
811 | edma_start(davinci_spi_dma->dma_tx_channel); | 829 | edma_start(davinci_spi_dma->dma_tx_channel); |
812 | davinci_spi_set_dma_req(spi, 1); | 830 | davinci_spi_set_dma_req(spi, 1); |
813 | 831 | ||
814 | wait_for_completion_interruptible(&davinci_spi_dma->dma_tx_completion); | 832 | wait_for_completion_interruptible(&davinci_spi_dma->dma_tx_completion); |
815 | 833 | wait_for_completion_interruptible(&davinci_spi_dma->dma_rx_completion); | |
816 | if (t->rx_buf) | ||
817 | wait_for_completion_interruptible( | ||
818 | &davinci_spi_dma->dma_rx_completion); | ||
819 | 834 | ||
820 | if (t->tx_buf) | 835 | if (t->tx_buf) |
821 | dma_unmap_single(NULL, t->tx_dma, count, DMA_TO_DEVICE); | 836 | dma_unmap_single(NULL, t->tx_dma, count, DMA_TO_DEVICE); |
822 | 837 | ||
823 | if (t->rx_buf) | 838 | dma_unmap_single(NULL, t->rx_dma, rx_buf_count, DMA_FROM_DEVICE); |
824 | dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE); | ||
825 | 839 | ||
826 | /* | 840 | /* |
827 | * Check for bit error, desync error,parity error,timeout error and | 841 | * Check for bit error, desync error,parity error,timeout error and |