author     Linus Torvalds <torvalds@linux-foundation.org>  2014-05-21 05:53:55 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-05-21 05:53:55 -0400
commit     b2e3432af1546a9f44a8278a8a91abfbd439259e (patch)
tree       9b790675f41496d1056aa444232202dc14913ed8
parent     081069ff817ea7434016c482156e1ef0a7ffb4f3 (diff)
parent     fd30c37331ad8ed7c7e0e4c21f5ee309fec21d7d (diff)
Merge tag 'spi-v3.15-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi
Pull spi fixes from Mark Brown:
 "A few core fixes around outlying cases here, nothing that should affect
  most users but useful fixes. The diffstat is rather larger than one might
  hope due to some simple code motion in the fix for !CONFIG_HAS_DMA; the
  actual meaningful change is much smaller.

   - Fix handling of unsupported dual and quad modes on slave registration
     so that drivers that can degrade gracefully do so, preventing
     regressions for drivers as this support is added.

   - Fix the build in !CONFIG_HAS_DMA configurations following the addition
     of generic DMA mapping support.

   - Fix error handling for queue creation, which wider kernel changes have
     made easier to trigger.

   - A couple of driver-specific fixes."

* tag 'spi-v3.15-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi:
  spi/pxa2xx: Prevent DMA from transferring too many bytes
  spi: core: Don't destroy master queue if we fail to create it
  spi: qup: Fix return value checking for pm_runtime_get_sync()
  spi: core: Protect DMA code by #ifdef CONFIG_HAS_DMA
  spi: core: Ignore unsupported Dual/Quad Transfer Mode bits
-rw-r--r--  drivers/spi/spi-pxa2xx-dma.c   16
-rw-r--r--  drivers/spi/spi-qup.c           2
-rw-r--r--  drivers/spi/spi.c             124
3 files changed, 78 insertions, 64 deletions
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 713af4806f26..f6759dc0153b 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -29,18 +29,6 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
         struct sg_table *sgt;
         void *buf, *pbuf;
 
-        /*
-         * Some DMA controllers have problems transferring buffers that are
-         * not multiple of 4 bytes. So we truncate the transfer so that it
-         * is suitable for such controllers, and handle the trailing bytes
-         * manually after the DMA completes.
-         *
-         * REVISIT: It would be better if this information could be
-         * retrieved directly from the DMA device in a similar way than
-         * ->copy_align etc. is done.
-         */
-        len = ALIGN(drv_data->len, 4);
-
         if (dir == DMA_TO_DEVICE) {
                 dmadev = drv_data->tx_chan->device->dev;
                 sgt = &drv_data->tx_sgt;
@@ -144,12 +132,8 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
         if (!error) {
                 pxa2xx_spi_unmap_dma_buffers(drv_data);
 
-                /* Handle the last bytes of unaligned transfer */
                 drv_data->tx += drv_data->tx_map_len;
-                drv_data->write(drv_data);
-
                 drv_data->rx += drv_data->rx_map_len;
-                drv_data->read(drv_data);
 
                 msg->actual_length += drv_data->len;
                 msg->state = pxa2xx_spi_next_transfer(drv_data);
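The dropped workaround rounded the DMA mapping length up to a multiple of 4 (with a matching trailing-bytes step in the completion path), so the mapped length could exceed the requested transfer by up to three bytes; that is the "too many bytes" the patch title refers to. A minimal sketch of the rounding, using a simplified stand-in for the kernel's ALIGN() macro (illustration only, not part of the patch):

/* Simplified, power-of-two-only stand-in for the kernel's ALIGN() macro. */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

/* ALIGN_UP(7, 4) == 8: a 7-byte transfer was mapped as 8 DMA bytes. */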
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index b032e8885e24..78c66e3c53ed 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -734,7 +734,7 @@ static int spi_qup_remove(struct platform_device *pdev)
         int ret;
 
         ret = pm_runtime_get_sync(&pdev->dev);
-        if (ret)
+        if (ret < 0)
                 return ret;
 
         ret = spi_qup_set_state(controller, QUP_STATE_RESET);
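For context, and not part of the patch: pm_runtime_get_sync() returns a negative errno on failure, 0 on success, and 1 when the device was already active, so the old "if (ret)" check treated the benign positive return as a fatal error in remove(). The intended check is roughly:

        ret = pm_runtime_get_sync(&pdev->dev); /* may legitimately return 1 */
        if (ret < 0)
                return ret;                     /* only negative values are errors */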
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4eb9bf02996c..939edf473235 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -580,6 +580,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
         spi->master->set_cs(spi, !enable);
 }
 
+#ifdef CONFIG_HAS_DMA
 static int spi_map_buf(struct spi_master *master, struct device *dev,
                        struct sg_table *sgt, void *buf, size_t len,
                        enum dma_data_direction dir)
@@ -637,55 +638,12 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev,
         }
 }
 
-static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
 {
         struct device *tx_dev, *rx_dev;
         struct spi_transfer *xfer;
-        void *tmp;
-        unsigned int max_tx, max_rx;
         int ret;
 
-        if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
-                max_tx = 0;
-                max_rx = 0;
-
-                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-                        if ((master->flags & SPI_MASTER_MUST_TX) &&
-                            !xfer->tx_buf)
-                                max_tx = max(xfer->len, max_tx);
-                        if ((master->flags & SPI_MASTER_MUST_RX) &&
-                            !xfer->rx_buf)
-                                max_rx = max(xfer->len, max_rx);
-                }
-
-                if (max_tx) {
-                        tmp = krealloc(master->dummy_tx, max_tx,
-                                       GFP_KERNEL | GFP_DMA);
-                        if (!tmp)
-                                return -ENOMEM;
-                        master->dummy_tx = tmp;
-                        memset(tmp, 0, max_tx);
-                }
-
-                if (max_rx) {
-                        tmp = krealloc(master->dummy_rx, max_rx,
-                                       GFP_KERNEL | GFP_DMA);
-                        if (!tmp)
-                                return -ENOMEM;
-                        master->dummy_rx = tmp;
-                }
-
-                if (max_tx || max_rx) {
-                        list_for_each_entry(xfer, &msg->transfers,
-                                            transfer_list) {
-                                if (!xfer->tx_buf)
-                                        xfer->tx_buf = master->dummy_tx;
-                                if (!xfer->rx_buf)
-                                        xfer->rx_buf = master->dummy_rx;
-                        }
-                }
-        }
-
         if (!master->can_dma)
                 return 0;
 
@@ -742,6 +700,69 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
 
         return 0;
 }
+#else /* !CONFIG_HAS_DMA */
+static inline int __spi_map_msg(struct spi_master *master,
+                                struct spi_message *msg)
+{
+        return 0;
+}
+
+static inline int spi_unmap_msg(struct spi_master *master,
+                                struct spi_message *msg)
+{
+        return 0;
+}
+#endif /* !CONFIG_HAS_DMA */
+
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+        struct spi_transfer *xfer;
+        void *tmp;
+        unsigned int max_tx, max_rx;
+
+        if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+                max_tx = 0;
+                max_rx = 0;
+
+                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+                        if ((master->flags & SPI_MASTER_MUST_TX) &&
+                            !xfer->tx_buf)
+                                max_tx = max(xfer->len, max_tx);
+                        if ((master->flags & SPI_MASTER_MUST_RX) &&
+                            !xfer->rx_buf)
+                                max_rx = max(xfer->len, max_rx);
+                }
+
+                if (max_tx) {
+                        tmp = krealloc(master->dummy_tx, max_tx,
+                                       GFP_KERNEL | GFP_DMA);
+                        if (!tmp)
+                                return -ENOMEM;
+                        master->dummy_tx = tmp;
+                        memset(tmp, 0, max_tx);
+                }
+
+                if (max_rx) {
+                        tmp = krealloc(master->dummy_rx, max_rx,
+                                       GFP_KERNEL | GFP_DMA);
+                        if (!tmp)
+                                return -ENOMEM;
+                        master->dummy_rx = tmp;
+                }
+
+                if (max_tx || max_rx) {
+                        list_for_each_entry(xfer, &msg->transfers,
+                                            transfer_list) {
+                                if (!xfer->tx_buf)
+                                        xfer->tx_buf = master->dummy_tx;
+                                if (!xfer->rx_buf)
+                                        xfer->rx_buf = master->dummy_rx;
+                        }
+                }
+        }
+
+        return __spi_map_msg(master, msg);
+}
 
 /*
  * spi_transfer_one_message - Default implementation of transfer_one_message()
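/*
 * Context note, not part of the diff: the dummy-buffer handling for
 * SPI_MASTER_MUST_TX/SPI_MASTER_MUST_RX does not need the DMA API, so it
 * stays in spi_map_msg(), while the actual scatterlist mapping moves into
 * __spi_map_msg().  When CONFIG_HAS_DMA is not set, __spi_map_msg() and
 * spi_unmap_msg() collapse into inline stubs that return 0, which is what
 * fixes the !CONFIG_HAS_DMA build after generic DMA mapping support was
 * added.
 */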
@@ -1151,7 +1172,6 @@ static int spi_master_initialize_queue(struct spi_master *master)
 {
         int ret;
 
-        master->queued = true;
         master->transfer = spi_queued_transfer;
         if (!master->transfer_one_message)
                 master->transfer_one_message = spi_transfer_one_message;
@@ -1162,6 +1182,7 @@ static int spi_master_initialize_queue(struct spi_master *master)
                 dev_err(&master->dev, "problem initializing queue\n");
                 goto err_init_queue;
         }
+        master->queued = true;
         ret = spi_start_queue(master);
         if (ret) {
                 dev_err(&master->dev, "problem starting queue\n");
@@ -1171,8 +1192,8 @@ static int spi_master_initialize_queue(struct spi_master *master)
         return 0;
 
 err_start_queue:
-err_init_queue:
         spi_destroy_queue(master);
+err_init_queue:
         return ret;
 }
 
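/*
 * Context note, not part of the diff: previously master->queued was set
 * before spi_init_queue(), and a failed init jumped to an err_init_queue
 * label that fell through into spi_destroy_queue() on a queue that was
 * never created.  Setting the flag only after a successful init and moving
 * the label below the destroy call keeps the unwind path limited to what
 * was actually set up.
 */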
@@ -1756,7 +1777,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
  */
 int spi_setup(struct spi_device *spi)
 {
-        unsigned        bad_bits;
+        unsigned        bad_bits, ugly_bits;
         int             status = 0;
 
         /* check mode to prevent that DUAL and QUAD set at the same time
@@ -1776,6 +1797,15 @@ int spi_setup(struct spi_device *spi)
          * that aren't supported with their current master
          */
         bad_bits = spi->mode & ~spi->master->mode_bits;
+        ugly_bits = bad_bits &
+                    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
+        if (ugly_bits) {
+                dev_warn(&spi->dev,
+                         "setup: ignoring unsupported mode bits %x\n",
+                         ugly_bits);
+                spi->mode &= ~ugly_bits;
+                bad_bits &= ~ugly_bits;
+        }
         if (bad_bits) {
                 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
                         bad_bits);
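A hedged usage sketch (hypothetical driver code, not from this series): with the ugly_bits handling above, a client that merely prefers dual or quad I/O can request it and then check spi->mode after spi_setup(), instead of having setup fail outright on masters that lack those mode bits.

static int example_flash_probe(struct spi_device *spi)
{
        int ret;

        /* Ask for quad TX if available; the core now clears the bit (with a
         * warning) when the master cannot do it, rather than failing setup.
         */
        spi->mode |= SPI_TX_QUAD;

        ret = spi_setup(spi);
        if (ret)
                return ret;

        if (!(spi->mode & SPI_TX_QUAD))
                dev_info(&spi->dev, "quad TX not supported, using single wire\n");

        return 0;
}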