diff options
author | Mark Brown <broonie@linaro.org> | 2014-01-28 15:17:03 -0500 |
---|---|---|
committer | Mark Brown <broonie@linaro.org> | 2014-02-04 15:31:33 -0500 |
commit | 3a2eba9bd0a6447dfbc01635e4cd0689f5f2bdad (patch) | |
tree | 642ccd6b3b125435d4e6efa2dce51e2ccb34c485 | |
parent | 3700c6eb1e41a898c35c8dd4d5b10dc65fdaf486 (diff) |
spi: Provide core support for full duplex devices
It is fairly common for SPI devices to require that one or both transfer
directions are always active. Currently drivers open-code this in various
ways with varying degrees of efficiency. Start factoring this out by
providing flags SPI_MASTER_MUST_TX and SPI_MASTER_MUST_RX. These will cause
the core to provide buffers for the requested direction if none are
specified in the underlying transfer.
Currently this is fairly inefficient since we actually allocate a data
buffer which may get large; support for mapping transfers using a
scatterlist will allow us to avoid this for DMA-based transfers.
Signed-off-by: Mark Brown <broonie@linaro.org>
-rw-r--r-- | drivers/spi/spi.c | 47 | ||||
-rw-r--r-- | include/linux/spi/spi.h | 6 |
2 files changed, 53 insertions, 0 deletions
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index bcdaa74f1c8e..bb7cf561c311 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -587,6 +587,49 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg) | |||
587 | struct device *dev = master->dev.parent; | 587 | struct device *dev = master->dev.parent; |
588 | struct device *tx_dev, *rx_dev; | 588 | struct device *tx_dev, *rx_dev; |
589 | struct spi_transfer *xfer; | 589 | struct spi_transfer *xfer; |
590 | void *tmp; | ||
591 | size_t max_tx, max_rx; | ||
592 | |||
593 | if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { | ||
594 | max_tx = 0; | ||
595 | max_rx = 0; | ||
596 | |||
597 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | ||
598 | if ((master->flags & SPI_MASTER_MUST_TX) && | ||
599 | !xfer->tx_buf) | ||
600 | max_tx = max(xfer->len, max_tx); | ||
601 | if ((master->flags & SPI_MASTER_MUST_RX) && | ||
602 | !xfer->rx_buf) | ||
603 | max_rx = max(xfer->len, max_rx); | ||
604 | } | ||
605 | |||
606 | if (max_tx) { | ||
607 | tmp = krealloc(master->dummy_tx, max_tx, | ||
608 | GFP_KERNEL | GFP_DMA); | ||
609 | if (!tmp) | ||
610 | return -ENOMEM; | ||
611 | master->dummy_tx = tmp; | ||
612 | memset(tmp, 0, max_tx); | ||
613 | } | ||
614 | |||
615 | if (max_rx) { | ||
616 | tmp = krealloc(master->dummy_rx, max_rx, | ||
617 | GFP_KERNEL | GFP_DMA); | ||
618 | if (!tmp) | ||
619 | return -ENOMEM; | ||
620 | master->dummy_rx = tmp; | ||
621 | } | ||
622 | |||
623 | if (max_tx || max_rx) { | ||
624 | list_for_each_entry(xfer, &msg->transfers, | ||
625 | transfer_list) { | ||
626 | if (!xfer->tx_buf) | ||
627 | xfer->tx_buf = master->dummy_tx; | ||
628 | if (!xfer->rx_buf) | ||
629 | xfer->rx_buf = master->dummy_rx; | ||
630 | } | ||
631 | } | ||
632 | } | ||
590 | 633 | ||
591 | if (msg->is_dma_mapped || !master->can_dma) | 634 | if (msg->is_dma_mapped || !master->can_dma) |
592 | return 0; | 635 | return 0; |
@@ -759,6 +802,10 @@ static void spi_pump_messages(struct kthread_work *work) | |||
759 | } | 802 | } |
760 | master->busy = false; | 803 | master->busy = false; |
761 | spin_unlock_irqrestore(&master->queue_lock, flags); | 804 | spin_unlock_irqrestore(&master->queue_lock, flags); |
805 | kfree(master->dummy_rx); | ||
806 | master->dummy_rx = NULL; | ||
807 | kfree(master->dummy_tx); | ||
808 | master->dummy_tx = NULL; | ||
762 | if (master->unprepare_transfer_hardware && | 809 | if (master->unprepare_transfer_hardware && |
763 | master->unprepare_transfer_hardware(master)) | 810 | master->unprepare_transfer_hardware(master)) |
764 | dev_err(&master->dev, | 811 | dev_err(&master->dev, |
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index b354dcbed55b..31a5b0ee93ec 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
@@ -347,6 +347,8 @@ struct spi_master { | |||
347 | #define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */ | 347 | #define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */ |
348 | #define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */ | 348 | #define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */ |
349 | #define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */ | 349 | #define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */ |
350 | #define SPI_MASTER_MUST_RX BIT(3) /* requires rx */ | ||
351 | #define SPI_MASTER_MUST_TX BIT(4) /* requires tx */ | ||
350 | 352 | ||
351 | /* lock and mutex for SPI bus locking */ | 353 | /* lock and mutex for SPI bus locking */ |
352 | spinlock_t bus_lock_spinlock; | 354 | spinlock_t bus_lock_spinlock; |
@@ -443,6 +445,10 @@ struct spi_master { | |||
443 | /* DMA channels for use with core dmaengine helpers */ | 445 | /* DMA channels for use with core dmaengine helpers */ |
444 | struct dma_chan *dma_tx; | 446 | struct dma_chan *dma_tx; |
445 | struct dma_chan *dma_rx; | 447 | struct dma_chan *dma_rx; |
448 | |||
449 | /* dummy data for full duplex devices */ | ||
450 | void *dummy_rx; | ||
451 | void *dummy_tx; | ||
446 | }; | 452 | }; |
447 | 453 | ||
448 | static inline void *spi_master_get_devdata(struct spi_master *master) | 454 | static inline void *spi_master_get_devdata(struct spi_master *master) |