author    Mark Brown <broonie@linaro.org>    2014-01-16 07:22:43 -0500
committer Mark Brown <broonie@linaro.org>    2014-02-03 08:05:21 -0500
commit    99adef310f682d6343cb40c1f6c9c25a4b3a450d (patch)
tree      40310c296f820941de639b761927ee6375a623ce
parent    38dbfb59d1175ef458d006556061adeaa8751b72 (diff)
spi: Provide core support for DMA mapping transfers
The process of DMA mapping buffers for SPI transfers does not vary
between devices, so in order to save duplication of code in drivers it
can be factored out into the core, allowing it to be integrated with
the ongoing work on factoring out the common elements of the data
path, including more sharing of dmaengine code.

In order to use this, masters need to provide a can_dma() operation,
and while the hardware is prepared they should ensure that DMA
channels are provided in dma_tx and dma_rx. The core will then ensure
that the buffers are mapped for DMA prior to calling
transfer_one_message().

Currently the cleanup on error is not complete; this needs to be
improved.

Signed-off-by: Mark Brown <broonie@linaro.org>
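As an illustration of the new hooks, a master driver opting in might look roughly like the sketch below. All foo_ names are invented for this example and the 32 byte threshold is arbitrary; only can_dma, dma_tx and dma_rx come from this patch.

    #include <linux/dmaengine.h>
    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>

    /* Hypothetical driver sketch, not part of this patch. */
    static bool foo_can_dma(struct spi_master *master,
                            struct spi_device *spi,
                            struct spi_transfer *xfer)
    {
            /* Only DMA map transfers that are worth the setup cost. */
            return xfer->len >= 32;
    }

    static int foo_probe(struct platform_device *pdev)
    {
            struct spi_master *master;

            master = spi_alloc_master(&pdev->dev, 0);
            if (!master)
                    return -ENOMEM;

            /* The channels must be valid whenever the core may map a
             * message, so only advertise can_dma() once both exist.
             */
            master->dma_tx = dma_request_slave_channel(&pdev->dev, "tx");
            master->dma_rx = dma_request_slave_channel(&pdev->dev, "rx");
            if (master->dma_tx && master->dma_rx)
                    master->can_dma = foo_can_dma;

            return devm_spi_register_master(&pdev->dev, master);
    }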
-rw-r--r--  drivers/spi/spi.c        | 82 ++++++++++++++++++++++++++++
-rw-r--r--  include/linux/spi/spi.h  | 18 ++++++++
2 files changed, 100 insertions(+), 0 deletions(-)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 23756b0f9036..bcdaa74f1c8e 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -24,6 +24,8 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/mutex.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
@@ -580,6 +582,77 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
 	spi->master->set_cs(spi, !enable);
 }
 
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+	struct device *dev = master->dev.parent;
+	struct device *tx_dev, *rx_dev;
+	struct spi_transfer *xfer;
+
+	if (msg->is_dma_mapped || !master->can_dma)
+		return 0;
+
+	tx_dev = &master->dma_tx->dev->device;
+	rx_dev = &master->dma_rx->dev->device;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (!master->can_dma(master, msg->spi, xfer))
+			continue;
+
+		if (xfer->tx_buf != NULL) {
+			xfer->tx_dma = dma_map_single(tx_dev,
+						      (void *)xfer->tx_buf,
+						      xfer->len,
+						      DMA_TO_DEVICE);
+			if (dma_mapping_error(dev, xfer->tx_dma)) {
+				dev_err(dev, "dma_map_single Tx failed\n");
+				return -ENOMEM;
+			}
+		}
+
+		if (xfer->rx_buf != NULL) {
+			xfer->rx_dma = dma_map_single(rx_dev,
+						      xfer->rx_buf, xfer->len,
+						      DMA_FROM_DEVICE);
+			if (dma_mapping_error(dev, xfer->rx_dma)) {
+				dev_err(dev, "dma_map_single Rx failed\n");
+				dma_unmap_single(tx_dev, xfer->tx_dma,
+						 xfer->len, DMA_TO_DEVICE);
+				return -ENOMEM;
+			}
+		}
+	}
+
+	master->cur_msg_mapped = true;
+
+	return 0;
+}
+
+static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+{
+	struct spi_transfer *xfer;
+	struct device *tx_dev, *rx_dev;
+
+	if (!master->cur_msg_mapped || msg->is_dma_mapped || !master->can_dma)
+		return 0;
+
+	tx_dev = &master->dma_tx->dev->device;
+	rx_dev = &master->dma_rx->dev->device;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (!master->can_dma(master, msg->spi, xfer))
+			continue;
+
+		if (xfer->rx_buf)
+			dma_unmap_single(rx_dev, xfer->rx_dma, xfer->len,
+					 DMA_FROM_DEVICE);
+		if (xfer->tx_buf)
+			dma_unmap_single(tx_dev, xfer->tx_dma, xfer->len,
+					 DMA_TO_DEVICE);
+	}
+
+	return 0;
+}
+
 /*
  * spi_transfer_one_message - Default implementation of transfer_one_message()
  *
@@ -752,6 +825,13 @@ static void spi_pump_messages(struct kthread_work *work)
 		master->cur_msg_prepared = true;
 	}
 
+	ret = spi_map_msg(master, master->cur_msg);
+	if (ret) {
+		master->cur_msg->status = ret;
+		spi_finalize_current_message(master);
+		return;
+	}
+
 	ret = master->transfer_one_message(master, master->cur_msg);
 	if (ret) {
 		dev_err(&master->dev,
@@ -841,6 +921,8 @@ void spi_finalize_current_message(struct spi_master *master)
 	queue_kthread_work(&master->kworker, &master->pump_messages);
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 
+	spi_unmap_msg(master, mesg);
+
 	if (master->cur_msg_prepared && master->unprepare_message) {
 		ret = master->unprepare_message(master, mesg);
 		if (ret) {
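Since the core has already mapped the buffers by the time transfer_one_message() runs, a driver's data path can hand the pre-mapped addresses straight to dmaengine. Below is a minimal sketch of the TX side; foo_dma_submit_tx is an invented name, while the prep/submit calls are standard dmaengine API.

    /* Hypothetical sketch, not part of this patch: consuming the
     * address that spi_map_msg() stored in xfer->tx_dma.
     */
    static int foo_dma_submit_tx(struct spi_master *master,
                                 struct spi_transfer *xfer)
    {
            struct dma_async_tx_descriptor *desc;

            desc = dmaengine_prep_slave_single(master->dma_tx,
                                               xfer->tx_dma, xfer->len,
                                               DMA_MEM_TO_DEV,
                                               DMA_PREP_INTERRUPT |
                                               DMA_CTRL_ACK);
            if (!desc)
                    return -EIO;

            dmaengine_submit(desc);
            dma_async_issue_pending(master->dma_tx);

            return 0;
    }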
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a1d4ca290862..b354dcbed55b 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -25,6 +25,8 @@
 #include <linux/kthread.h>
 #include <linux/completion.h>
 
+struct dma_chan;
+
 /*
  * INTERFACES between SPI master-side drivers and SPI infrastructure.
  * (There's no SPI slave support for Linux yet...)
@@ -387,6 +389,17 @@ struct spi_master {
 	void			(*cleanup)(struct spi_device *spi);
 
 	/*
+	 * Used to enable core support for DMA handling, if can_dma()
+	 * exists and returns true then the transfer will be mapped
+	 * prior to transfer_one() being called.  The driver should
+	 * not modify or store xfer and dma_tx and dma_rx must be set
+	 * while the device is prepared.
+	 */
+	bool			(*can_dma)(struct spi_master *master,
+					   struct spi_device *spi,
+					   struct spi_transfer *xfer);
+
+	/*
 	 * These hooks are for drivers that want to use the generic
 	 * master transfer queueing mechanism. If these are used, the
 	 * transfer() function above must NOT be specified by the driver.
@@ -404,6 +417,7 @@ struct spi_master {
 	bool			rt;
 	bool			auto_runtime_pm;
 	bool			cur_msg_prepared;
+	bool			cur_msg_mapped;
 	struct completion	xfer_completion;
 
 	int (*prepare_transfer_hardware)(struct spi_master *master);
@@ -425,6 +439,10 @@ struct spi_master {
 
 	/* gpio chip select */
 	int			*cs_gpios;
+
+	/* DMA channels for use with core dmaengine helpers */
+	struct dma_chan		*dma_tx;
+	struct dma_chan		*dma_rx;
 };
 
 static inline void *spi_master_get_devdata(struct spi_master *master)
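The comment on can_dma() above requires dma_tx and dma_rx to be set while the device is prepared. One way a driver could honour that, sketched here with invented foo_ names, is to acquire the channels in prepare_transfer_hardware() and drop them again in the matching unprepare hook:

    /* Hypothetical sketch, not part of this patch. */
    static void foo_release_dma(struct spi_master *master)
    {
            if (master->dma_tx)
                    dma_release_channel(master->dma_tx);
            if (master->dma_rx)
                    dma_release_channel(master->dma_rx);
            master->dma_tx = NULL;
            master->dma_rx = NULL;
    }

    static int foo_prepare_transfer_hardware(struct spi_master *master)
    {
            master->dma_tx = dma_request_slave_channel(master->dev.parent,
                                                       "tx");
            master->dma_rx = dma_request_slave_channel(master->dev.parent,
                                                       "rx");
            if (!master->dma_tx || !master->dma_rx) {
                    foo_release_dma(master);
                    return -EBUSY;  /* a real driver might fall back to PIO */
            }

            return 0;
    }

    static int foo_unprepare_transfer_hardware(struct spi_master *master)
    {
            foo_release_dma(master);
            return 0;
    }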