author	Mark Brown <broonie@linaro.org>	2014-01-16 07:25:46 -0500
committer	Mark Brown <broonie@linaro.org>	2014-02-07 08:27:44 -0500
commit	3f29588795bdd0d48f0ecc94e3ee06c128ca3300 (patch)
tree	88b6d1ee290f9fc94132df5debfc0f1f35218d9b
parent	6ad45a27cbe343ec8d7888e5edf6335499a4b555 (diff)
spi/s3c64xx: Use core DMA mapping code with dmaengine
When using dmaengine, allow the core to do the DMA mapping. We still
need the local mapping code for the non-dmaengine case, so this doesn't
save us anything for now.
Signed-off-by: Mark Brown <broonie@linaro.org>
-rw-r--r--	drivers/spi/spi-s3c64xx.c	178
1 file changed, 103 insertions(+), 75 deletions(-)
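
For readers following along: the core mechanism this patch opts in to works by the driver publishing a can_dma() callback. For every transfer that callback accepts, the SPI core maps tx_buf/rx_buf into the xfer->tx_sg/rx_sg scatter-gather tables before calling transfer_one(), and unmaps them when the message completes. The fragment below is a minimal sketch of that contract, not code from this driver: the example_* names and the 64-byte threshold are invented for illustration.

#include <linux/dmaengine.h>
#include <linux/spi/spi.h>

/* Tell the core which transfers it should DMA-map for us. */
static bool example_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Arbitrary example threshold; s3c64xx uses half its FIFO depth. */
	return xfer->len > 64;
}

static int example_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *desc;

	/*
	 * can_dma() accepted this transfer, so the core has already mapped
	 * xfer->tx_buf into xfer->tx_sg; no dma_map_single() needed here.
	 * (The rx side is analogous, using master->dma_rx and xfer->rx_sg.)
	 */
	desc = dmaengine_prep_slave_sg(master->dma_tx, xfer->tx_sg.sgl,
				       xfer->tx_sg.nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -EIO;

	dmaengine_submit(desc);
	dma_async_issue_pending(master->dma_tx);
	return 1;	/* positive: in flight, finalized on DMA completion */
}

/* Wired up in probe, alongside the dma_tx/dma_rx channels: */
static void example_setup(struct spi_master *master)
{
	master->can_dma = example_can_dma;
	master->transfer_one = example_transfer_one;
}

In this patch the same idea appears as s3c64xx_spi_can_dma() plus the spi->dma_rx/dma_tx assignments below, while the hand-rolled dma_map_single() path survives only for the legacy S3C-DMA build.
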
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 25c9bd409a87..4490e8c499c0 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -291,6 +291,81 @@ static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
 	.name = "samsung-spi-dma",
 };
 
+static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
+				struct spi_message *msg)
+{
+	struct device *dev = &sdd->pdev->dev;
+	struct spi_transfer *xfer;
+
+	if (is_polling(sdd) || msg->is_dma_mapped)
+		return 0;
+
+	/* First mark all xfer unmapped */
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		xfer->rx_dma = XFER_DMAADDR_INVALID;
+		xfer->tx_dma = XFER_DMAADDR_INVALID;
+	}
+
+	/* Map until end or first fail */
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+
+		if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
+			continue;
+
+		if (xfer->tx_buf != NULL) {
+			xfer->tx_dma = dma_map_single(dev,
+					(void *)xfer->tx_buf, xfer->len,
+					DMA_TO_DEVICE);
+			if (dma_mapping_error(dev, xfer->tx_dma)) {
+				dev_err(dev, "dma_map_single Tx failed\n");
+				xfer->tx_dma = XFER_DMAADDR_INVALID;
+				return -ENOMEM;
+			}
+		}
+
+		if (xfer->rx_buf != NULL) {
+			xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
+						xfer->len, DMA_FROM_DEVICE);
+			if (dma_mapping_error(dev, xfer->rx_dma)) {
+				dev_err(dev, "dma_map_single Rx failed\n");
+				dma_unmap_single(dev, xfer->tx_dma,
+						xfer->len, DMA_TO_DEVICE);
+				xfer->tx_dma = XFER_DMAADDR_INVALID;
+				xfer->rx_dma = XFER_DMAADDR_INVALID;
+				return -ENOMEM;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
+				   struct spi_message *msg)
+{
+	struct device *dev = &sdd->pdev->dev;
+	struct spi_transfer *xfer;
+
+	if (is_polling(sdd) || msg->is_dma_mapped)
+		return;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+
+		if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
+			continue;
+
+		if (xfer->rx_buf != NULL
+				&& xfer->rx_dma != XFER_DMAADDR_INVALID)
+			dma_unmap_single(dev, xfer->rx_dma,
+					xfer->len, DMA_FROM_DEVICE);
+
+		if (xfer->tx_buf != NULL
+				&& xfer->tx_dma != XFER_DMAADDR_INVALID)
+			dma_unmap_single(dev, xfer->tx_dma,
+					xfer->len, DMA_TO_DEVICE);
+	}
+}
+
 static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
 			unsigned len, dma_addr_t buf)
 {
@@ -378,8 +453,22 @@ static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
 {
 	sdd->ops->stop((enum dma_ch)dma->ch);
 }
+
+#define s3c64xx_spi_can_dma NULL
+
 #else
 
+static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
+				struct spi_message *msg)
+{
+	return 0;
+}
+
+static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
+				   struct spi_message *msg)
+{
+}
+
 static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
 			struct sg_table *sgt)
 {
@@ -437,6 +526,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 			ret = -EBUSY;
 			goto out;
 		}
+		spi->dma_rx = sdd->rx_dma.ch;
 
 		sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
 				(void *)sdd->tx_dma.dmach, dev, "tx");
@@ -445,6 +535,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 			ret = -EBUSY;
 			goto out_rx;
 		}
+		spi->dma_tx = sdd->tx_dma.ch;
 	}
 
 	ret = pm_runtime_get_sync(&sdd->pdev->dev);
@@ -482,6 +573,16 @@ static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
 {
 	dmaengine_terminate_all(dma->ch);
 }
+
+static bool s3c64xx_spi_can_dma(struct spi_master *master,
+				struct spi_device *spi,
+				struct spi_transfer *xfer)
+{
+	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+
+	return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
+}
+
 #endif
 
 static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
@@ -764,81 +865,6 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
 
 #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
 
-static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
-				struct spi_message *msg)
-{
-	struct device *dev = &sdd->pdev->dev;
-	struct spi_transfer *xfer;
-
-	if (is_polling(sdd) || msg->is_dma_mapped)
-		return 0;
-
-	/* First mark all xfer unmapped */
-	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-		xfer->rx_dma = XFER_DMAADDR_INVALID;
-		xfer->tx_dma = XFER_DMAADDR_INVALID;
-	}
-
-	/* Map until end or first fail */
-	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
-		if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
-			continue;
-
-		if (xfer->tx_buf != NULL) {
-			xfer->tx_dma = dma_map_single(dev,
-					(void *)xfer->tx_buf, xfer->len,
-					DMA_TO_DEVICE);
-			if (dma_mapping_error(dev, xfer->tx_dma)) {
-				dev_err(dev, "dma_map_single Tx failed\n");
-				xfer->tx_dma = XFER_DMAADDR_INVALID;
-				return -ENOMEM;
-			}
-		}
-
-		if (xfer->rx_buf != NULL) {
-			xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
-						xfer->len, DMA_FROM_DEVICE);
-			if (dma_mapping_error(dev, xfer->rx_dma)) {
-				dev_err(dev, "dma_map_single Rx failed\n");
-				dma_unmap_single(dev, xfer->tx_dma,
-						xfer->len, DMA_TO_DEVICE);
-				xfer->tx_dma = XFER_DMAADDR_INVALID;
-				xfer->rx_dma = XFER_DMAADDR_INVALID;
-				return -ENOMEM;
-			}
-		}
-	}
-
-	return 0;
-}
-
-static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
-				   struct spi_message *msg)
-{
-	struct device *dev = &sdd->pdev->dev;
-	struct spi_transfer *xfer;
-
-	if (is_polling(sdd) || msg->is_dma_mapped)
-		return;
-
-	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
-		if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
-			continue;
-
-		if (xfer->rx_buf != NULL
-				&& xfer->rx_dma != XFER_DMAADDR_INVALID)
-			dma_unmap_single(dev, xfer->rx_dma,
-					xfer->len, DMA_FROM_DEVICE);
-
-		if (xfer->tx_buf != NULL
-				&& xfer->tx_dma != XFER_DMAADDR_INVALID)
-			dma_unmap_single(dev, xfer->tx_dma,
-					xfer->len, DMA_TO_DEVICE);
-	}
-}
-
 static int s3c64xx_spi_prepare_message(struct spi_master *master,
 				       struct spi_message *msg)
 {
@@ -1338,6 +1364,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 	/* the spi->mode bits understood by this driver: */
 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 	master->auto_runtime_pm = true;
+	if (!is_polling(sdd))
+		master->can_dma = s3c64xx_spi_can_dma;
 
 	sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
 	if (IS_ERR(sdd->regs)) {