author     Mark Brown <broonie@linaro.org>  2014-02-27 04:54:00 -0500
committer  Mark Brown <broonie@linaro.org>  2014-02-27 04:54:00 -0500
commit     4867147bcde8fa94f56fc6e8149c57ca65c4e5ed
tree       abec12e8ad2f38e914d99788d4f0bfcb043ade09
parent     8d19534a8d539bb2e598e56e017a423f205e909e
parent     513273538a6c10dba1170ecdee5c2da15acecdb5
Merge branch 'topic/dma' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi into HEAD
 drivers/spi/spi-s3c64xx.c | 207
 drivers/spi/spi.c         | 180
 include/linux/spi/spi.h   |  31
 3 files changed, 310 insertions(+), 108 deletions(-)
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index ae907dde1371..25c9bd409a87 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -381,7 +381,7 @@ static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
 #else

 static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
-			unsigned len, dma_addr_t buf)
+			struct sg_table *sgt)
 {
 	struct s3c64xx_spi_driver_data *sdd;
 	struct dma_slave_config config;
@@ -407,8 +407,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
 		dmaengine_slave_config(dma->ch, &config);
 	}

-	desc = dmaengine_prep_slave_single(dma->ch, buf, len,
-					dma->direction, DMA_PREP_INTERRUPT);
+	desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
+				       dma->direction, DMA_PREP_INTERRUPT);

 	desc->callback = s3c64xx_spi_dmacb;
 	desc->callback_param = dma;
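
For context, the hunk above switches prepare_dma() from a single-buffer descriptor to the scatter-gather variant so it can consume the sg_table the core now maps. In the usual dmaengine pattern the descriptor is then submitted and the channel kicked. A minimal sketch of that end-to-end sequence, assuming a channel already requested from the dmaengine framework and an already-mapped sg_table; the function name, direction, and error value here are illustrative, not from this commit:

/* Hedged sketch of the dmaengine slave-sg sequence around this hunk */
static int start_sg_dma(struct dma_chan *ch, struct sg_table *sgt,
			dma_async_tx_callback done, void *ctx)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_sg(ch, sgt->sgl, sgt->nents,
				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EIO;		/* prep can fail; a real driver checks */

	desc->callback = done;		/* e.g. s3c64xx_spi_dmacb above */
	desc->callback_param = ctx;
	dmaengine_submit(desc);
	dma_async_issue_pending(ch);	/* actually starts the transfer */
	return 0;
}
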
@@ -515,7 +515,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
 		if (dma_mode) {
 			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
+#ifndef CONFIG_S3C_DMA
+			prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
+#else
 			prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
+#endif
 		} else {
 			switch (sdd->cur_bpw) {
 			case 32:
@@ -547,7 +551,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 		writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
 			| S3C64XX_SPI_PACKET_CNT_EN,
 			regs + S3C64XX_SPI_PACKET_CNT);
+#ifndef CONFIG_S3C_DMA
+		prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
+#else
 		prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
+#endif
 	}
 }

@@ -555,23 +563,6 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
 }

-static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
-						struct spi_device *spi)
-{
-	if (sdd->tgl_spi != NULL) {	/* If last device toggled after mssg */
-		if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
-			/* Deselect the last toggled device */
-			if (spi->cs_gpio >= 0)
-				gpio_set_value(spi->cs_gpio,
-					spi->mode & SPI_CS_HIGH ? 0 : 1);
-		}
-		sdd->tgl_spi = NULL;
-	}
-
-	if (spi->cs_gpio >= 0)
-		gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 1 : 0);
-}
-
 static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
 					int timeout_ms)
 {
@@ -593,112 +584,111 @@ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
 	return RX_FIFO_LVL(status, sdd);
 }

-static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
-			struct spi_transfer *xfer, int dma_mode)
+static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
+			struct spi_transfer *xfer)
 {
 	void __iomem *regs = sdd->regs;
 	unsigned long val;
+	u32 status;
 	int ms;

 	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
 	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
 	ms += 10; /* some tolerance */

-	if (dma_mode) {
-		val = msecs_to_jiffies(ms) + 10;
-		val = wait_for_completion_timeout(&sdd->xfer_completion, val);
-	} else {
-		u32 status;
-		val = msecs_to_loops(ms);
-		do {
+	val = msecs_to_jiffies(ms) + 10;
+	val = wait_for_completion_timeout(&sdd->xfer_completion, val);
+
+	/*
+	 * If the previous xfer was completed within timeout, then
+	 * proceed further else return -EIO.
+	 * DmaTx returns after simply writing data in the FIFO,
+	 * w/o waiting for real transmission on the bus to finish.
+	 * DmaRx returns only after Dma read data from FIFO which
+	 * needs bus transmission to finish, so we don't worry if
+	 * Xfer involved Rx(with or without Tx).
+	 */
+	if (val && !xfer->rx_buf) {
+		val = msecs_to_loops(10);
+		status = readl(regs + S3C64XX_SPI_STATUS);
+		while ((TX_FIFO_LVL(status, sdd)
+			|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
+		       && --val) {
+			cpu_relax();
 			status = readl(regs + S3C64XX_SPI_STATUS);
-		} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
+		}
+
 	}

-	if (dma_mode) {
-		u32 status;
-
-		/*
-		 * If the previous xfer was completed within timeout, then
-		 * proceed further else return -EIO.
-		 * DmaTx returns after simply writing data in the FIFO,
-		 * w/o waiting for real transmission on the bus to finish.
-		 * DmaRx returns only after Dma read data from FIFO which
-		 * needs bus transmission to finish, so we don't worry if
-		 * Xfer involved Rx(with or without Tx).
-		 */
-		if (val && !xfer->rx_buf) {
-			val = msecs_to_loops(10);
-			status = readl(regs + S3C64XX_SPI_STATUS);
-			while ((TX_FIFO_LVL(status, sdd)
-				|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
-				&& --val) {
-				cpu_relax();
-				status = readl(regs + S3C64XX_SPI_STATUS);
-			}
-
-		}
+	/* If timed out while checking rx/tx status return error */
+	if (!val)
+		return -EIO;

-		/* If timed out while checking rx/tx status return error */
-		if (!val)
-			return -EIO;
-	} else {
-		int loops;
-		u32 cpy_len;
-		u8 *buf;
-
-		/* If it was only Tx */
-		if (!xfer->rx_buf) {
-			sdd->state &= ~TXBUSY;
-			return 0;
-		}
+	return 0;
+}

-		/*
-		 * If the receive length is bigger than the controller fifo
-		 * size, calculate the loops and read the fifo as many times.
-		 * loops = length / max fifo size (calculated by using the
-		 * fifo mask).
-		 * For any size less than the fifo size the below code is
-		 * executed atleast once.
-		 */
-		loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
-		buf = xfer->rx_buf;
-		do {
-			/* wait for data to be received in the fifo */
-			cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
-						(loops ? ms : 0));
+static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
+			struct spi_transfer *xfer)
+{
+	void __iomem *regs = sdd->regs;
+	unsigned long val;
+	u32 status;
+	int loops;
+	u32 cpy_len;
+	u8 *buf;
+	int ms;

-			switch (sdd->cur_bpw) {
-			case 32:
-				ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len / 4);
-				break;
-			case 16:
-				ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len / 2);
-				break;
-			default:
-				ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len);
-				break;
-			}
+	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
+	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+	ms += 10; /* some tolerance */

+	val = msecs_to_loops(ms);
+	do {
+		status = readl(regs + S3C64XX_SPI_STATUS);
+	} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);

-			buf = buf + cpy_len;
-		} while (loops--);
-		sdd->state &= ~RXBUSY;
+
+	/* If it was only Tx */
+	if (!xfer->rx_buf) {
+		sdd->state &= ~TXBUSY;
+		return 0;
 	}

-	return 0;
-}
+	/*
+	 * If the receive length is bigger than the controller fifo
+	 * size, calculate the loops and read the fifo as many times.
+	 * loops = length / max fifo size (calculated by using the
+	 * fifo mask).
+	 * For any size less than the fifo size the below code is
+	 * executed atleast once.
+	 */
+	loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+	buf = xfer->rx_buf;
+	do {
+		/* wait for data to be received in the fifo */
+		cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
+					(loops ? ms : 0));
+
+		switch (sdd->cur_bpw) {
+		case 32:
+			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
+				buf, cpy_len / 4);
+			break;
+		case 16:
+			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
+				buf, cpy_len / 2);
+			break;
+		default:
+			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
+				buf, cpy_len);
+			break;
+		}

-static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
-			struct spi_device *spi)
-{
-	if (sdd->tgl_spi == spi)
-		sdd->tgl_spi = NULL;
+		buf = buf + cpy_len;
+	} while (loops--);
+	sdd->state &= ~RXBUSY;

-	if (spi->cs_gpio >= 0)
-		gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
+	return 0;
 }

 static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
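
Both new helpers start from the same timeout: the time the transfer occupies the wire plus 10 ms of slack, ms = len * 8 * 1000 / cur_speed. wait_for_dma() sleeps on sdd->xfer_completion for that long, while wait_for_pio() busy-waits on the RX FIFO level. A quick worked example with illustrative values (not from this commit):

/* Worked example of the shared timeout formula */
unsigned int len = 4096;		/* bytes, example value */
u32 cur_speed = 10000000;		/* 10 MHz, example value */
int ms = len * 8 * 1000 / cur_speed;	/* 32768000 / 10000000 = 3 ms */
ms += 10;				/* some tolerance -> 13 ms */
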
@@ -929,7 +919,10 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,

 	spin_unlock_irqrestore(&sdd->lock, flags);

-	status = wait_for_xfer(sdd, xfer, use_dma);
+	if (use_dma)
+		status = wait_for_dma(sdd, xfer);
+	else
+		status = wait_for_pio(sdd, xfer);

 	if (status) {
 		dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
@@ -1092,14 +1085,12 @@ static int s3c64xx_spi_setup(struct spi_device *spi)

 	pm_runtime_put(&sdd->pdev->dev);
 	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-	disable_cs(sdd, spi);
 	return 0;

 setup_exit:
 	pm_runtime_put(&sdd->pdev->dev);
 	/* setup() returns with device de-selected */
 	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-	disable_cs(sdd, spi);

 	gpio_free(cs->line);
 	spi_set_ctldata(spi, NULL);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 23756b0f9036..f3fb1acf9ac1 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -24,6 +24,8 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/mutex.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
@@ -580,6 +582,169 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
 	spi->master->set_cs(spi, !enable);
 }

+static int spi_map_buf(struct spi_master *master, struct device *dev,
+		       struct sg_table *sgt, void *buf, size_t len,
+		       enum dma_data_direction dir)
+{
+	const bool vmalloced_buf = is_vmalloc_addr(buf);
+	const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
+	const int sgs = DIV_ROUND_UP(len, desc_len);
+	struct page *vm_page;
+	void *sg_buf;
+	size_t min;
+	int i, ret;
+
+	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
+	if (ret != 0)
+		return ret;
+
+	for (i = 0; i < sgs; i++) {
+		min = min_t(size_t, len, desc_len);
+
+		if (vmalloced_buf) {
+			vm_page = vmalloc_to_page(buf);
+			if (!vm_page) {
+				sg_free_table(sgt);
+				return -ENOMEM;
+			}
+			sg_buf = page_address(vm_page) +
+				((size_t)buf & ~PAGE_MASK);
+		} else {
+			sg_buf = buf;
+		}
+
+		sg_set_buf(&sgt->sgl[i], sg_buf, min);
+
+		buf += min;
+		len -= min;
+	}
+
+	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+	if (ret < 0) {
+		sg_free_table(sgt);
+		return ret;
+	}
+
+	sgt->nents = ret;
+
+	return 0;
+}
+
+static void spi_unmap_buf(struct spi_master *master, struct device *dev,
+			  struct sg_table *sgt, enum dma_data_direction dir)
+{
+	if (sgt->orig_nents) {
+		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+		sg_free_table(sgt);
+	}
+}
+
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+	struct device *tx_dev, *rx_dev;
+	struct spi_transfer *xfer;
+	void *tmp;
+	unsigned int max_tx, max_rx;
+	int ret;
+
+	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+		max_tx = 0;
+		max_rx = 0;
+
+		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+			if ((master->flags & SPI_MASTER_MUST_TX) &&
+			    !xfer->tx_buf)
+				max_tx = max(xfer->len, max_tx);
+			if ((master->flags & SPI_MASTER_MUST_RX) &&
+			    !xfer->rx_buf)
+				max_rx = max(xfer->len, max_rx);
+		}
+
+		if (max_tx) {
+			tmp = krealloc(master->dummy_tx, max_tx,
+				       GFP_KERNEL | GFP_DMA);
+			if (!tmp)
+				return -ENOMEM;
+			master->dummy_tx = tmp;
+			memset(tmp, 0, max_tx);
+		}
+
+		if (max_rx) {
+			tmp = krealloc(master->dummy_rx, max_rx,
+				       GFP_KERNEL | GFP_DMA);
+			if (!tmp)
+				return -ENOMEM;
+			master->dummy_rx = tmp;
+		}
+
+		if (max_tx || max_rx) {
+			list_for_each_entry(xfer, &msg->transfers,
+					    transfer_list) {
+				if (!xfer->tx_buf)
+					xfer->tx_buf = master->dummy_tx;
+				if (!xfer->rx_buf)
+					xfer->rx_buf = master->dummy_rx;
+			}
+		}
+	}
+
+	if (!master->can_dma)
+		return 0;
+
+	tx_dev = &master->dma_tx->dev->device;
+	rx_dev = &master->dma_rx->dev->device;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (!master->can_dma(master, msg->spi, xfer))
+			continue;
+
+		if (xfer->tx_buf != NULL) {
+			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
+					  (void *)xfer->tx_buf, xfer->len,
+					  DMA_TO_DEVICE);
+			if (ret != 0)
+				return ret;
+		}
+
+		if (xfer->rx_buf != NULL) {
+			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
+					  xfer->rx_buf, xfer->len,
+					  DMA_FROM_DEVICE);
+			if (ret != 0) {
+				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
+					      DMA_TO_DEVICE);
+				return ret;
+			}
+		}
+	}
+
+	master->cur_msg_mapped = true;
+
+	return 0;
+}
+
+static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+{
+	struct spi_transfer *xfer;
+	struct device *tx_dev, *rx_dev;
+
+	if (!master->cur_msg_mapped || !master->can_dma)
+		return 0;
+
+	tx_dev = &master->dma_tx->dev->device;
+	rx_dev = &master->dma_rx->dev->device;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (!master->can_dma(master, msg->spi, xfer))
+			continue;
+
+		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+	}
+
+	return 0;
+}
+
 /*
  * spi_transfer_one_message - Default implementation of transfer_one_message()
  *
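
Two details in spi_map_buf()/spi_unmap_buf() above are worth noting. A vmalloc'ed buffer is virtually but not physically contiguous, so it is split into one scatterlist entry per page; and dma_map_sg() may return fewer entries than it was given (an IOMMU can coalesce them), which is why sgt->nents is overwritten with the return value while the unmap path uses sgt->orig_nents. A sketch of the entry-count arithmetic, with illustrative numbers:

/* Illustrative numbers for the mapping math in spi_map_buf() above */
size_t len = 10000;			/* transfer length, example value */
int desc_len = PAGE_SIZE;		/* vmalloc case: one entry per page */
int sgs = DIV_ROUND_UP(len, desc_len);	/* 3 entries with 4 KiB pages */
/* After dma_map_sg() an IOMMU might merge these, so nents may drop to
 * 1..3, but orig_nents stays 3 and is what dma_unmap_sg() must see. */
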
@@ -686,6 +851,10 @@ static void spi_pump_messages(struct kthread_work *work)
 		}
 		master->busy = false;
 		spin_unlock_irqrestore(&master->queue_lock, flags);
+		kfree(master->dummy_rx);
+		master->dummy_rx = NULL;
+		kfree(master->dummy_tx);
+		master->dummy_tx = NULL;
 		if (master->unprepare_transfer_hardware &&
 		    master->unprepare_transfer_hardware(master))
 			dev_err(&master->dev,
@@ -752,6 +921,13 @@ static void spi_pump_messages(struct kthread_work *work)
 		master->cur_msg_prepared = true;
 	}

+	ret = spi_map_msg(master, master->cur_msg);
+	if (ret) {
+		master->cur_msg->status = ret;
+		spi_finalize_current_message(master);
+		return;
+	}
+
 	ret = master->transfer_one_message(master, master->cur_msg);
 	if (ret) {
 		dev_err(&master->dev,
@@ -841,6 +1017,8 @@ void spi_finalize_current_message(struct spi_master *master)
 	queue_kthread_work(&master->kworker, &master->pump_messages);
 	spin_unlock_irqrestore(&master->queue_lock, flags);

+	spi_unmap_msg(master, mesg);
+
 	if (master->cur_msg_prepared && master->unprepare_message) {
 		ret = master->unprepare_message(master, mesg);
 		if (ret) {
@@ -1374,6 +1552,8 @@ int spi_register_master(struct spi_master *master)
 	mutex_init(&master->bus_lock_mutex);
 	master->bus_lock_flag = 0;
 	init_completion(&master->xfer_completion);
+	if (!master->max_dma_len)
+		master->max_dma_len = INT_MAX;

 	/* register the device, then userspace will see it.
 	 * registration fails if the bus ID is in use.
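
Taken together, the spi.c hunks wrap each queued message in a map → transfer → unmap sequence. A simplified pseudo-flow of the above — the helper name is hypothetical; the real logic lives in spi_pump_messages() and spi_finalize_current_message():

/* Hypothetical condensation of the queue path after this merge */
static void pump_one(struct spi_master *master, struct spi_message *msg)
{
	int ret = spi_map_msg(master, msg);	/* DMA-map qualifying xfers */

	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(master);	/* unmaps, completes */
		return;
	}
	master->transfer_one_message(master, msg);
	/* on completion the driver calls spi_finalize_current_message(),
	 * which runs spi_unmap_msg() before unprepare_message() */
}
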
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a1d4ca290862..0c23c835d48b 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -24,6 +24,9 @@
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/completion.h>
+#include <linux/scatterlist.h>
+
+struct dma_chan;

 /*
  * INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -266,6 +269,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @auto_runtime_pm: the core should ensure a runtime PM reference is held
  *                   while the hardware is prepared, using the parent
  *                   device for the spidev
+ * @max_dma_len: Maximum length of a DMA transfer for the device.
  * @prepare_transfer_hardware: a message will soon arrive from the queue
  *	so the subsystem requests the driver to prepare the transfer hardware
  *	by issuing this call
@@ -345,6 +349,8 @@ struct spi_master {
 #define SPI_MASTER_HALF_DUPLEX	BIT(0)		/* can't do full duplex */
 #define SPI_MASTER_NO_RX	BIT(1)		/* can't do buffer read */
 #define SPI_MASTER_NO_TX	BIT(2)		/* can't do buffer write */
+#define SPI_MASTER_MUST_RX	BIT(3)		/* requires rx */
+#define SPI_MASTER_MUST_TX	BIT(4)		/* requires tx */

 	/* lock and mutex for SPI bus locking */
 	spinlock_t		bus_lock_spinlock;
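
A driver whose hardware always clocks data in both directions can set these flags and let the core substitute zeroed scratch buffers for any missing tx_buf/rx_buf. A hedged example of the opt-in (not from this commit):

	/* in a driver's probe(), for hardware that needs both buffers: */
	master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
	/* spi_map_msg() then points any missing tx_buf/rx_buf at the
	 * core-managed master->dummy_tx/master->dummy_rx buffers */
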
@@ -387,6 +393,17 @@ struct spi_master {
 	void			(*cleanup)(struct spi_device *spi);

 	/*
+	 * Used to enable core support for DMA handling, if can_dma()
+	 * exists and returns true then the transfer will be mapped
+	 * prior to transfer_one() being called. The driver should
+	 * not modify or store xfer and dma_tx and dma_rx must be set
+	 * while the device is prepared.
+	 */
+	bool			(*can_dma)(struct spi_master *master,
+					   struct spi_device *spi,
+					   struct spi_transfer *xfer);
+
+	/*
 	 * These hooks are for drivers that want to use the generic
 	 * master transfer queueing mechanism. If these are used, the
 	 * transfer() function above must NOT be specified by the driver.
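
Wiring a controller driver into this hook looks roughly as follows — a sketch under stated assumptions: the callback name, FIFO threshold, and channel names are illustrative, and the NULL checks a real driver needs around channel requests are omitted:

/* Hypothetical driver opt-in to the new core DMA mapping support */
static bool foo_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	return xfer->len > 64;	/* e.g. DMA only past the FIFO depth */
}

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master = spi_alloc_master(&pdev->dev, 0);

	if (!master)
		return -ENOMEM;
	master->can_dma = foo_spi_can_dma;
	master->dma_tx = dma_request_slave_channel(&pdev->dev, "tx");
	master->dma_rx = dma_request_slave_channel(&pdev->dev, "rx");
	/* ... rest of setup, then spi_register_master(master) ... */
	return 0;
}
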
@@ -404,7 +421,9 @@ struct spi_master {
 	bool			rt;
 	bool			auto_runtime_pm;
 	bool			cur_msg_prepared;
+	bool			cur_msg_mapped;
 	struct completion	xfer_completion;
+	size_t			max_dma_len;

 	int (*prepare_transfer_hardware)(struct spi_master *master);
 	int (*transfer_one_message)(struct spi_master *master,
@@ -425,6 +444,14 @@ struct spi_master {

 	/* gpio chip select */
 	int			*cs_gpios;
+
+	/* DMA channels for use with core dmaengine helpers */
+	struct dma_chan		*dma_tx;
+	struct dma_chan		*dma_rx;
+
+	/* dummy data for full duplex devices */
+	void			*dummy_rx;
+	void			*dummy_tx;
 };

 static inline void *spi_master_get_devdata(struct spi_master *master)
@@ -509,6 +536,8 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
  *      (optionally) changing the chipselect status, then starting
  *      the next transfer or completing this @spi_message.
  * @transfer_list: transfers are sequenced through @spi_message.transfers
+ * @tx_sg: Scatterlist for transmit, currently not for client use
+ * @rx_sg: Scatterlist for receive, currently not for client use
  *
  * SPI transfers always write the same number of bytes as they read.
  * Protocol drivers should always provide @rx_buf and/or @tx_buf.
@@ -576,6 +605,8 @@ struct spi_transfer {

 	dma_addr_t	tx_dma;
 	dma_addr_t	rx_dma;
+	struct sg_table tx_sg;
+	struct sg_table rx_sg;

 	unsigned	cs_change:1;
 	unsigned	tx_nbits:3;
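
On the consuming side, once the core has mapped a transfer (master->cur_msg_mapped is set), a driver's transfer path can hand these tables straight to dmaengine, mirroring prepare_dma() in spi-s3c64xx.c above. A minimal sketch with a hypothetical function name:

/* Hypothetical RX start using the core-mapped scatterlist */
static void foo_spi_start_rx(struct spi_master *master,
			     struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_sg(master->dma_rx,
				       xfer->rx_sg.sgl, xfer->rx_sg.nents,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	dmaengine_submit(desc);
	dma_async_issue_pending(master->dma_rx);
}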