author		Mark Brown <broonie@linaro.org>	2014-03-29 20:50:56 -0400
committer	Mark Brown <broonie@linaro.org>	2014-03-29 20:50:56 -0400
commit		a78389844ea95f18aadda79246587c707586ce89
tree		37d57897ff10f1a940736fc6f3fa94968edbab22
parent		5d0eb26ce83647f0f6ed2e681a77821c92acd014
parent		513273538a6c10dba1170ecdee5c2da15acecdb5
Merge remote-tracking branch 'spi/topic/dma' into spi-next
Diffstat:
 -rw-r--r--	drivers/spi/spi-s3c64xx.c	207
 -rw-r--r--	drivers/spi/spi.c		180
 -rw-r--r--	include/linux/spi/spi.h		 31
 3 files changed, 310 insertions, 108 deletions
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index ae907dde1371..25c9bd409a87 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -381,7 +381,7 @@ static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
 #else
 
 static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
-			unsigned len, dma_addr_t buf)
+			struct sg_table *sgt)
 {
 	struct s3c64xx_spi_driver_data *sdd;
 	struct dma_slave_config config;
@@ -407,8 +407,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
 		dmaengine_slave_config(dma->ch, &config);
 	}
 
-	desc = dmaengine_prep_slave_single(dma->ch, buf, len,
-					dma->direction, DMA_PREP_INTERRUPT);
+	desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
+				       dma->direction, DMA_PREP_INTERRUPT);
 
 	desc->callback = s3c64xx_spi_dmacb;
 	desc->callback_param = dma;
@@ -515,7 +515,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
 		if (dma_mode) {
 			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
+#ifndef CONFIG_S3C_DMA
+			prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
+#else
 			prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
+#endif
 		} else {
 			switch (sdd->cur_bpw) {
 			case 32:
@@ -547,7 +551,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
 					| S3C64XX_SPI_PACKET_CNT_EN,
 					regs + S3C64XX_SPI_PACKET_CNT);
+#ifndef CONFIG_S3C_DMA
+			prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
+#else
 			prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
+#endif
 		}
 	}
 
@@ -555,23 +563,6 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
 }
 
-static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
-						struct spi_device *spi)
-{
-	if (sdd->tgl_spi != NULL) {	/* If last device toggled after mssg */
-		if (sdd->tgl_spi != spi) {	/* if last mssg on diff device */
-			/* Deselect the last toggled device */
-			if (spi->cs_gpio >= 0)
-				gpio_set_value(spi->cs_gpio,
-					spi->mode & SPI_CS_HIGH ? 0 : 1);
-		}
-		sdd->tgl_spi = NULL;
-	}
-
-	if (spi->cs_gpio >= 0)
-		gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 1 : 0);
-}
-
 static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
 					int timeout_ms)
 {
@@ -593,112 +584,111 @@ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
 	return RX_FIFO_LVL(status, sdd);
 }
 
-static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
-			struct spi_transfer *xfer, int dma_mode)
-{
-	void __iomem *regs = sdd->regs;
-	unsigned long val;
-	int ms;
-
-	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
-	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
-	ms += 10; /* some tolerance */
-
-	if (dma_mode) {
-		val = msecs_to_jiffies(ms) + 10;
-		val = wait_for_completion_timeout(&sdd->xfer_completion, val);
-	} else {
-		u32 status;
-		val = msecs_to_loops(ms);
-		do {
-			status = readl(regs + S3C64XX_SPI_STATUS);
-		} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
-	}
-
-	if (dma_mode) {
-		u32 status;
-
-		/*
-		 * If the previous xfer was completed within timeout, then
-		 * proceed further else return -EIO.
-		 * DmaTx returns after simply writing data in the FIFO,
-		 * w/o waiting for real transmission on the bus to finish.
-		 * DmaRx returns only after Dma read data from FIFO which
-		 * needs bus transmission to finish, so we don't worry if
-		 * Xfer involved Rx(with or without Tx).
-		 */
-		if (val && !xfer->rx_buf) {
-			val = msecs_to_loops(10);
-			status = readl(regs + S3C64XX_SPI_STATUS);
-			while ((TX_FIFO_LVL(status, sdd)
-				|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
-				&& --val) {
-				cpu_relax();
-				status = readl(regs + S3C64XX_SPI_STATUS);
-			}
-
-		}
-
-		/* If timed out while checking rx/tx status return error */
-		if (!val)
-			return -EIO;
-	} else {
-		int loops;
-		u32 cpy_len;
-		u8 *buf;
-
-		/* If it was only Tx */
-		if (!xfer->rx_buf) {
-			sdd->state &= ~TXBUSY;
-			return 0;
-		}
-
-		/*
-		 * If the receive length is bigger than the controller fifo
-		 * size, calculate the loops and read the fifo as many times.
-		 * loops = length / max fifo size (calculated by using the
-		 * fifo mask).
-		 * For any size less than the fifo size the below code is
-		 * executed atleast once.
-		 */
-		loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
-		buf = xfer->rx_buf;
-		do {
-			/* wait for data to be received in the fifo */
-			cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
-						(loops ? ms : 0));
-
-			switch (sdd->cur_bpw) {
-			case 32:
-				ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len / 4);
-				break;
-			case 16:
-				ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len / 2);
-				break;
-			default:
-				ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len);
-				break;
-			}
-
-			buf = buf + cpy_len;
-		} while (loops--);
-		sdd->state &= ~RXBUSY;
-	}
-
-	return 0;
-}
-
-static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
-						struct spi_device *spi)
-{
-	if (sdd->tgl_spi == spi)
-		sdd->tgl_spi = NULL;
-
-	if (spi->cs_gpio >= 0)
-		gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
-}
-
+static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
+			struct spi_transfer *xfer)
+{
+	void __iomem *regs = sdd->regs;
+	unsigned long val;
+	u32 status;
+	int ms;
+
+	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
+	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+	ms += 10; /* some tolerance */
+
+	val = msecs_to_jiffies(ms) + 10;
+	val = wait_for_completion_timeout(&sdd->xfer_completion, val);
+
+	/*
+	 * If the previous xfer was completed within timeout, then
+	 * proceed further else return -EIO.
+	 * DmaTx returns after simply writing data in the FIFO,
+	 * w/o waiting for real transmission on the bus to finish.
+	 * DmaRx returns only after Dma read data from FIFO which
+	 * needs bus transmission to finish, so we don't worry if
+	 * Xfer involved Rx(with or without Tx).
+	 */
+	if (val && !xfer->rx_buf) {
+		val = msecs_to_loops(10);
+		status = readl(regs + S3C64XX_SPI_STATUS);
+		while ((TX_FIFO_LVL(status, sdd)
+			|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
+		       && --val) {
+			cpu_relax();
+			status = readl(regs + S3C64XX_SPI_STATUS);
+		}
+
+	}
+
+	/* If timed out while checking rx/tx status return error */
+	if (!val)
+		return -EIO;
+
+	return 0;
+}
+
+static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
+			struct spi_transfer *xfer)
+{
+	void __iomem *regs = sdd->regs;
+	unsigned long val;
+	u32 status;
+	int loops;
+	u32 cpy_len;
+	u8 *buf;
+	int ms;
+
+	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
+	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+	ms += 10; /* some tolerance */
+
+	val = msecs_to_loops(ms);
+	do {
+		status = readl(regs + S3C64XX_SPI_STATUS);
+	} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
+
+
+	/* If it was only Tx */
+	if (!xfer->rx_buf) {
+		sdd->state &= ~TXBUSY;
+		return 0;
+	}
+
+	/*
+	 * If the receive length is bigger than the controller fifo
+	 * size, calculate the loops and read the fifo as many times.
+	 * loops = length / max fifo size (calculated by using the
+	 * fifo mask).
+	 * For any size less than the fifo size the below code is
+	 * executed atleast once.
+	 */
+	loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+	buf = xfer->rx_buf;
+	do {
+		/* wait for data to be received in the fifo */
+		cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
+					(loops ? ms : 0));
+
+		switch (sdd->cur_bpw) {
+		case 32:
+			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
+				buf, cpy_len / 4);
+			break;
+		case 16:
+			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
+				buf, cpy_len / 2);
+			break;
+		default:
+			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
+				buf, cpy_len);
+			break;
+		}
+
+		buf = buf + cpy_len;
+	} while (loops--);
+	sdd->state &= ~RXBUSY;
+
+	return 0;
+}
+
 static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
@@ -929,7 +919,10 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
 
 	spin_unlock_irqrestore(&sdd->lock, flags);
 
-	status = wait_for_xfer(sdd, xfer, use_dma);
+	if (use_dma)
+		status = wait_for_dma(sdd, xfer);
+	else
+		status = wait_for_pio(sdd, xfer);
 
 	if (status) {
 		dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
@@ -1092,14 +1085,12 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
 
 	pm_runtime_put(&sdd->pdev->dev);
 	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-	disable_cs(sdd, spi);
 	return 0;
 
 setup_exit:
 	pm_runtime_put(&sdd->pdev->dev);
 	/* setup() returns with device de-selected */
 	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-	disable_cs(sdd, spi);
 
 	gpio_free(cs->line);
 	spi_set_ctldata(spi, NULL);
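The s3c64xx hunks above only switch prepare_dma() over to the scatterlists that the SPI core now builds in xfer->tx_sg/xfer->rx_sg; the probe-time wiring that requests the dmaengine channels and advertises can_dma() to the core lies outside the ranges shown. A minimal sketch of that wiring for a generic controller driver follows — the example_* names, the "tx"/"rx" channel names and the 64-byte threshold are assumptions for illustration, not code from this merge:

#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

/* Only use DMA when the transfer will not fit in the FIFO (threshold assumed). */
static bool example_spi_can_dma(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	return xfer->len > 64;
}

static int example_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master = spi_alloc_master(&pdev->dev, 0);

	if (!master)
		return -ENOMEM;

	/* dma_tx/dma_rx must be valid whenever can_dma() can return true. */
	master->dma_tx = dma_request_slave_channel(&pdev->dev, "tx");
	master->dma_rx = dma_request_slave_channel(&pdev->dev, "rx");
	master->can_dma = example_spi_can_dma;
	master->max_dma_len = 65536;	/* optional; the core defaults to INT_MAX */

	return devm_spi_register_master(&pdev->dev, master);
}

With this in place the core calls spi_map_msg() before transfer_one(), so the driver only ever sees pre-mapped sg tables.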
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 0a8f399c907f..32e4603d5fc8 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -24,6 +24,8 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/mutex.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
@@ -578,6 +580,169 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
 	spi->master->set_cs(spi, !enable);
 }
 
+static int spi_map_buf(struct spi_master *master, struct device *dev,
+		       struct sg_table *sgt, void *buf, size_t len,
+		       enum dma_data_direction dir)
+{
+	const bool vmalloced_buf = is_vmalloc_addr(buf);
+	const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
+	const int sgs = DIV_ROUND_UP(len, desc_len);
+	struct page *vm_page;
+	void *sg_buf;
+	size_t min;
+	int i, ret;
+
+	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
+	if (ret != 0)
+		return ret;
+
+	for (i = 0; i < sgs; i++) {
+		min = min_t(size_t, len, desc_len);
+
+		if (vmalloced_buf) {
+			vm_page = vmalloc_to_page(buf);
+			if (!vm_page) {
+				sg_free_table(sgt);
+				return -ENOMEM;
+			}
+			sg_buf = page_address(vm_page) +
+				((size_t)buf & ~PAGE_MASK);
+		} else {
+			sg_buf = buf;
+		}
+
+		sg_set_buf(&sgt->sgl[i], sg_buf, min);
+
+		buf += min;
+		len -= min;
+	}
+
+	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+	if (ret < 0) {
+		sg_free_table(sgt);
+		return ret;
+	}
+
+	sgt->nents = ret;
+
+	return 0;
+}
+
+static void spi_unmap_buf(struct spi_master *master, struct device *dev,
+			  struct sg_table *sgt, enum dma_data_direction dir)
+{
+	if (sgt->orig_nents) {
+		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+		sg_free_table(sgt);
+	}
+}
+
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+	struct device *tx_dev, *rx_dev;
+	struct spi_transfer *xfer;
+	void *tmp;
+	unsigned int max_tx, max_rx;
+	int ret;
+
+	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+		max_tx = 0;
+		max_rx = 0;
+
+		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+			if ((master->flags & SPI_MASTER_MUST_TX) &&
+			    !xfer->tx_buf)
+				max_tx = max(xfer->len, max_tx);
+			if ((master->flags & SPI_MASTER_MUST_RX) &&
+			    !xfer->rx_buf)
+				max_rx = max(xfer->len, max_rx);
+		}
+
+		if (max_tx) {
+			tmp = krealloc(master->dummy_tx, max_tx,
+				       GFP_KERNEL | GFP_DMA);
+			if (!tmp)
+				return -ENOMEM;
+			master->dummy_tx = tmp;
+			memset(tmp, 0, max_tx);
+		}
+
+		if (max_rx) {
+			tmp = krealloc(master->dummy_rx, max_rx,
+				       GFP_KERNEL | GFP_DMA);
+			if (!tmp)
+				return -ENOMEM;
+			master->dummy_rx = tmp;
+		}
+
+		if (max_tx || max_rx) {
+			list_for_each_entry(xfer, &msg->transfers,
+					    transfer_list) {
+				if (!xfer->tx_buf)
+					xfer->tx_buf = master->dummy_tx;
+				if (!xfer->rx_buf)
+					xfer->rx_buf = master->dummy_rx;
+			}
+		}
+	}
+
+	if (!master->can_dma)
+		return 0;
+
+	tx_dev = &master->dma_tx->dev->device;
+	rx_dev = &master->dma_rx->dev->device;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (!master->can_dma(master, msg->spi, xfer))
+			continue;
+
+		if (xfer->tx_buf != NULL) {
+			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
+					  (void *)xfer->tx_buf, xfer->len,
+					  DMA_TO_DEVICE);
+			if (ret != 0)
+				return ret;
+		}
+
+		if (xfer->rx_buf != NULL) {
+			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
+					  xfer->rx_buf, xfer->len,
+					  DMA_FROM_DEVICE);
+			if (ret != 0) {
+				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
+					      DMA_TO_DEVICE);
+				return ret;
+			}
+		}
+	}
+
+	master->cur_msg_mapped = true;
+
+	return 0;
+}
+
+static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+{
+	struct spi_transfer *xfer;
+	struct device *tx_dev, *rx_dev;
+
+	if (!master->cur_msg_mapped || !master->can_dma)
+		return 0;
+
+	tx_dev = &master->dma_tx->dev->device;
+	rx_dev = &master->dma_rx->dev->device;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (!master->can_dma(master, msg->spi, xfer))
+			continue;
+
+		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+	}
+
+	return 0;
+}
+
 /*
  * spi_transfer_one_message - Default implementation of transfer_one_message()
  *
@@ -684,6 +849,10 @@ static void spi_pump_messages(struct kthread_work *work)
 		}
 		master->busy = false;
 		spin_unlock_irqrestore(&master->queue_lock, flags);
+		kfree(master->dummy_rx);
+		master->dummy_rx = NULL;
+		kfree(master->dummy_tx);
+		master->dummy_tx = NULL;
 		if (master->unprepare_transfer_hardware &&
 		    master->unprepare_transfer_hardware(master))
 			dev_err(&master->dev,
@@ -750,6 +919,13 @@ static void spi_pump_messages(struct kthread_work *work)
 		master->cur_msg_prepared = true;
 	}
 
+	ret = spi_map_msg(master, master->cur_msg);
+	if (ret) {
+		master->cur_msg->status = ret;
+		spi_finalize_current_message(master);
+		return;
+	}
+
 	ret = master->transfer_one_message(master, master->cur_msg);
 	if (ret) {
 		dev_err(&master->dev,
@@ -837,6 +1013,8 @@ void spi_finalize_current_message(struct spi_master *master)
 	queue_kthread_work(&master->kworker, &master->pump_messages);
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 
+	spi_unmap_msg(master, mesg);
+
 	if (master->cur_msg_prepared && master->unprepare_message) {
 		ret = master->unprepare_message(master, mesg);
 		if (ret) {
@@ -1370,6 +1548,8 @@ int spi_register_master(struct spi_master *master)
 	mutex_init(&master->bus_lock_mutex);
 	master->bus_lock_flag = 0;
 	init_completion(&master->xfer_completion);
+	if (!master->max_dma_len)
+		master->max_dma_len = INT_MAX;
 
 	/* register the device, then userspace will see it.
 	 * registration fails if the bus ID is in use.
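As a worked example of the spi_map_buf() logic added above (assuming 4 KiB pages): for a 10240-byte rx buffer that came from vmalloc(), desc_len is PAGE_SIZE, so sgs = DIV_ROUND_UP(10240, 4096) = 3 and the loop builds entries of 4096, 4096 and 2048 bytes, each resolved through vmalloc_to_page(). dma_map_sg() may then coalesce entries, which is why sgt->nents is overwritten with its return value while orig_nents is kept for the later dma_unmap_sg(). A kmalloc'd buffer lands in a single entry as long as it is below max_dma_len.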
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 4203c66d8803..36c86ef51ff3 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -24,6 +24,9 @@
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/completion.h>
+#include <linux/scatterlist.h>
+
+struct dma_chan;
 
 /*
  * INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -266,6 +269,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @auto_runtime_pm: the core should ensure a runtime PM reference is held
  *                   while the hardware is prepared, using the parent
  *                   device for the spidev
+ * @max_dma_len: Maximum length of a DMA transfer for the device.
  * @prepare_transfer_hardware: a message will soon arrive from the queue
  *	so the subsystem requests the driver to prepare the transfer hardware
  *	by issuing this call
@@ -348,6 +352,8 @@ struct spi_master {
 #define SPI_MASTER_HALF_DUPLEX	BIT(0)		/* can't do full duplex */
 #define SPI_MASTER_NO_RX	BIT(1)		/* can't do buffer read */
 #define SPI_MASTER_NO_TX	BIT(2)		/* can't do buffer write */
+#define SPI_MASTER_MUST_RX	BIT(3)		/* requires rx */
+#define SPI_MASTER_MUST_TX	BIT(4)		/* requires tx */
 
 	/* lock and mutex for SPI bus locking */
 	spinlock_t		bus_lock_spinlock;
@@ -390,6 +396,17 @@ struct spi_master {
 	void			(*cleanup)(struct spi_device *spi);
 
 	/*
+	 * Used to enable core support for DMA handling, if can_dma()
+	 * exists and returns true then the transfer will be mapped
+	 * prior to transfer_one() being called.  The driver should
+	 * not modify or store xfer and dma_tx and dma_rx must be set
+	 * while the device is prepared.
+	 */
+	bool			(*can_dma)(struct spi_master *master,
+					   struct spi_device *spi,
+					   struct spi_transfer *xfer);
+
+	/*
 	 * These hooks are for drivers that want to use the generic
 	 * master transfer queueing mechanism. If these are used, the
 	 * transfer() function above must NOT be specified by the driver.
@@ -407,7 +424,9 @@ struct spi_master {
 	bool				rt;
 	bool				auto_runtime_pm;
 	bool				cur_msg_prepared;
+	bool				cur_msg_mapped;
 	struct completion		xfer_completion;
+	size_t				max_dma_len;
 
 	int (*prepare_transfer_hardware)(struct spi_master *master);
 	int (*transfer_one_message)(struct spi_master *master,
@@ -428,6 +447,14 @@ struct spi_master {
 
 	/* gpio chip select */
 	int			*cs_gpios;
+
+	/* DMA channels for use with core dmaengine helpers */
+	struct dma_chan		*dma_tx;
+	struct dma_chan		*dma_rx;
+
+	/* dummy data for full duplex devices */
+	void			*dummy_rx;
+	void			*dummy_tx;
 };
 
 static inline void *spi_master_get_devdata(struct spi_master *master)
@@ -512,6 +539,8 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
  *      (optionally) changing the chipselect status, then starting
  *      the next transfer or completing this @spi_message.
  * @transfer_list: transfers are sequenced through @spi_message.transfers
+ * @tx_sg: Scatterlist for transmit, currently not for client use
+ * @rx_sg: Scatterlist for receive, currently not for client use
  *
  * SPI transfers always write the same number of bytes as they read.
  * Protocol drivers should always provide @rx_buf and/or @tx_buf.
@@ -579,6 +608,8 @@ struct spi_transfer {
 
 	dma_addr_t	tx_dma;
 	dma_addr_t	rx_dma;
+	struct sg_table tx_sg;
+	struct sg_table rx_sg;
 
 	unsigned	cs_change:1;
 	unsigned	tx_nbits:3;
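
On the consuming side, a transfer_one() implementation hands the core-built tables straight to dmaengine, much as the reworked s3c64xx prepare_dma() above does. A rough sketch follows — the example_* names and the completion handling are assumptions, not part of this merge:

#include <linux/dmaengine.h>
#include <linux/spi/spi.h>

/* Submit one direction of a core-mapped transfer; sketch only. */
static int example_spi_dma_submit(struct dma_chan *chan, struct sg_table *sgt,
				  enum dma_transfer_direction dir,
				  dma_async_tx_callback done, void *arg)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents, dir,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = done;		/* e.g. completes the driver's xfer completion */
	desc->callback_param = arg;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}

transfer_one() would call this with master->dma_rx/&xfer->rx_sg and master->dma_tx/&xfer->tx_sg for whichever directions can_dma() accepted; the core unmaps both tables again in spi_finalize_current_message() via spi_unmap_msg().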