Diffstat (limited to 'drivers/spi/spi-s3c64xx.c')
-rw-r--r--	drivers/spi/spi-s3c64xx.c	207
1 file changed, 99 insertions(+), 108 deletions(-)

diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index ae907dde1371..25c9bd409a87 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -381,7 +381,7 @@ static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
 #else
 
 static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
-			unsigned len, dma_addr_t buf)
+			struct sg_table *sgt)
 {
 	struct s3c64xx_spi_driver_data *sdd;
 	struct dma_slave_config config;
@@ -407,8 +407,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
 		dmaengine_slave_config(dma->ch, &config);
 	}
 
-	desc = dmaengine_prep_slave_single(dma->ch, buf, len,
-					dma->direction, DMA_PREP_INTERRUPT);
+	desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
+				       dma->direction, DMA_PREP_INTERRUPT);
 
 	desc->callback = s3c64xx_spi_dmacb;
 	desc->callback_param = dma;
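
These two hunks move prepare_dma() off a single pre-mapped buffer (dmaengine_prep_slave_single() on a dma_addr_t plus length) and onto the scatter/gather table the SPI core maps for each transfer (dmaengine_prep_slave_sg() on sgt->sgl / sgt->nents). The fragment below only illustrates that dmaengine pattern; it is not code from this driver, and the helper name and error handling are invented for the example.

/* Illustrative only: queue an already-mapped sg_table on a slave DMA
 * channel, as the reworked prepare_dma() does.  The helper name and the
 * error handling are invented for the example.
 */
static int example_submit_sg(struct dma_chan *ch, struct sg_table *sgt,
			     enum dma_transfer_direction dir,
			     dma_async_tx_callback done, void *param)
{
	struct dma_async_tx_descriptor *desc;

	/* One descriptor covering every segment of the scatterlist */
	desc = dmaengine_prep_slave_sg(ch, sgt->sgl, sgt->nents, dir,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = done;		/* e.g. s3c64xx_spi_dmacb */
	desc->callback_param = param;

	dmaengine_submit(desc);		/* queue the descriptor ... */
	dma_async_issue_pending(ch);	/* ... and kick the channel */

	return 0;
}
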
@@ -515,7 +515,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
 		if (dma_mode) {
 			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
+#ifndef CONFIG_S3C_DMA
+			prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
+#else
 			prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
+#endif
 		} else {
 			switch (sdd->cur_bpw) {
 			case 32:
@@ -547,7 +551,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
 					| S3C64XX_SPI_PACKET_CNT_EN,
 					regs + S3C64XX_SPI_PACKET_CNT);
+#ifndef CONFIG_S3C_DMA
+			prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
+#else
 			prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
+#endif
 		}
 	}
 
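
With the dmaengine path selected (CONFIG_S3C_DMA not set), enable_datapath() now hands prepare_dma() the core-mapped xfer->tx_sg / xfer->rx_sg tables, while the legacy S3C-DMA build keeps the old (length, dma_addr_t) call. A controller driver opts in to that core mapping by publishing its DMA channels and a can_dma() callback, roughly as sketched below; the function names and the FIFO threshold are illustrative, not this driver's values.

/* Rough sketch of opting in to the SPI core's DMA mapping, which is what
 * fills xfer->tx_sg / xfer->rx_sg.  Names and threshold are illustrative.
 */
static bool example_can_dma(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	return xfer->len > 64;	/* only DMA transfers larger than the FIFO */
}

static void example_register_dma(struct spi_master *master,
				 struct dma_chan *rx, struct dma_chan *tx)
{
	master->dma_rx = rx;
	master->dma_tx = tx;
	/* When can_dma() returns true the core maps the transfer into
	 * xfer->tx_sg / xfer->rx_sg before calling transfer_one(). */
	master->can_dma = example_can_dma;
}
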
@@ -555,23 +563,6 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
 }
 
-static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
-						struct spi_device *spi)
-{
-	if (sdd->tgl_spi != NULL) {	/* If last device toggled after mssg */
-		if (sdd->tgl_spi != spi) {	/* if last mssg on diff device */
-			/* Deselect the last toggled device */
-			if (spi->cs_gpio >= 0)
-				gpio_set_value(spi->cs_gpio,
-					spi->mode & SPI_CS_HIGH ? 0 : 1);
-		}
-		sdd->tgl_spi = NULL;
-	}
-
-	if (spi->cs_gpio >= 0)
-		gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 1 : 0);
-}
-
 static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
 					int timeout_ms)
 {
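
This hunk drops the driver's own enable_cs() helper (its disable_cs() counterpart goes away in a later hunk), so per-message GPIO chip-select toggling no longer lives in this file. For reference, the polarity handling those helpers performed boils down to the sketch below; the function name is hypothetical and it only restates what the removed code did with spi->cs_gpio and SPI_CS_HIGH.

/* Sketch only: assert or deassert the GPIO chip select while honouring
 * SPI_CS_HIGH, as the removed enable_cs()/disable_cs() helpers did.
 */
static void example_gpio_cs(struct spi_device *spi, bool enable)
{
	int level = (spi->mode & SPI_CS_HIGH) ? enable : !enable;

	if (spi->cs_gpio >= 0)
		gpio_set_value(spi->cs_gpio, level);
}
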
@@ -593,112 +584,111 @@ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
 	return RX_FIFO_LVL(status, sdd);
 }
 
-static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
-				struct spi_transfer *xfer, int dma_mode)
+static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
+			struct spi_transfer *xfer)
 {
 	void __iomem *regs = sdd->regs;
 	unsigned long val;
+	u32 status;
 	int ms;
 
 	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
 	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
 	ms += 10; /* some tolerance */
 
-	if (dma_mode) {
-		val = msecs_to_jiffies(ms) + 10;
-		val = wait_for_completion_timeout(&sdd->xfer_completion, val);
-	} else {
-		u32 status;
-		val = msecs_to_loops(ms);
-		do {
+	val = msecs_to_jiffies(ms) + 10;
+	val = wait_for_completion_timeout(&sdd->xfer_completion, val);
+
+	/*
+	 * If the previous xfer was completed within timeout, then
+	 * proceed further else return -EIO.
+	 * DmaTx returns after simply writing data in the FIFO,
+	 * w/o waiting for real transmission on the bus to finish.
+	 * DmaRx returns only after Dma read data from FIFO which
+	 * needs bus transmission to finish, so we don't worry if
+	 * Xfer involved Rx(with or without Tx).
+	 */
+	if (val && !xfer->rx_buf) {
+		val = msecs_to_loops(10);
+		status = readl(regs + S3C64XX_SPI_STATUS);
+		while ((TX_FIFO_LVL(status, sdd)
+			|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
+			&& --val) {
+			cpu_relax();
 			status = readl(regs + S3C64XX_SPI_STATUS);
-		} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
+		}
+
 	}
 
-	if (dma_mode) {
-		u32 status;
-
-		/*
-		 * If the previous xfer was completed within timeout, then
-		 * proceed further else return -EIO.
-		 * DmaTx returns after simply writing data in the FIFO,
-		 * w/o waiting for real transmission on the bus to finish.
-		 * DmaRx returns only after Dma read data from FIFO which
-		 * needs bus transmission to finish, so we don't worry if
-		 * Xfer involved Rx(with or without Tx).
-		 */
-		if (val && !xfer->rx_buf) {
-			val = msecs_to_loops(10);
-			status = readl(regs + S3C64XX_SPI_STATUS);
-			while ((TX_FIFO_LVL(status, sdd)
-				|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
-				&& --val) {
-				cpu_relax();
-				status = readl(regs + S3C64XX_SPI_STATUS);
-			}
+	/* If timed out while checking rx/tx status return error */
+	if (!val)
+		return -EIO;
 
-		}
+	return 0;
+}
 
-		/* If timed out while checking rx/tx status return error */
-		if (!val)
-			return -EIO;
-	} else {
-		int loops;
-		u32 cpy_len;
-		u8 *buf;
-
-		/* If it was only Tx */
-		if (!xfer->rx_buf) {
-			sdd->state &= ~TXBUSY;
-			return 0;
-		}
+static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
+			struct spi_transfer *xfer)
+{
+	void __iomem *regs = sdd->regs;
+	unsigned long val;
+	u32 status;
+	int loops;
+	u32 cpy_len;
+	u8 *buf;
+	int ms;
 
-		/*
-		 * If the receive length is bigger than the controller fifo
-		 * size, calculate the loops and read the fifo as many times.
-		 * loops = length / max fifo size (calculated by using the
-		 * fifo mask).
-		 * For any size less than the fifo size the below code is
-		 * executed atleast once.
-		 */
-		loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
-		buf = xfer->rx_buf;
-		do {
-			/* wait for data to be received in the fifo */
-			cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
-						(loops ? ms : 0));
+	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
+	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+	ms += 10; /* some tolerance */
 
-			switch (sdd->cur_bpw) {
-			case 32:
-				ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len / 4);
-				break;
-			case 16:
-				ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len / 2);
-				break;
-			default:
-				ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len);
-				break;
-			}
+	val = msecs_to_loops(ms);
+	do {
+		status = readl(regs + S3C64XX_SPI_STATUS);
+	} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
 
-			buf = buf + cpy_len;
-		} while (loops--);
-		sdd->state &= ~RXBUSY;
+
+	/* If it was only Tx */
+	if (!xfer->rx_buf) {
+		sdd->state &= ~TXBUSY;
+		return 0;
 	}
 
-	return 0;
-}
+	/*
+	 * If the receive length is bigger than the controller fifo
+	 * size, calculate the loops and read the fifo as many times.
+	 * loops = length / max fifo size (calculated by using the
+	 * fifo mask).
+	 * For any size less than the fifo size the below code is
+	 * executed atleast once.
+	 */
+	loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+	buf = xfer->rx_buf;
+	do {
+		/* wait for data to be received in the fifo */
+		cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
+					(loops ? ms : 0));
+
+		switch (sdd->cur_bpw) {
+		case 32:
+			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
+				buf, cpy_len / 4);
+			break;
+		case 16:
+			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
+				buf, cpy_len / 2);
+			break;
+		default:
+			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
+				buf, cpy_len);
+			break;
+		}
 
-static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
-						struct spi_device *spi)
-{
-	if (sdd->tgl_spi == spi)
-		sdd->tgl_spi = NULL;
+		buf = buf + cpy_len;
+	} while (loops--);
+	sdd->state &= ~RXBUSY;
 
-	if (spi->cs_gpio >= 0)
-		gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
+	return 0;
 }
 
 static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
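
After the split, wait_for_dma() simply sleeps on sdd->xfer_completion (signalled from the DMA callback) with a timeout budgeted from the transfer length and clock, while wait_for_pio() keeps the busy-wait on the RX FIFO level. The sketch below shows the generic completion-with-timeout pattern wait_for_dma() relies on; apart from the standard <linux/completion.h> and jiffies helpers, the names are made up, and the numbers in the comment are only a worked example.

/* Sketch of the completion pattern behind wait_for_dma().  The struct and
 * function names are hypothetical; only the kernel completion/jiffies API
 * is real.
 */
struct example_xfer {
	struct completion done;	/* init_completion() called at setup time */
};

/* dmaengine invokes this callback when the transfer descriptor completes */
static void example_dma_done(void *param)
{
	struct example_xfer *x = param;

	complete(&x->done);
}

static int example_wait(struct example_xfer *x, unsigned int len, u32 speed_hz)
{
	unsigned int ms;

	/* Time on the wire plus some slack, as the driver computes it:
	 * e.g. 256 bytes at 1 MHz -> 256 * 8 * 1000 / 1000000 = 2 ms, +10.
	 */
	ms = len * 8 * 1000 / speed_hz;
	ms += 10;

	if (!wait_for_completion_timeout(&x->done, msecs_to_jiffies(ms)))
		return -EIO;	/* timed out: DMA never signalled completion */

	return 0;
}
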
@@ -929,7 +919,10 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
 
 	spin_unlock_irqrestore(&sdd->lock, flags);
 
-	status = wait_for_xfer(sdd, xfer, use_dma);
+	if (use_dma)
+		status = wait_for_dma(sdd, xfer);
+	else
+		status = wait_for_pio(sdd, xfer);
 
 	if (status) {
 		dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
@@ -1092,14 +1085,12 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
 
 	pm_runtime_put(&sdd->pdev->dev);
 	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-	disable_cs(sdd, spi);
 	return 0;
 
 setup_exit:
 	pm_runtime_put(&sdd->pdev->dev);
 	/* setup() returns with device de-selected */
 	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-	disable_cs(sdd, spi);
 
 	gpio_free(cs->line);
 	spi_set_ctldata(spi, NULL);