author		Girish K S <girishks2000@gmail.com>	2013-05-20 02:51:32 -0400
committer	Mark Brown <broonie@linaro.org>	2013-06-19 14:07:42 -0400
commit		7e9955567eadb83642be55e6ae5853412dd48d14 (patch)
tree		dc4077a1d01bd7c87d52892bb7a99b91862f75d9
parent		7d132055814ef17a6c7b69f342244c410a5e000f (diff)
spi: s3c64xx: added support for polling mode
The 64xx SPI driver currently supports only partial polling mode: just the last chunk of the transfer length is transmitted or received in polling mode. Some SoCs that adopt this controller might not have a DMA interface. This patch adds support for complete polling mode and gives the user the flexibility to select poll/DMA mode.

Signed-off-by: Girish K S <ks.giri@samsung.com>
Signed-off-by: Mark Brown <broonie@linaro.org>
-rw-r--r--	drivers/spi/spi-s3c64xx.c	153
1 file changed, 104 insertions(+), 49 deletions(-)
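The patch hinges on a new per-port quirk bit (S3C64XX_SPI_QUIRK_POLL, tested via the new is_polling() macro) that steers the driver down a pure polling path whenever DMA is unavailable. As a rough illustration of how a DMA-less SoC could be described once the quirks field exists, here is a hypothetical port definition; the initializer below is a sketch, not part of this commit, and the FIFO/level values are illustrative:

/* Hypothetical port config for a DMA-less SoC (illustration only) */
static struct s3c64xx_spi_port_config polling_only_port_config = {
	.fifo_lvl_mask	= { 0x7f },			/* illustrative FIFO mask */
	.rx_lvl_offset	= 13,				/* illustrative */
	.tx_st_done	= 21,				/* illustrative */
	.quirks		= S3C64XX_SPI_QUIRK_POLL,	/* force polled I/O */
};

In this commit itself the quirk is only set at probe time, when the platform DMA resources are missing (see the s3c64xx_spi_probe hunk at the end of the diff).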
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 5000586cb98d..0a806924b906 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -39,6 +39,7 @@
 #endif
 
 #define MAX_SPI_PORTS		3
+#define S3C64XX_SPI_QUIRK_POLL	(1 << 0)
 
 /* Registers and bit-fields */
 
@@ -130,6 +131,7 @@
 #define S3C64XX_SPI_TRAILCNT		S3C64XX_SPI_MAX_TRAILCNT
 
 #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+#define is_polling(x)	(x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
 
 #define RXBUSY    (1<<2)
 #define TXBUSY    (1<<3)
@@ -158,6 +160,7 @@ struct s3c64xx_spi_port_config {
 	int	fifo_lvl_mask[MAX_SPI_PORTS];
 	int	rx_lvl_offset;
 	int	tx_st_done;
+	int	quirks;
 	bool	high_speed;
 	bool	clk_from_cmu;
 };
@@ -344,8 +347,12 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 {
 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
 
-	/* Acquire DMA channels */
-	while (!acquire_dma(sdd))
+	/*
+	 * If no DMA resource was available at probe time, there is
+	 * no need to continue with DMA requests; otherwise acquire
+	 * the DMA channels.
+	 */
+	while (!is_polling(sdd) && !acquire_dma(sdd))
 		usleep_range(10000, 11000);
 
 	pm_runtime_get_sync(&sdd->pdev->dev);
@@ -358,9 +365,12 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
 
 	/* Free DMA channels */
-	sdd->ops->release((enum dma_ch)sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
-	sdd->ops->release((enum dma_ch)sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
-
+	if (!is_polling(sdd)) {
+		sdd->ops->release((enum dma_ch)sdd->rx_dma.ch,
+					&s3c64xx_spi_dma_client);
+		sdd->ops->release((enum dma_ch)sdd->tx_dma.ch,
+					&s3c64xx_spi_dma_client);
+	}
 	pm_runtime_put(&sdd->pdev->dev);
 
 	return 0;
@@ -464,8 +474,10 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
 
 	/* Free DMA channels */
-	dma_release_channel(sdd->rx_dma.ch);
-	dma_release_channel(sdd->tx_dma.ch);
+	if (!is_polling(sdd)) {
+		dma_release_channel(sdd->rx_dma.ch);
+		dma_release_channel(sdd->tx_dma.ch);
+	}
 
 	pm_runtime_put(&sdd->pdev->dev);
 	return 0;
@@ -566,6 +578,30 @@ static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
 
 	cs = spi->controller_data;
 	gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
+
+	/* Start the signals */
+	writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+}
+
+static u32 wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
+				int timeout_ms)
+{
+	void __iomem *regs = sdd->regs;
+	unsigned long val = 1;
+	u32 status;
+
+	/* max fifo depth available */
+	u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
+
+	if (timeout_ms)
+		val = msecs_to_loops(timeout_ms);
+
+	do {
+		status = readl(regs + S3C64XX_SPI_STATUS);
+	} while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);
+
+	/* return the actual received data length */
+	return RX_FIFO_LVL(status, sdd);
 }
 
 static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
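The new wait_for_timeout() helper above busy-waits on the status register until the RX FIFO fills to its maximum usable depth or a loop budget expires, then returns the number of bytes actually present. A minimal standalone model of the budget arithmetic follows, with assumed values for loops_per_jiffy and HZ (the real values come from the running kernel):

#include <stdio.h>

#define HZ 100					/* assumed tick rate */
static const unsigned long loops_per_jiffy = 500000UL;	/* assumed */

/* Mirror of the driver's msecs_to_loops(t) macro: ms -> loop iterations */
static unsigned long msecs_to_loops(unsigned long t)
{
	return loops_per_jiffy / 1000 * HZ * t;
}

int main(void)
{
	/* 500000 / 1000 * 100 * 10 = 500000 iterations for a 10 ms budget */
	printf("10 ms ~ %lu status reads\n", msecs_to_loops(10));
	return 0;
}

Note that a timeout_ms of 0 leaves val at 1 in wait_for_timeout(), so the FIFO level is sampled exactly once; the polled receive path below relies on this to drain the final, possibly partial, chunk without waiting for a full FIFO.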
@@ -590,20 +626,19 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
 		} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
 	}
 
-	if (!val)
-		return -EIO;
-
 	if (dma_mode) {
 		u32 status;
 
 		/*
+		 * If the previous xfer completed within the timeout, proceed
+		 * further; otherwise return -EIO.
 		 * DmaTx returns after simply writing data in the FIFO,
 		 * w/o waiting for real transmission on the bus to finish.
 		 * DmaRx returns only after Dma read data from FIFO which
 		 * needs bus transmission to finish, so we don't worry if
 		 * Xfer involved Rx(with or without Tx).
 		 */
-		if (xfer->rx_buf == NULL) {
+		if (val && !xfer->rx_buf) {
 			val = msecs_to_loops(10);
 			status = readl(regs + S3C64XX_SPI_STATUS);
 			while ((TX_FIFO_LVL(status, sdd)
@@ -613,30 +648,53 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
 			status = readl(regs + S3C64XX_SPI_STATUS);
 		}
 
-		if (!val)
-			return -EIO;
 		}
+
+		/* If we timed out while checking the rx/tx status, return an error */
+		if (!val)
+			return -EIO;
 	} else {
+		int loops;
+		u32 cpy_len;
+		u8 *buf;
+
 		/* If it was only Tx */
-		if (xfer->rx_buf == NULL) {
+		if (!xfer->rx_buf) {
 			sdd->state &= ~TXBUSY;
 			return 0;
 		}
 
-		switch (sdd->cur_bpw) {
-		case 32:
-			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
-				xfer->rx_buf, xfer->len / 4);
-			break;
-		case 16:
-			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
-				xfer->rx_buf, xfer->len / 2);
-			break;
-		default:
-			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
-				xfer->rx_buf, xfer->len);
-			break;
-		}
+		/*
+		 * If the receive length is bigger than the controller FIFO
+		 * size, calculate how many loops are needed and read the
+		 * FIFO that many times:
+		 * loops = length / max FIFO size (derived from the FIFO mask).
+		 * For any length smaller than the FIFO size, the code below
+		 * executes at least once.
+		 */
+		loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+		buf = xfer->rx_buf;
+		do {
+			/* wait for data to be received in the fifo */
+			cpy_len = wait_for_timeout(sdd, (loops ? ms : 0));
+
+			switch (sdd->cur_bpw) {
+			case 32:
+				ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
+					buf, cpy_len / 4);
+				break;
+			case 16:
+				ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
+					buf, cpy_len / 2);
+				break;
+			default:
+				ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
+					buf, cpy_len);
+				break;
+			}
+
+			buf = buf + cpy_len;
+		} while (loops--);
 		sdd->state &= ~RXBUSY;
 	}
 
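The polled receive path above drains the RX FIFO in FIFO-sized chunks: full chunks wait (with a timeout) for a full FIFO, and the final pass copies whatever remains. A standalone sketch of that chunking arithmetic, assuming a 64-byte FIFO and a 200-byte transfer (both values illustrative, not from the driver):

#include <stdio.h>

int main(void)
{
	const unsigned int fifo_depth = 64;	/* assumed (FIFO_LVL_MASK >> 1) + 1 */
	const unsigned int xfer_len = 200;	/* illustrative transfer length */

	/* Mirrors loops = xfer->len / fifo depth; the do/while then runs
	 * loops + 1 times, so the last pass picks up the remainder. */
	unsigned int loops = xfer_len / fifo_depth;	/* 200 / 64 = 3 */
	unsigned int done = 0;

	do {
		unsigned int cpy_len = loops ? fifo_depth
					     : xfer_len - done;	/* final chunk */
		done += cpy_len;
		printf("copied %u bytes (total %u)\n", cpy_len, done);
	} while (loops--);

	/* Four passes: 64 + 64 + 64 + 8 = 200 bytes */
	return 0;
}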
@@ -652,6 +710,9 @@ static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
 		sdd->tgl_spi = NULL;
 
 	gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
+
+	/* Quiesce the signals */
+	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
 }
 
 static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
@@ -733,7 +794,7 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
 	struct device *dev = &sdd->pdev->dev;
 	struct spi_transfer *xfer;
 
-	if (msg->is_dma_mapped)
+	if (is_polling(sdd) || msg->is_dma_mapped)
 		return 0;
 
 	/* First mark all xfer unmapped */
@@ -782,7 +843,7 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
 	struct device *dev = &sdd->pdev->dev;
 	struct spi_transfer *xfer;
 
-	if (msg->is_dma_mapped)
+	if (is_polling(sdd) || msg->is_dma_mapped)
 		return;
 
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
@@ -861,8 +922,9 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
 
 		/* Polling method for xfers not bigger than FIFO capacity */
 		use_dma = 0;
-		if (sdd->rx_dma.ch && sdd->tx_dma.ch &&
-		    (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1)))
+		if (!is_polling(sdd) &&
+			(sdd->rx_dma.ch && sdd->tx_dma.ch &&
+			(xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
 			use_dma = 1;
 
 		spin_lock_irqsave(&sdd->lock, flags);
@@ -876,17 +938,10 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
 		/* Slave Select */
 		enable_cs(sdd, spi);
 
-		/* Start the signals */
-		writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-
 		spin_unlock_irqrestore(&sdd->lock, flags);
 
 		status = wait_for_xfer(sdd, xfer, use_dma);
 
-		/* Quiese the signals */
-		writel(S3C64XX_SPI_SLAVE_SIG_INACT,
-		       sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-
 		if (status) {
 			dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
 				xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
@@ -1287,19 +1342,19 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 	if (!sdd->pdev->dev.of_node) {
 		res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
 		if (!res) {
-			dev_err(&pdev->dev, "Unable to get SPI tx dma "
-					"resource\n");
-			return -ENXIO;
-		}
-		sdd->tx_dma.dmach = res->start;
+			dev_warn(&pdev->dev, "Unable to get SPI tx dma "
+					"resource. Switching to poll mode\n");
+			sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
+		} else
+			sdd->tx_dma.dmach = res->start;
 
 		res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
 		if (!res) {
-			dev_err(&pdev->dev, "Unable to get SPI rx dma "
-					"resource\n");
-			return -ENXIO;
-		}
-		sdd->rx_dma.dmach = res->start;
+			dev_warn(&pdev->dev, "Unable to get SPI rx dma "
+					"resource. Switching to poll mode\n");
+			sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
+		} else
+			sdd->rx_dma.dmach = res->start;
 	}
 
 	sdd->tx_dma.direction = DMA_MEM_TO_DEV;