Diffstat (limited to 'drivers/spi/spi-s3c64xx.c')
 -rw-r--r--  drivers/spi/spi-s3c64xx.c | 166
 1 file changed, 89 insertions(+), 77 deletions(-)
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index da32cd9c9b59..e515b8a6f590 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -576,101 +576,110 @@ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
         return RX_FIFO_LVL(status, sdd);
 }
 
-static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
-                        struct spi_transfer *xfer, int dma_mode)
+static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
+                        struct spi_transfer *xfer)
 {
         void __iomem *regs = sdd->regs;
         unsigned long val;
+        u32 status;
         int ms;
 
         /* millisecs to xfer 'len' bytes @ 'cur_speed' */
         ms = xfer->len * 8 * 1000 / sdd->cur_speed;
         ms += 10; /* some tolerance */
 
-        if (dma_mode) {
-                val = msecs_to_jiffies(ms) + 10;
-                val = wait_for_completion_timeout(&sdd->xfer_completion, val);
-        } else {
-                u32 status;
-                val = msecs_to_loops(ms);
-                do {
+        val = msecs_to_jiffies(ms) + 10;
+        val = wait_for_completion_timeout(&sdd->xfer_completion, val);
+
+        /*
+         * If the previous xfer was completed within timeout, then
+         * proceed further else return -EIO.
+         * DmaTx returns after simply writing data in the FIFO,
+         * w/o waiting for real transmission on the bus to finish.
+         * DmaRx returns only after Dma read data from FIFO which
+         * needs bus transmission to finish, so we don't worry if
+         * Xfer involved Rx(with or without Tx).
+         */
+        if (val && !xfer->rx_buf) {
+                val = msecs_to_loops(10);
+                status = readl(regs + S3C64XX_SPI_STATUS);
+                while ((TX_FIFO_LVL(status, sdd)
+                        || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
+                        && --val) {
+                        cpu_relax();
                         status = readl(regs + S3C64XX_SPI_STATUS);
-                } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
+                }
+
         }
 
-        if (dma_mode) {
-                u32 status;
-
-                /*
-                 * If the previous xfer was completed within timeout, then
-                 * proceed further else return -EIO.
-                 * DmaTx returns after simply writing data in the FIFO,
-                 * w/o waiting for real transmission on the bus to finish.
-                 * DmaRx returns only after Dma read data from FIFO which
-                 * needs bus transmission to finish, so we don't worry if
-                 * Xfer involved Rx(with or without Tx).
-                 */
-                if (val && !xfer->rx_buf) {
-                        val = msecs_to_loops(10);
-                        status = readl(regs + S3C64XX_SPI_STATUS);
-                        while ((TX_FIFO_LVL(status, sdd)
-                                || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
-                                && --val) {
-                                cpu_relax();
-                                status = readl(regs + S3C64XX_SPI_STATUS);
-                        }
+        /* If timed out while checking rx/tx status return error */
+        if (!val)
+                return -EIO;
 
-                }
+        return 0;
+}
 
-                /* If timed out while checking rx/tx status return error */
-                if (!val)
-                        return -EIO;
-        } else {
-                int loops;
-                u32 cpy_len;
-                u8 *buf;
-
-                /* If it was only Tx */
-                if (!xfer->rx_buf) {
-                        sdd->state &= ~TXBUSY;
-                        return 0;
-                }
+static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
+                        struct spi_transfer *xfer)
+{
+        void __iomem *regs = sdd->regs;
+        unsigned long val;
+        u32 status;
+        int loops;
+        u32 cpy_len;
+        u8 *buf;
+        int ms;
 
-                /*
-                 * If the receive length is bigger than the controller fifo
-                 * size, calculate the loops and read the fifo as many times.
-                 * loops = length / max fifo size (calculated by using the
-                 * fifo mask).
-                 * For any size less than the fifo size the below code is
-                 * executed atleast once.
-                 */
-                loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
-                buf = xfer->rx_buf;
-                do {
-                        /* wait for data to be received in the fifo */
-                        cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
-                                                (loops ? ms : 0));
+        /* millisecs to xfer 'len' bytes @ 'cur_speed' */
+        ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+        ms += 10; /* some tolerance */
 
-                        switch (sdd->cur_bpw) {
-                        case 32:
-                                ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
-                                        buf, cpy_len / 4);
-                                break;
-                        case 16:
-                                ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
-                                        buf, cpy_len / 2);
-                                break;
-                        default:
-                                ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
-                                        buf, cpy_len);
-                                break;
-                        }
+        val = msecs_to_loops(ms);
+        do {
+                status = readl(regs + S3C64XX_SPI_STATUS);
+        } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
 
-                        buf = buf + cpy_len;
-                } while (loops--);
-                sdd->state &= ~RXBUSY;
+
+        /* If it was only Tx */
+        if (!xfer->rx_buf) {
+                sdd->state &= ~TXBUSY;
+                return 0;
         }
 
+        /*
+         * If the receive length is bigger than the controller fifo
+         * size, calculate the loops and read the fifo as many times.
+         * loops = length / max fifo size (calculated by using the
+         * fifo mask).
+         * For any size less than the fifo size the below code is
+         * executed atleast once.
+         */
+        loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+        buf = xfer->rx_buf;
+        do {
+                /* wait for data to be received in the fifo */
+                cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
+                                        (loops ? ms : 0));
+
+                switch (sdd->cur_bpw) {
+                case 32:
+                        ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
+                                buf, cpy_len / 4);
+                        break;
+                case 16:
+                        ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
+                                buf, cpy_len / 2);
+                        break;
+                default:
+                        ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
+                                buf, cpy_len);
+                        break;
+                }
+
+                buf = buf + cpy_len;
+        } while (loops--);
+        sdd->state &= ~RXBUSY;
+
         return 0;
 }
 
@@ -902,7 +911,10 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
 
         spin_unlock_irqrestore(&sdd->lock, flags);
 
-        status = wait_for_xfer(sdd, xfer, use_dma);
+        if (use_dma)
+                status = wait_for_dma(sdd, xfer);
+        else
+                status = wait_for_pio(sdd, xfer);
 
         if (status) {
                 dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
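
Note (not part of the commit): both new helpers keep the transfer-time estimate already used by wait_for_xfer(), i.e. ms = len * 8 * 1000 / cur_speed plus 10 ms of tolerance. The standalone C sketch below only illustrates that arithmetic; the name xfer_timeout_ms and the printf harness are hypothetical, and in the driver the resulting value is further converted with msecs_to_jiffies() on the DMA path or msecs_to_loops() on the PIO path.

#include <stdio.h>

/*
 * Sketch of the timeout estimate used by wait_for_dma() and wait_for_pio():
 * the time needed to clock 'len' bytes (8 bits each) at 'speed_hz' bits/s,
 * expressed in milliseconds, plus 10 ms of tolerance.  Names are hypothetical.
 */
static unsigned int xfer_timeout_ms(unsigned int len, unsigned int speed_hz)
{
        return len * 8 * 1000U / speed_hz + 10;
}

int main(void)
{
        /* e.g. 4096 bytes at 1 MHz -> ~32 ms of bus time + 10 ms tolerance */
        printf("%u ms\n", xfer_timeout_ms(4096, 1000000));
        return 0;
}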