author		Brian Niebuhr <bniebuhr@efjohnson.com>	2010-10-06 07:33:10 -0400
committer	Sekhar Nori <nsekhar@ti.com>	2010-11-18 08:08:36 -0500
commit		87467bd9052725283b9a9f4b1b310fed8744fb1e (patch)
tree		522a1d71933440af2b16072bbf95a138dd08392e
parent		6dbd29b27bd2627ba0025a6cff14381e69512cdf (diff)
spi: davinci: let DMA operation be specified on per-device basis
Let DMA operation be specified on a per-device basis instead of
selecting it once during probe.

A side effect of this is the need to combine the PIO and DMA
buffer txrx_bufs routine. This is good since they anyway share
some common functionality.

Signed-off-by: Brian Niebuhr <bniebuhr@efjohnson.com>
Tested-By: Michael Williamson <michael.williamson@criticallink.com>
Signed-off-by: Sekhar Nori <nsekhar@ti.com>
-rw-r--r--	arch/arm/mach-davinci/include/mach/spi.h	1
-rw-r--r--	drivers/spi/davinci_spi.c	342
2 files changed, 156 insertions, 187 deletions
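With this patch, the I/O mode is carried in the per-device struct davinci_spi_config that board
code passes through spi_board_info.controller_data, so one slave can use EDMA while another stays
on interrupt or polled I/O; devices that pass no controller_data keep using davinci_spi_default_cfg
as before. A minimal board-file sketch follows; the device names, speeds, and chip selects are
hypothetical and not part of this patch:

/* Hypothetical board-file sketch -- names and rates are illustrative only. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <mach/spi.h>

/* Large flash transfers: request EDMA via the new SPI_IO_TYPE_DMA. */
static struct davinci_spi_config example_flash_spi_cfg = {
	.io_type	= SPI_IO_TYPE_DMA,
};

/* Small control transfers: stay with polled I/O. */
static struct davinci_spi_config example_codec_spi_cfg = {
	.io_type	= SPI_IO_TYPE_POLL,
};

static struct spi_board_info example_spi_info[] __initdata = {
	{
		.modalias	 = "m25p80",
		.controller_data = &example_flash_spi_cfg,
		.max_speed_hz	 = 30000000,
		.bus_num	 = 0,
		.chip_select	 = 0,
	},
	{
		.modalias	 = "spidev",
		.controller_data = &example_codec_spi_cfg,
		.max_speed_hz	 = 2000000,
		.bus_num	 = 0,
		.chip_select	 = 1,
	},
};

static int __init example_spi_init(void)
{
	/* Register the table; the driver reads io_type per device at transfer time. */
	return spi_register_board_info(example_spi_info,
				       ARRAY_SIZE(example_spi_info));
}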
diff --git a/arch/arm/mach-davinci/include/mach/spi.h b/arch/arm/mach-davinci/include/mach/spi.h
index f7586a03678c..b3ab7d04943a 100644
--- a/arch/arm/mach-davinci/include/mach/spi.h
+++ b/arch/arm/mach-davinci/include/mach/spi.h
@@ -41,6 +41,7 @@ struct davinci_spi_config {
 	u8	parity_enable;
 #define SPI_IO_TYPE_INTR	0
 #define SPI_IO_TYPE_POLL	1
+#define SPI_IO_TYPE_DMA		2
 	u8	io_type;
 	u8	timer_disable;
 	u8	c2tdelay;
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c
index 6094e3a07853..5fe298099a1a 100644
--- a/drivers/spi/davinci_spi.c
+++ b/drivers/spi/davinci_spi.c
@@ -500,6 +500,25 @@ out:
 	return errors;
 }
 
+static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data)
+{
+	struct davinci_spi *davinci_spi = data;
+	struct davinci_spi_dma *davinci_spi_dma = &davinci_spi->dma_channels;
+
+	edma_stop(lch);
+
+	if (status == DMA_COMPLETE) {
+		if (lch == davinci_spi_dma->dma_rx_channel)
+			davinci_spi->rcount = 0;
+		if (lch == davinci_spi_dma->dma_tx_channel)
+			davinci_spi->wcount = 0;
+	}
+
+	if ((!davinci_spi->wcount && !davinci_spi->rcount) ||
+	    (status != DMA_COMPLETE))
+		complete(&davinci_spi->done);
+}
+
 /**
  * davinci_spi_bufs - functions which will handle transfer data
  * @spi: spi device on which data transfer to be done
@@ -509,25 +528,30 @@ out:
  * of SPI controller and then wait until the completion will be marked
  * by the IRQ Handler.
  */
-static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
+static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
 {
 	struct davinci_spi *davinci_spi;
-	int ret;
+	int data_type, ret;
 	u32 tx_data, data1_reg_val;
 	u32 errors = 0;
 	struct davinci_spi_config *spicfg;
 	struct davinci_spi_platform_data *pdata;
+	unsigned uninitialized_var(rx_buf_count);
+	struct device *sdev;
 
 	davinci_spi = spi_master_get_devdata(spi->master);
 	pdata = davinci_spi->pdata;
 	spicfg = (struct davinci_spi_config *)spi->controller_data;
 	if (!spicfg)
 		spicfg = &davinci_spi_default_cfg;
+	sdev = davinci_spi->bitbang.master->dev.parent;
+
+	/* convert len to words based on bits_per_word */
+	data_type = davinci_spi->bytes_per_word[spi->chip_select];
 
 	davinci_spi->tx = t->tx_buf;
 	davinci_spi->rx = t->rx_buf;
-	davinci_spi->wcount = t->len /
-			davinci_spi->bytes_per_word[spi->chip_select];
+	davinci_spi->wcount = t->len / data_type;
 	davinci_spi->rcount = davinci_spi->wcount;
 
 	data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);
@@ -535,20 +559,117 @@ static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
 	/* Enable SPI */
 	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
 
-	if (spicfg->io_type == SPI_IO_TYPE_INTR) {
+	INIT_COMPLETION(davinci_spi->done);
+
+	if (spicfg->io_type == SPI_IO_TYPE_INTR)
 		set_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKINT);
-		INIT_COMPLETION(davinci_spi->done);
-	}
 
-	/* start the transfer */
-	davinci_spi->wcount--;
-	tx_data = davinci_spi->get_tx(davinci_spi);
-	data1_reg_val &= 0xFFFF0000;
-	data1_reg_val |= tx_data & 0xFFFF;
-	iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
+	if (spicfg->io_type != SPI_IO_TYPE_DMA) {
+		/* start the transfer */
+		davinci_spi->wcount--;
+		tx_data = davinci_spi->get_tx(davinci_spi);
+		data1_reg_val &= 0xFFFF0000;
+		data1_reg_val |= tx_data & 0xFFFF;
+		iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
+	} else {
+		struct davinci_spi_dma *davinci_spi_dma;
+		unsigned long tx_reg, rx_reg;
+		struct edmacc_param param;
+		void *rx_buf;
+
+		davinci_spi_dma = &davinci_spi->dma_channels;
+
+		tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
+		rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;
+
+		/*
+		 * Transmit DMA setup
+		 *
+		 * If there is transmit data, map the transmit buffer, set it
+		 * as the source of data and set the source B index to data
+		 * size. If there is no transmit data, set the transmit register
+		 * as the source of data, and set the source B index to zero.
+		 *
+		 * The destination is always the transmit register itself. And
+		 * the destination never increments.
+		 */
+
+		if (t->tx_buf) {
+			t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf,
+					davinci_spi->wcount, DMA_TO_DEVICE);
+			if (dma_mapping_error(&spi->dev, t->tx_dma)) {
+				dev_dbg(sdev, "Unable to DMA map %d bytes"
+						"TX buffer\n",
+						davinci_spi->wcount);
+				return -ENOMEM;
+			}
+		}
+
+		param.opt = TCINTEN | EDMA_TCC(davinci_spi_dma->dma_tx_channel);
+		param.src = t->tx_buf ? t->tx_dma : tx_reg;
+		param.a_b_cnt = davinci_spi->wcount << 16 | data_type;
+		param.dst = tx_reg;
+		param.src_dst_bidx = t->tx_buf ? data_type : 0;
+		param.link_bcntrld = 0xffff;
+		param.src_dst_cidx = 0;
+		param.ccnt = 1;
+		edma_write_slot(davinci_spi_dma->dma_tx_channel, &param);
+		edma_link(davinci_spi_dma->dma_tx_channel,
+				davinci_spi_dma->dummy_param_slot);
+
+		/*
+		 * Receive DMA setup
+		 *
+		 * If there is receive buffer, use it to receive data. If there
+		 * is none provided, use a temporary receive buffer. Set the
+		 * destination B index to 0 so effectively only one byte is used
+		 * in the temporary buffer (address does not increment).
+		 *
+		 * The source of receive data is the receive data register. The
+		 * source address never increments.
+		 */
+
+		if (t->rx_buf) {
+			rx_buf = t->rx_buf;
+			rx_buf_count = davinci_spi->rcount;
+		} else {
+			rx_buf = davinci_spi->rx_tmp_buf;
+			rx_buf_count = sizeof(davinci_spi->rx_tmp_buf);
+		}
+
+		t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count,
+				DMA_FROM_DEVICE);
+		if (dma_mapping_error(&spi->dev, t->rx_dma)) {
+			dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
+					rx_buf_count);
+			if (t->tx_buf)
+				dma_unmap_single(NULL, t->tx_dma,
+						davinci_spi->wcount,
+						DMA_TO_DEVICE);
+			return -ENOMEM;
+		}
+
+		param.opt = TCINTEN | EDMA_TCC(davinci_spi_dma->dma_rx_channel);
+		param.src = rx_reg;
+		param.a_b_cnt = davinci_spi->rcount << 16 | data_type;
+		param.dst = t->rx_dma;
+		param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16;
+		param.link_bcntrld = 0xffff;
+		param.src_dst_cidx = 0;
+		param.ccnt = 1;
+		edma_write_slot(davinci_spi_dma->dma_rx_channel, &param);
+
+		if (pdata->cshold_bug)
+			iowrite16(data1_reg_val >> 16,
+					davinci_spi->base + SPIDAT1 + 2);
+
+		edma_start(davinci_spi_dma->dma_rx_channel);
+		edma_start(davinci_spi_dma->dma_tx_channel);
+		set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
+	}
 
 	/* Wait for the transfer to complete */
-	if (spicfg->io_type == SPI_IO_TYPE_INTR) {
+	if (spicfg->io_type != SPI_IO_TYPE_POLL) {
 		wait_for_completion_interruptible(&(davinci_spi->done));
 	} else {
 		while (davinci_spi->rcount > 0 || davinci_spi->wcount > 0) {
@@ -560,6 +681,17 @@ static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
 	}
 
 	clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
+	if (spicfg->io_type == SPI_IO_TYPE_DMA) {
+
+		if (t->tx_buf)
+			dma_unmap_single(NULL, t->tx_dma, davinci_spi->wcount,
+					DMA_TO_DEVICE);
+
+		dma_unmap_single(NULL, t->rx_dma, rx_buf_count,
+				DMA_FROM_DEVICE);
+
+		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
+	}
 
 	/*
 	 * Check for bit error, desync error,parity error,timeout error and
@@ -572,6 +704,11 @@ static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
 		return ret;
 	}
 
+	if (davinci_spi->rcount != 0 || davinci_spi->wcount != 0) {
+		dev_err(sdev, "SPI data transfer error\n");
+		return -EIO;
+	}
+
 	return t->len;
 }
 
@@ -601,174 +738,6 @@ static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
 	return IRQ_HANDLED;
 }
 
-static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data)
-{
-	struct davinci_spi *davinci_spi = data;
-	struct davinci_spi_dma *davinci_spi_dma = &davinci_spi->dma_channels;
-
-	edma_stop(lch);
-
-	if (status == DMA_COMPLETE) {
-		if (lch == davinci_spi_dma->dma_rx_channel)
-			davinci_spi->rcount = 0;
-		if (lch == davinci_spi_dma->dma_tx_channel)
-			davinci_spi->wcount = 0;
-	}
-
-	if ((!davinci_spi->wcount && !davinci_spi->rcount) ||
-	    (status != DMA_COMPLETE))
-		complete(&davinci_spi->done);
-}
-
-static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
-{
-	struct davinci_spi *davinci_spi;
-	int int_status = 0;
-	unsigned rx_buf_count;
-	struct davinci_spi_dma *davinci_spi_dma;
-	int data_type, ret;
-	unsigned long tx_reg, rx_reg;
-	struct davinci_spi_platform_data *pdata;
-	void *rx_buf;
-	struct device *sdev;
-	struct edmacc_param param;
-
-	davinci_spi = spi_master_get_devdata(spi->master);
-	pdata = davinci_spi->pdata;
-	sdev = davinci_spi->bitbang.master->dev.parent;
-
-	davinci_spi_dma = &davinci_spi->dma_channels;
-
-	/* convert len to words based on bits_per_word */
-	data_type = davinci_spi->bytes_per_word[spi->chip_select];
-
-	tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
-	rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;
-
-	davinci_spi->tx = t->tx_buf;
-	davinci_spi->rx = t->rx_buf;
-	davinci_spi->wcount = t->len / data_type;
-	davinci_spi->rcount = davinci_spi->wcount;
-
-	INIT_COMPLETION(davinci_spi->done);
-
-	/* disable all interrupts for dma transfers */
-	clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
-	/* Enable SPI */
-	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
-
-	/*
-	 * Transmit DMA setup
-	 *
-	 * If there is transmit data, map the transmit buffer, set it as the
-	 * source of data and set the source B index to data size.
-	 * If there is no transmit data, set the transmit register as the
-	 * source of data, and set the source B index to zero.
-	 *
-	 * The destination is always the transmit register itself. And the
-	 * destination never increments.
-	 */
-
-	if (t->tx_buf) {
-		t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf,
-				davinci_spi->wcount, DMA_TO_DEVICE);
-		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
-			dev_dbg(sdev, "Unable to DMA map %d bytes TX buffer\n",
-					davinci_spi->wcount);
-			return -ENOMEM;
-		}
-	}
-
-	param.opt = TCINTEN | EDMA_TCC(davinci_spi_dma->dma_tx_channel);
-	param.src = t->tx_buf ? t->tx_dma : tx_reg;
-	param.a_b_cnt = davinci_spi->wcount << 16 | data_type;
-	param.dst = tx_reg;
-	param.src_dst_bidx = t->tx_buf ? data_type : 0;
-	param.link_bcntrld = 0xffff;
-	param.src_dst_cidx = 0;
-	param.ccnt = 1;
-	edma_write_slot(davinci_spi_dma->dma_tx_channel, &param);
-	edma_link(davinci_spi_dma->dma_tx_channel,
-			davinci_spi_dma->dummy_param_slot);
-
-	/*
-	 * Receive DMA setup
-	 *
-	 * If there is receive buffer, use it to receive data. If there
-	 * is none provided, use a temporary receive buffer. Set the
-	 * destination B index to 0 so effectively only one byte is used
-	 * in the temporary buffer (address does not increment).
-	 *
-	 * The source of receive data is the receive data register. The
-	 * source address never increments.
-	 */
-
-	if (t->rx_buf) {
-		rx_buf = t->rx_buf;
-		rx_buf_count = davinci_spi->rcount;
-	} else {
-		rx_buf = davinci_spi->rx_tmp_buf;
-		rx_buf_count = sizeof(davinci_spi->rx_tmp_buf);
-	}
-
-	t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count,
-			DMA_FROM_DEVICE);
-	if (dma_mapping_error(&spi->dev, t->rx_dma)) {
-		dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
-				rx_buf_count);
-		if (t->tx_buf)
-			dma_unmap_single(NULL, t->tx_dma, davinci_spi->wcount,
-					DMA_TO_DEVICE);
-		return -ENOMEM;
-	}
-
-	param.opt = TCINTEN | EDMA_TCC(davinci_spi_dma->dma_rx_channel);
-	param.src = rx_reg;
-	param.a_b_cnt = davinci_spi->rcount << 16 | data_type;
-	param.dst = t->rx_dma;
-	param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16;
-	param.link_bcntrld = 0xffff;
-	param.src_dst_cidx = 0;
-	param.ccnt = 1;
-	edma_write_slot(davinci_spi_dma->dma_rx_channel, &param);
-
-	if (pdata->cshold_bug) {
-		u16 spidat1 = ioread16(davinci_spi->base + SPIDAT1 + 2);
-		iowrite16(spidat1, davinci_spi->base + SPIDAT1 + 2);
-	}
-
-	edma_start(davinci_spi_dma->dma_rx_channel);
-	edma_start(davinci_spi_dma->dma_tx_channel);
-	set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
-
-	wait_for_completion_interruptible(&davinci_spi->done);
-
-	if (t->tx_buf)
-		dma_unmap_single(NULL, t->tx_dma, davinci_spi->wcount,
-				DMA_TO_DEVICE);
-
-	dma_unmap_single(NULL, t->rx_dma, rx_buf_count, DMA_FROM_DEVICE);
-
-	clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
-
-	/*
-	 * Check for bit error, desync error,parity error,timeout error and
-	 * receive overflow errors
-	 */
-	int_status = ioread32(davinci_spi->base + SPIFLG);
-
-	ret = davinci_spi_check_error(davinci_spi, int_status);
-	if (ret != 0)
-		return ret;
-
-	if (davinci_spi->rcount != 0 || davinci_spi->wcount != 0) {
-		dev_err(sdev, "SPI data transfer error\n");
-		return -EIO;
-	}
-
-	return t->len;
-}
-
 static int davinci_spi_request_dma(struct davinci_spi *davinci_spi)
 {
 	int r;
@@ -918,7 +887,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
 	if (r)
 		dma_eventq = r->start;
 
-	davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
+	davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs;
 	if (dma_rx_chan != SPI_NO_RESOURCE &&
 	    dma_tx_chan != SPI_NO_RESOURCE &&
 	    dma_eventq != SPI_NO_RESOURCE) {
@@ -930,10 +899,9 @@ static int davinci_spi_probe(struct platform_device *pdev)
 		if (ret)
 			goto free_clk;
 
-		davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;
-		dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
-			"Using RX channel = %d , TX channel = %d and "
-			"event queue = %d", dma_rx_chan, dma_tx_chan,
+		dev_info(&pdev->dev, "DMA: supported\n");
+		dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, "
+			"event queue: %d\n", dma_rx_chan, dma_tx_chan,
 			dma_eventq);
 	}
 