author     Brian Niebuhr <bniebuhr@efjohnson.com>    2010-10-05 02:08:41 -0400
committer  Sekhar Nori <nsekhar@ti.com>              2010-11-18 08:08:35 -0500
commit     9b189fd7584a1d8c68334dd1bc47b363877b314e (patch)
tree       b501c938d169bbd948ef434c50aaf74ed2036bb1 /drivers/spi
parent     523c37e7006522e778a1fd0aea2746ceb788572f (diff)
spi: davinci: handle DMA completion errors correctly
Do not simply clean the DMA channel on a DMA completion
error. Instead, use the wcount and rcount members of
davinci_spi to detect non-completion of DMA and signal
EIO to the application.
Signed-off-by: Brian Niebuhr <bniebuhr@efjohnson.com>
Tested-by: Michael Williamson <michael.williamson@criticallink.com>
Signed-off-by: Sekhar Nori <nsekhar@ti.com>
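
The pattern in isolation, as a minimal sketch: the counters are primed to the
expected word count before the transfer, each EDMA completion callback zeroes
its counter only when the controller reports DMA_COMPLETE, and a counter left
non-zero after both completions marks a failed transfer, reported as -EIO.
The fake_spi struct, the callback harness, and finish_transfer() below are
hypothetical userspace stand-ins, not the driver's API; only the counter
logic follows the patch.

/*
 * Minimal userspace sketch of the pattern this patch introduces.
 * Everything here (fake_spi, the callback harness, finish_transfer)
 * is a hypothetical stand-in, not the driver's real API; only the
 * counter-clearing logic and the -EIO check mirror the patch.
 */
#include <stdio.h>
#include <errno.h>

#define DMA_COMPLETE 1		/* stand-in for the EDMA status code */
#define DMA_CC_ERROR 2		/* stand-in for a completion error */

struct fake_spi {
	int wcount;		/* words the TX DMA still owes us */
	int rcount;		/* words the RX DMA still owes us */
};

/* Mirrors davinci_spi_dma_rx_callback(): clear rcount only on success. */
static void rx_callback(struct fake_spi *spi, int ch_status)
{
	if (ch_status == DMA_COMPLETE)
		spi->rcount = 0;
	/* On error, rcount stays non-zero and flags the failure later. */
}

/* Mirrors davinci_spi_dma_tx_callback(). */
static void tx_callback(struct fake_spi *spi, int ch_status)
{
	if (ch_status == DMA_COMPLETE)
		spi->wcount = 0;
}

/* Mirrors the tail of davinci_spi_bufs_dma() after both completions. */
static int finish_transfer(struct fake_spi *spi, int len)
{
	if (spi->rcount != 0 || spi->wcount != 0) {
		fprintf(stderr, "SPI data transfer error\n");
		return -EIO;
	}
	return len;		/* success: report the transfer length */
}

int main(void)
{
	struct fake_spi spi = { .wcount = 16, .rcount = 16 };

	tx_callback(&spi, DMA_COMPLETE);	/* TX finished cleanly */
	rx_callback(&spi, DMA_CC_ERROR);	/* simulate an RX DMA error */

	/* Prints "result: -5" (-EIO) because rcount was never cleared. */
	printf("result: %d\n", finish_transfer(&spi, 64));
	return 0;
}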
Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/davinci_spi.c  63
1 file changed, 36 insertions(+), 27 deletions(-)
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c
index 975c2a228d0a..a47947da17a3 100644
--- a/drivers/spi/davinci_spi.c
+++ b/drivers/spi/davinci_spi.c
@@ -388,24 +388,26 @@ static int davinci_spi_setup_transfer(struct spi_device *spi,
 
 static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data)
 {
-	struct davinci_spi_dma *davinci_spi_dma = data;
+	struct davinci_spi *davinci_spi = data;
+	struct davinci_spi_dma *davinci_spi_dma = &davinci_spi->dma_channels;
+
+	edma_stop(davinci_spi_dma->dma_rx_channel);
 
 	if (ch_status == DMA_COMPLETE)
-		edma_stop(davinci_spi_dma->dma_rx_channel);
-	else
-		edma_clean_channel(davinci_spi_dma->dma_rx_channel);
+		davinci_spi->rcount = 0;
 
 	complete(&davinci_spi_dma->dma_rx_completion);
 }
 
 static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data)
 {
-	struct davinci_spi_dma *davinci_spi_dma = data;
+	struct davinci_spi *davinci_spi = data;
+	struct davinci_spi_dma *davinci_spi_dma = &davinci_spi->dma_channels;
+
+	edma_stop(davinci_spi_dma->dma_tx_channel);
 
 	if (ch_status == DMA_COMPLETE)
-		edma_stop(davinci_spi_dma->dma_tx_channel);
-	else
-		edma_clean_channel(davinci_spi_dma->dma_tx_channel);
+		davinci_spi->wcount = 0;
 
 	complete(&davinci_spi_dma->dma_tx_completion);
 }
@@ -632,7 +634,6 @@ static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
 {
 	struct davinci_spi *davinci_spi;
 	int int_status = 0;
-	int count;
 	unsigned rx_buf_count;
 	struct davinci_spi_dma *davinci_spi_dma;
 	int data_type, ret;
@@ -648,20 +649,20 @@ static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
 
 	davinci_spi_dma = &davinci_spi->dma_channels;
 
+	/* convert len to words based on bits_per_word */
+	data_type = davinci_spi->bytes_per_word[spi->chip_select];
+
 	tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
 	rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;
 
 	davinci_spi->tx = t->tx_buf;
 	davinci_spi->rx = t->rx_buf;
-
-	/* convert len to words based on bits_per_word */
-	data_type = davinci_spi->bytes_per_word[spi->chip_select];
+	davinci_spi->wcount = t->len / data_type;
+	davinci_spi->rcount = davinci_spi->wcount;
 
 	init_completion(&davinci_spi_dma->dma_rx_completion);
 	init_completion(&davinci_spi_dma->dma_tx_completion);
 
-	count = t->len / data_type;	/* the number of elements */
-
 	/* disable all interrupts for dma transfers */
 	clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
 	/* Enable SPI */
@@ -680,18 +681,18 @@ static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
 	 */
 
 	if (t->tx_buf) {
-		t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
-				DMA_TO_DEVICE);
+		t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf,
+				davinci_spi->wcount, DMA_TO_DEVICE);
 		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
-			dev_dbg(sdev, "Unable to DMA map a %d bytes"
-				" TX buffer\n", count);
+			dev_dbg(sdev, "Unable to DMA map %d bytes TX buffer\n",
+				davinci_spi->wcount);
 			return -ENOMEM;
 		}
 	}
 
 	param.opt = TCINTEN | EDMA_TCC(davinci_spi_dma->dma_tx_channel);
 	param.src = t->tx_buf ? t->tx_dma : tx_reg;
-	param.a_b_cnt = count << 16 | data_type;
+	param.a_b_cnt = davinci_spi->wcount << 16 | data_type;
 	param.dst = tx_reg;
 	param.src_dst_bidx = t->tx_buf ? data_type : 0;
 	param.link_bcntrld = 0xffff;
@@ -715,7 +716,7 @@ static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
 
 	if (t->rx_buf) {
 		rx_buf = t->rx_buf;
-		rx_buf_count = count;
+		rx_buf_count = davinci_spi->rcount;
 	} else {
 		rx_buf = davinci_spi->rx_tmp_buf;
 		rx_buf_count = sizeof(davinci_spi->rx_tmp_buf);
@@ -727,13 +728,14 @@ static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
 		dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
 			rx_buf_count);
 		if (t->tx_buf)
-			dma_unmap_single(NULL, t->tx_dma, count, DMA_TO_DEVICE);
+			dma_unmap_single(NULL, t->tx_dma, davinci_spi->wcount,
+								DMA_TO_DEVICE);
 		return -ENOMEM;
 	}
 
 	param.opt = TCINTEN | EDMA_TCC(davinci_spi_dma->dma_rx_channel);
 	param.src = rx_reg;
-	param.a_b_cnt = count << 16 | data_type;
+	param.a_b_cnt = davinci_spi->rcount << 16 | data_type;
 	param.dst = t->rx_dma;
 	param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16;
 	param.link_bcntrld = 0xffff;
@@ -754,7 +756,8 @@ static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
 	wait_for_completion_interruptible(&davinci_spi_dma->dma_rx_completion);
 
 	if (t->tx_buf)
-		dma_unmap_single(NULL, t->tx_dma, count, DMA_TO_DEVICE);
+		dma_unmap_single(NULL, t->tx_dma, davinci_spi->wcount,
+							DMA_TO_DEVICE);
 
 	dma_unmap_single(NULL, t->rx_dma, rx_buf_count, DMA_FROM_DEVICE);
 
@@ -770,15 +773,21 @@ static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
 	if (ret != 0)
 		return ret;
 
+	if (davinci_spi->rcount != 0 || davinci_spi->wcount != 0) {
+		dev_err(sdev, "SPI data transfer error\n");
+		return -EIO;
+	}
+
 	return t->len;
 }
 
-static int davinci_spi_request_dma(struct davinci_spi_dma *davinci_spi_dma)
+static int davinci_spi_request_dma(struct davinci_spi *davinci_spi)
 {
 	int r;
+	struct davinci_spi_dma *davinci_spi_dma = &davinci_spi->dma_channels;
 
 	r = edma_alloc_channel(davinci_spi_dma->dma_rx_channel,
-			davinci_spi_dma_rx_callback, davinci_spi_dma,
+			davinci_spi_dma_rx_callback, davinci_spi,
 			davinci_spi_dma->eventq);
 	if (r < 0) {
 		pr_err("Unable to request DMA channel for SPI RX\n");
@@ -787,7 +796,7 @@ static int davinci_spi_request_dma(struct davinci_spi_dma *davinci_spi_dma)
 	}
 
 	r = edma_alloc_channel(davinci_spi_dma->dma_tx_channel,
-			davinci_spi_dma_tx_callback, davinci_spi_dma,
+			davinci_spi_dma_tx_callback, davinci_spi,
 			davinci_spi_dma->eventq);
 	if (r < 0) {
 		pr_err("Unable to request DMA channel for SPI TX\n");
@@ -929,7 +938,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
 	davinci_spi->dma_channels.dma_tx_channel = dma_tx_chan;
 	davinci_spi->dma_channels.eventq = dma_eventq;
 
-	ret = davinci_spi_request_dma(&davinci_spi->dma_channels);
+	ret = davinci_spi_request_dma(davinci_spi);
 	if (ret)
 		goto free_clk;
 