author     Guennadi Liakhovetski <g.liakhovetski@gmx.de>   2011-03-04 03:56:21 -0500
committer  Chris Ball <cjb@laptop.org>                     2011-03-25 10:38:56 -0400
commit     51fc7b2ce74d31b7d9a6bd3c57bf6c1cc77e837d (patch)
tree       65871eed7be0a2df91ac52d84d838367eec0c435 /drivers
parent     4f665cb614b8a258b507cc47753dd3f7dd45aac6 (diff)
mmc: tmio-mmc: Improve DMA stability on sh-mobile
On some SDHI tmio implementations the DMA and command completion interrupts can
arrive in reverse order, which leads to malfunctions. This patch postpones
DMA activation until the MMC command completion IRQ has been handled.
Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/mmc/host/tmio_mmc.c  63
1 file changed, 34 insertions(+), 29 deletions(-)
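For orientation before the diff: the patch moves the DMA kick-off out of the descriptor-preparation path and into the command-completion IRQ, which schedules the dma_issue tasklet; the tasklet unmasks the DATAEND interrupt and only then issues the pending DMA. Below is a condensed sketch of the resulting read-path flow, paraphrased from the hunks that follow (locking and NULL checks present in the real driver are omitted, so this is illustrative rather than verbatim tmio_mmc.c code):

/* Illustrative condensation of the post-patch control flow; not verbatim
 * driver code. Locking and error handling are omitted for brevity. */

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	/* ... command-completion handling ... */
	if (host->data->flags & MMC_DATA_READ) {
		if (!host->chan_rx)
			enable_mmc_irqs(host, TMIO_MASK_READOP);  /* PIO path */
		else
			tasklet_schedule(&host->dma_issue);       /* defer DMA start */
	}
	/* ... */
}

static void tmio_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = (host->data->flags & MMC_DATA_READ) ?
				host->chan_rx : host->chan_tx;

	/* DATAEND is unmasked only now, after command completion has been
	 * handled, so it can no longer race ahead of it. */
	enable_mmc_irqs(host, TMIO_STAT_DATAEND);
	dma_async_issue_pending(chan);
}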
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index ab1adeabdd22..e88627b33377 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -485,7 +485,10 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 	unsigned int count;
 	unsigned long flags;
 
-	if (!data) {
+	if (host->chan_tx || host->chan_rx) {
+		pr_err("PIO IRQ in DMA mode!\n");
+		return;
+	} else if (!data) {
 		pr_debug("Spurious PIO IRQ\n");
 		return;
 	}
@@ -648,6 +651,8 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 		if (host->data->flags & MMC_DATA_READ) {
 			if (!host->chan_rx)
 				enable_mmc_irqs(host, TMIO_MASK_READOP);
+			else
+				tasklet_schedule(&host->dma_issue);
 		} else {
 			if (!host->chan_tx)
 				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
@@ -779,18 +784,6 @@ static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
 #endif
 }
 
-static void tmio_dma_complete(void *arg)
-{
-	struct tmio_mmc_host *host = arg;
-
-	dev_dbg(&host->pdev->dev, "Command completed\n");
-
-	if (!host->data)
-		dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
-	else
-		enable_mmc_irqs(host, TMIO_STAT_DATAEND);
-}
-
 static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 {
 	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
@@ -817,6 +810,8 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 		goto pio;
 	}
 
+	disable_mmc_irqs(host, TMIO_STAT_RXRDY);
+
 	/* The only sg element can be unaligned, use our bounce buffer then */
 	if (!aligned) {
 		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
@@ -827,14 +822,11 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
 	if (ret > 0)
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+			DMA_FROM_DEVICE, DMA_CTRL_ACK);
 
-	if (desc) {
-		desc->callback = tmio_dma_complete;
-		desc->callback_param = host;
+	if (desc)
 		cookie = dmaengine_submit(desc);
-		dma_async_issue_pending(chan);
-	}
+
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
 		__func__, host->sg_len, ret, cookie, host->mrq);
 
@@ -886,6 +878,8 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 		goto pio;
 	}
 
+	disable_mmc_irqs(host, TMIO_STAT_TXRQ);
+
 	/* The only sg element can be unaligned, use our bounce buffer then */
 	if (!aligned) {
 		unsigned long flags;
@@ -900,13 +894,11 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
 	if (ret > 0)
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+			DMA_TO_DEVICE, DMA_CTRL_ACK);
 
-	if (desc) {
-		desc->callback = tmio_dma_complete;
-		desc->callback_param = host;
+	if (desc)
 		cookie = dmaengine_submit(desc);
-	}
+
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
 		__func__, host->sg_len, ret, cookie, host->mrq);
 
@@ -947,17 +939,30 @@ static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
 static void tmio_issue_tasklet_fn(unsigned long priv)
 {
 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
-	struct dma_chan *chan = host->chan_tx;
+	struct dma_chan *chan = NULL;
+
+	spin_lock_irq(&host->lock);
+
+	if (host && host->data) {
+		if (host->data->flags & MMC_DATA_READ)
+			chan = host->chan_rx;
+		else
+			chan = host->chan_tx;
+	}
+
+	spin_unlock_irq(&host->lock);
 
-	dma_async_issue_pending(chan);
+	enable_mmc_irqs(host, TMIO_STAT_DATAEND);
+
+	if (chan)
+		dma_async_issue_pending(chan);
 }
 
 static void tmio_tasklet_fn(unsigned long arg)
 {
 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
-	unsigned long flags;
 
-	spin_lock_irqsave(&host->lock, flags);
+	spin_lock_irq(&host->lock);
 
 	if (!host->data)
 		goto out;
@@ -973,7 +978,7 @@ static void tmio_tasklet_fn(unsigned long arg)
 
 	tmio_mmc_do_data_irq(host);
 out:
-	spin_unlock_irqrestore(&host->lock, flags);
+	spin_unlock_irq(&host->lock);
 }
 
 /* It might be necessary to make filter MFD specific */