author     Guennadi Liakhovetski <g.liakhovetski@gmx.de>   2010-02-11 11:50:05 -0500
committer  Paul Mundt <lethal@linux-sh.org>                2010-03-01 21:09:00 -0500
commit     47a4dc26eeb89a3746f9b1e2092602b40469640a
tree       69eb685635ca18f42ac8e245c9be7032a8dd41e7   /drivers/dma
parent     920925f90fa6455f7e8c9db0e215e706cd7dedeb
dmaengine: shdma: fix DMA error handling.
The present DMA error ISR in shdma.c is bogus: it locks the system hard in multiple
ways. Fix it to abort all queued transactions on all channels of the affected
controller, giving submitters a chance to get a DMA_ERROR status for the aborted
transactions. Afterwards, normal operation is possible again without reloading
the driver.
Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
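
From a client's point of view, the visible effect of this fix is that the status query finally gives a truthful answer for an aborted transfer: once sh_dmae_err() has torn a descriptor off ld_queue, sh_dmae_is_complete() reports DMA_ERROR rather than an eternal DMA_IN_PROGRESS. A minimal sketch of how a submitter might consume that, against the dmaengine API of this kernel generation (the helper name wait_for_transfer and the polling loop are illustrative only, not part of the patch):

```c
#include <linux/dmaengine.h>
#include <linux/errno.h>

/* Hypothetical client helper: poll a submitted cookie until it settles. */
static int wait_for_transfer(struct dma_chan *chan, dma_cookie_t cookie)
{
	dma_cookie_t last_complete, last_used;
	enum dma_status status;

	do {
		/* Routed to sh_dmae_is_complete() for shdma channels */
		status = dma_async_is_tx_complete(chan, cookie,
						  &last_complete, &last_used);
		if (status == DMA_ERROR)
			return -EIO;	/* aborted by sh_dmae_err() */
		cpu_relax();
	} while (status == DMA_IN_PROGRESS);

	return 0;	/* DMA_SUCCESS */
}
```

A real client would normally sleep or rely on the descriptor callback instead of spinning; the loop is only there to show the three status values in play.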
Diffstat (limited to 'drivers/dma')

-rw-r--r--  drivers/dma/shdma.c | 89
1 file changed, 53 insertions(+), 36 deletions(-)
```diff
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index b75ce8b84c46..77311698c046 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -73,7 +73,7 @@ static void sh_dmae_ctl_stop(int id)
 {
 	unsigned short dmaor = dmaor_read_reg(id);
 
-	dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
+	dmaor &= ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
 	dmaor_write_reg(id, dmaor);
 }
 
@@ -86,7 +86,7 @@ static int sh_dmae_rst(int id)
 
 	dmaor_write_reg(id, dmaor);
 	if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
-		pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
+		pr_warning("dma-sh: Can't initialize DMAOR.\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -661,7 +661,7 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
 
 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 {
-	struct sh_desc *sd;
+	struct sh_desc *desc;
 
 	spin_lock_bh(&sh_chan->desc_lock);
 	/* DMA work check */
@@ -671,10 +671,10 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 	}
 
 	/* Find the first not transferred desciptor */
-	list_for_each_entry(sd, &sh_chan->ld_queue, node)
-		if (sd->mark == DESC_SUBMITTED) {
+	list_for_each_entry(desc, &sh_chan->ld_queue, node)
+		if (desc->mark == DESC_SUBMITTED) {
 			/* Get the ld start address from ld_queue */
-			dmae_set_reg(sh_chan, &sd->hw);
+			dmae_set_reg(sh_chan, &desc->hw);
 			dmae_start(sh_chan);
 			break;
 		}
@@ -696,6 +696,7 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
 	dma_cookie_t last_used;
 	dma_cookie_t last_complete;
+	enum dma_status status;
 
 	sh_dmae_chan_ld_cleanup(sh_chan, false);
 
@@ -709,7 +710,27 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
 	if (used)
 		*used = last_used;
 
-	return dma_async_is_complete(cookie, last_complete, last_used);
+	spin_lock_bh(&sh_chan->desc_lock);
+
+	status = dma_async_is_complete(cookie, last_complete, last_used);
+
+	/*
+	 * If we don't find cookie on the queue, it has been aborted and we have
+	 * to report error
+	 */
+	if (status != DMA_SUCCESS) {
+		struct sh_desc *desc;
+		status = DMA_ERROR;
+		list_for_each_entry(desc, &sh_chan->ld_queue, node)
+			if (desc->cookie == cookie) {
+				status = DMA_IN_PROGRESS;
+				break;
+			}
+	}
+
+	spin_unlock_bh(&sh_chan->desc_lock);
+
+	return status;
 }
 
 static irqreturn_t sh_dmae_interrupt(int irq, void *data)
@@ -732,40 +753,36 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data)
 #if defined(CONFIG_CPU_SH4)
 static irqreturn_t sh_dmae_err(int irq, void *data)
 {
-	int err = 0;
 	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
+	int i;
 
-	/* IRQ Multi */
-	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
-		int __maybe_unused cnt = 0;
-		switch (irq) {
-#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
-		case DMTE6_IRQ:
-			cnt++;
-#endif
-		case DMTE0_IRQ:
-			if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
-				disable_irq(irq);
-				return IRQ_HANDLED;
+	/* halt the dma controller */
+	sh_dmae_ctl_stop(0);
+	if (shdev->pdata.mode & SHDMA_DMAOR1)
+		sh_dmae_ctl_stop(1);
+
+	/* We cannot detect, which channel caused the error, have to reset all */
+	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+		struct sh_dmae_chan *sh_chan = shdev->chan[i];
+		if (sh_chan) {
+			struct sh_desc *desc;
+			/* Stop the channel */
+			dmae_halt(sh_chan);
+			/* Complete all */
+			list_for_each_entry(desc, &sh_chan->ld_queue, node) {
+				struct dma_async_tx_descriptor *tx = &desc->async_tx;
+				desc->mark = DESC_IDLE;
+				if (tx->callback)
+					tx->callback(tx->callback_param);
 			}
-		default:
-			return IRQ_NONE;
+			list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
 		}
-	} else {
-		/* reset dma controller */
-		err = sh_dmae_rst(0);
-		if (err)
-			return err;
-#ifdef SH_DMAC_BASE1
-		if (shdev->pdata.mode & SHDMA_DMAOR1) {
-			err = sh_dmae_rst(1);
-			if (err)
-				return err;
-		}
-#endif
-		disable_irq(irq);
-		return IRQ_HANDLED;
 	}
+	sh_dmae_rst(0);
+	if (shdev->pdata.mode & SHDMA_DMAOR1)
+		sh_dmae_rst(1);
+
+	return IRQ_HANDLED;
 }
 #endif
 
```
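
Why the patch needs the ld_queue scan at all: dma_async_is_complete() decides purely from cookie ordering, and a descriptor that the error ISR marked DESC_IDLE and spliced onto ld_free never advances the completed cookie, so it would otherwise read as in-progress forever. For reference, the generic helper of this era (as found in include/linux/dmaengine.h; reproduced here from memory, so treat it as a sketch rather than an authoritative quote):

```c
/* Pure cookie comparison: no knowledge of aborted descriptors. */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}
```

The two branches handle cookie wrap-around: cookies increase monotonically and wrap, so "complete" means "outside the window (last_complete, last_used]". Since an aborted cookie stays inside that window, the new sh_dmae_is_complete() must consult ld_queue under desc_lock to tell "still queued" (DMA_IN_PROGRESS) apart from "aborted" (DMA_ERROR).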