Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/shdma.c  104
1 file changed, 68 insertions(+), 36 deletions(-)
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index d50da41ac328..727f51e903d9 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -48,7 +48,7 @@ enum sh_dmae_desc_status {
 
 /*
  * Used for write-side mutual exclusion for the global device list,
- * read-side synchronization by way of RCU.
+ * read-side synchronization by way of RCU, and per-controller data.
  */
 static DEFINE_SPINLOCK(sh_dmae_lock);
 static LIST_HEAD(sh_dmae_devices);
@@ -85,22 +85,35 @@ static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
  */
 static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
 {
-	unsigned short dmaor = dmaor_read(shdev);
+	unsigned short dmaor;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sh_dmae_lock, flags);
 
+	dmaor = dmaor_read(shdev);
 	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
+
+	spin_unlock_irqrestore(&sh_dmae_lock, flags);
 }
 
 static int sh_dmae_rst(struct sh_dmae_device *shdev)
 {
 	unsigned short dmaor;
+	unsigned long flags;
 
-	sh_dmae_ctl_stop(shdev);
-	dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;
+	spin_lock_irqsave(&sh_dmae_lock, flags);
 
-	dmaor_write(shdev, dmaor);
-	if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
-		pr_warning("dma-sh: Can't initialize DMAOR.\n");
-		return -EINVAL;
+	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
+
+	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
+
+	dmaor = dmaor_read(shdev);
+
+	spin_unlock_irqrestore(&sh_dmae_lock, flags);
+
+	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
+		dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
+		return -EIO;
 	}
 	return 0;
 }
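Note that sh_dmae_rst() no longer calls sh_dmae_ctl_stop(): both now take sh_dmae_lock, which is not recursive, so the reset open-codes the stop inside a single critical section. The shape of the change is the classic read-modify-write of a shared controller register under a spinlock. A minimal sketch of that pattern, with an illustrative lock and accessors rather than the driver's own symbols:

#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(ctrl_lock);		/* stand-in for sh_dmae_lock */

/*
 * Clear bits in a shared 16-bit control register, atomically with
 * respect to every other user of ctrl_lock.
 */
static void ctrl_clear_bits(void __iomem *reg, u16 clear)
{
	unsigned long flags;
	u16 val;

	spin_lock_irqsave(&ctrl_lock, flags);	/* correct in any context */
	val = readw(reg);
	writew(val & ~clear, reg);
	spin_unlock_irqrestore(&ctrl_lock, flags);
}

The _irqsave variant is the conservative choice here: it keeps the read-modify-write sequence correct regardless of the caller's interrupt state.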
@@ -184,7 +197,7 @@ static void dmae_init(struct sh_dmae_chan *sh_chan)
 
 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
 {
-	/* When DMA was working, can not set data to CHCR */
+	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
 	if (dmae_is_busy(sh_chan))
 		return -EBUSY;
 
@@ -374,7 +387,12 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 	LIST_HEAD(list);
 	int descs = sh_chan->descs_allocated;
 
+	/* Protect against ISR */
+	spin_lock_irq(&sh_chan->desc_lock);
 	dmae_halt(sh_chan);
+	spin_unlock_irq(&sh_chan->desc_lock);
+
+	/* Now no new interrupts will occur */
 
 	/* Prepared and not submitted descriptors can still be on the queue */
 	if (!list_empty(&sh_chan->ld_queue))
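Halting the channel now happens under desc_lock, the same lock the interrupt handler takes in the sh_dmae_interrupt() hunk further down, so teardown cannot race with an in-flight ISR; once the halt completes and the lock is dropped, the channel raises no new interrupts. A sketch of the idiom, where struct chan and chan_hw_halt() are hypothetical stand-ins for the driver's types:

#include <linux/spinlock.h>

struct chan {
	spinlock_t lock;			/* plays the role of desc_lock */
};

static void chan_hw_halt(struct chan *c) { /* hypothetical hardware stop */ }

static void chan_teardown(struct chan *c)
{
	/*
	 * Process context with interrupts known to be enabled, so the
	 * cheaper spin_lock_irq() is used instead of spin_lock_irqsave().
	 */
	spin_lock_irq(&c->lock);
	chan_hw_halt(c);			/* the ISR takes the same lock */
	spin_unlock_irq(&c->lock);
	/* from here on the halted channel raises no new interrupts */
}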
@@ -384,6 +402,7 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 		/* The caller is holding dma_list_mutex */
 		struct sh_dmae_slave *param = chan->private;
 		clear_bit(param->slave_id, sh_dmae_slave_used);
+		chan->private = NULL;
 	}
 
 	spin_lock_bh(&sh_chan->desc_lock);
@@ -563,8 +582,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	if (!chan || !len)
 		return NULL;
 
-	chan->private = NULL;
-
 	sh_chan = to_sh_chan(chan);
 
 	sg_init_table(&sg, 1);
@@ -620,9 +637,9 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	if (!chan)
 		return -EINVAL;
 
+	spin_lock_bh(&sh_chan->desc_lock);
 	dmae_halt(sh_chan);
 
-	spin_lock_bh(&sh_chan->desc_lock);
 	if (!list_empty(&sh_chan->ld_queue)) {
 		/* Record partial transfer */
 		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
@@ -716,6 +733,14 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
 			list_move(&desc->node, &sh_chan->ld_free);
 		}
 	}
+
+	if (all && !callback)
+		/*
+		 * Terminating and the loop completed normally: forgive
+		 * uncompleted cookies
+		 */
+		sh_chan->completed_cookie = sh_chan->common.cookie;
+
 	spin_unlock_bh(&sh_chan->desc_lock);
 
 	if (callback)
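The "forgive uncompleted cookies" step moves from sh_dmae_chan_ld_cleanup() (next hunk) into __ld_cleanup(), where it now executes under desc_lock and only once the loop has drained everything: all set and no callback left to run. A condensed sketch of that control flow, with hypothetical names standing in for the driver's types:

#include <linux/spinlock.h>

typedef void (*tx_cb)(void *param);

struct chan {
	spinlock_t lock;		/* desc_lock */
	int cookie;			/* last cookie issued */
	int completed;			/* last cookie completed */
};

/* One cleanup pass; returns the callback to run, if any. */
static tx_cb cleanup_one(struct chan *c, bool terminate)
{
	tx_cb cb = NULL;

	spin_lock_bh(&c->lock);
	/* ... find the next finished descriptor and set cb ... */
	if (terminate && !cb)
		c->completed = c->cookie;	/* forgive, under the lock */
	spin_unlock_bh(&c->lock);

	if (cb)
		cb(NULL);	/* user callback runs outside the lock */
	return cb;
}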
@@ -733,10 +758,6 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
 {
 	while (__ld_cleanup(sh_chan, all))
 		;
-
-	if (all)
-		/* Terminating - forgive uncompleted cookies */
-		sh_chan->completed_cookie = sh_chan->common.cookie;
 }
 
 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
@@ -782,8 +803,10 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
 
 	sh_dmae_chan_ld_cleanup(sh_chan, false);
 
-	last_used = chan->cookie;
+	/* First read completed cookie to avoid a skew */
 	last_complete = sh_chan->completed_cookie;
+	rmb();
+	last_used = chan->cookie;
 	BUG_ON(last_complete < 0);
 	dma_set_tx_state(txstate, last_complete, last_used, 0);
 
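The status path now snapshots the two cookies in a fixed order: completed_cookie first, then chan->cookie, with an rmb() between the loads. If the ISR advances both values between the reads, the snapshot can only under-report completion, which preserves the last_complete <= last_used invariant that BUG_ON() checks. A minimal sketch of the reader side, with illustrative types:

#include <asm/barrier.h>

struct cookies {
	int used;		/* chan->cookie: last issued */
	int completed;		/* completed_cookie: last finished */
};

static void snapshot(const struct cookies *c, int *completed, int *used)
{
	*completed = c->completed;	/* read the trailing value first */
	rmb();				/* forbid reordering of the two loads */
	*used = c->used;		/* the leading value, read second */
}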
@@ -813,8 +836,12 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
 static irqreturn_t sh_dmae_interrupt(int irq, void *data)
 {
 	irqreturn_t ret = IRQ_NONE;
-	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
-	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+	struct sh_dmae_chan *sh_chan = data;
+	u32 chcr;
+
+	spin_lock(&sh_chan->desc_lock);
+
+	chcr = sh_dmae_readl(sh_chan, CHCR);
 
 	if (chcr & CHCR_TE) {
 		/* DMA stop */
@@ -824,10 +851,13 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data)
 		tasklet_schedule(&sh_chan->tasklet);
 	}
 
+	spin_unlock(&sh_chan->desc_lock);
+
 	return ret;
 }
 
-static unsigned int sh_dmae_reset(struct sh_dmae_device *shdev)
+/* Called from error IRQ or NMI */
+static bool sh_dmae_reset(struct sh_dmae_device *shdev)
 {
 	unsigned int handled = 0;
 	int i;
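The ISR takes the plain spin_lock() flavor: it runs in hardirq context, where local interrupts are already disabled, so saving and restoring flags would be redundant. Process-context users of the same lock use the _irq/_bh/_irqsave variants seen in the other hunks. A sketch, again with illustrative types:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct chan {
	spinlock_t lock;		/* desc_lock */
	struct tasklet_struct tasklet;
};

static irqreturn_t chan_isr(int irq, void *data)
{
	struct chan *c = data;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&c->lock);		/* hardirq: flags need not be saved */
	/* ... read hardware status; on transfer end: ... */
	tasklet_schedule(&c->tasklet);	/* defer the heavy lifting */
	ret = IRQ_HANDLED;
	spin_unlock(&c->lock);

	return ret;
}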
@@ -839,22 +869,32 @@ static unsigned int sh_dmae_reset(struct sh_dmae_device *shdev)
 	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
 		struct sh_dmae_chan *sh_chan = shdev->chan[i];
 		struct sh_desc *desc;
+		LIST_HEAD(dl);
 
 		if (!sh_chan)
 			continue;
 
+		spin_lock(&sh_chan->desc_lock);
+
 		/* Stop the channel */
 		dmae_halt(sh_chan);
 
+		list_splice_init(&sh_chan->ld_queue, &dl);
+
+		spin_unlock(&sh_chan->desc_lock);
+
 		/* Complete all */
-		list_for_each_entry(desc, &sh_chan->ld_queue, node) {
+		list_for_each_entry(desc, &dl, node) {
 			struct dma_async_tx_descriptor *tx = &desc->async_tx;
 			desc->mark = DESC_IDLE;
 			if (tx->callback)
 				tx->callback(tx->callback_param);
 		}
 
-		list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
+		spin_lock(&sh_chan->desc_lock);
+		list_splice(&dl, &sh_chan->ld_free);
+		spin_unlock(&sh_chan->desc_lock);
+
 		handled++;
 	}
 
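sh_dmae_reset() now splices the pending descriptors onto a private list under the lock, drops the lock to run the user callbacks (which may re-enter the driver and take desc_lock themselves), then re-acquires it to return the descriptors to the free list. A self-contained sketch of that splice-out/callback/splice-back pattern, with illustrative names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct desc {
	struct list_head node;
	void (*callback)(void *param);
	void *param;
};

static void fail_all(spinlock_t *lock, struct list_head *queue,
		     struct list_head *free)
{
	struct desc *d;
	LIST_HEAD(dl);

	spin_lock(lock);
	list_splice_init(queue, &dl);	/* detach the queue under the lock */
	spin_unlock(lock);

	/* run user callbacks lock-free: they may call back into the driver */
	list_for_each_entry(d, &dl, node)
		if (d->callback)
			d->callback(d->param);

	spin_lock(lock);
	list_splice(&dl, free);		/* recycle descriptors under the lock */
	spin_unlock(lock);
}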
@@ -867,10 +907,11 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 {
 	struct sh_dmae_device *shdev = data;
 
-	if (dmaor_read(shdev) & DMAOR_AE)
-		return IRQ_RETVAL(sh_dmae_reset(data));
-	else
+	if (!(dmaor_read(shdev) & DMAOR_AE))
 		return IRQ_NONE;
+
+	sh_dmae_reset(data);
+	return IRQ_HANDLED;
 }
 
 static void dmae_do_tasklet(unsigned long data)
@@ -902,17 +943,11 @@ static void dmae_do_tasklet(unsigned long data)
 
 static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
 {
-	unsigned int handled;
-
 	/* Fast path out if NMIF is not asserted for this controller */
 	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
 		return false;
 
-	handled = sh_dmae_reset(shdev);
-	if (handled)
-		return true;
-
-	return false;
+	return sh_dmae_reset(shdev);
 }
 
 static int sh_dmae_nmi_handler(struct notifier_block *self,
@@ -982,9 +1017,6 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
 	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
 			(unsigned long)new_sh_chan);
 
-	/* Init the channel */
-	dmae_init(new_sh_chan);
-
 	spin_lock_init(&new_sh_chan->desc_lock);
 
 	/* Init descripter manage list */
@@ -1115,7 +1147,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
 	spin_unlock_irqrestore(&sh_dmae_lock, flags);
 
-	/* reset dma controller */
+	/* reset dma controller - only needed as a test */
 	err = sh_dmae_rst(shdev);
 	if (err)
 		goto rst_err;