Diffstat (limited to 'drivers/dma/fsldma.c')
-rw-r--r--	drivers/dma/fsldma.c	220
1 file changed, 98 insertions, 122 deletions
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 5da1a4a817e..6e9ad6edc4a 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -68,11 +68,6 @@ static dma_addr_t get_cdar(struct fsldma_chan *chan)
 	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
 }
 
-static dma_addr_t get_ndar(struct fsldma_chan *chan)
-{
-	return DMA_IN(chan, &chan->regs->ndar, 64);
-}
-
 static u32 get_bcr(struct fsldma_chan *chan)
 {
 	return DMA_IN(chan, &chan->regs->bcr, 32);
@@ -143,13 +138,11 @@ static void dma_init(struct fsldma_chan *chan)
 	case FSL_DMA_IP_85XX:
 		/* Set the channel to below modes:
 		 * EIE - Error interrupt enable
-		 * EOSIE - End of segments interrupt enable (basic mode)
 		 * EOLNIE - End of links interrupt enable
 		 * BWC - Bandwidth sharing among channels
 		 */
 		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
-				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE
-				| FSL_DMA_MR_EOSIE, 32);
+				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
 		break;
 	case FSL_DMA_IP_83XX:
 		/* Set the channel to below modes:
@@ -168,25 +161,32 @@ static int dma_is_idle(struct fsldma_chan *chan)
 	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
 }
 
+/*
+ * Start the DMA controller
+ *
+ * Preconditions:
+ * - the CDAR register must point to the start descriptor
+ * - the MRn[CS] bit must be cleared
+ */
 static void dma_start(struct fsldma_chan *chan)
 {
 	u32 mode;
 
 	mode = DMA_IN(chan, &chan->regs->mr, 32);
 
-	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
-		if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
-			DMA_OUT(chan, &chan->regs->bcr, 0, 32);
-			mode |= FSL_DMA_MR_EMP_EN;
-		} else {
-			mode &= ~FSL_DMA_MR_EMP_EN;
-		}
+	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
+		DMA_OUT(chan, &chan->regs->bcr, 0, 32);
+		mode |= FSL_DMA_MR_EMP_EN;
+	} else {
+		mode &= ~FSL_DMA_MR_EMP_EN;
 	}
 
-	if (chan->feature & FSL_DMA_CHAN_START_EXT)
+	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
 		mode |= FSL_DMA_MR_EMS_EN;
-	else
+	} else {
+		mode &= ~FSL_DMA_MR_EMS_EN;
 		mode |= FSL_DMA_MR_CS;
+	}
 
 	DMA_OUT(chan, &chan->regs->mr, mode, 32);
 }
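For context, dma_start() is now called only from fsl_chan_xfer_ld_queue(), which is what establishes these preconditions. A condensed sketch of that start sequence, using the register helpers from this file (the CDAR read-back mirrors what the patch adds and appears intended to post the write before the channel is kicked):

	/* sketch only: condensed from fsl_chan_xfer_ld_queue() below */
	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode &= ~FSL_DMA_MR_CS;			/* MRn[CS] must start cleared */
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	set_cdar(chan, desc->async_tx.phys);	/* CDAR -> start descriptor */
	get_cdar(chan);				/* read back */

	dma_start(chan);			/* sets MRn[CS] (or EMS_EN) to begin */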
@@ -760,14 +760,15 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
 
 	switch (cmd) {
 	case DMA_TERMINATE_ALL:
+		spin_lock_irqsave(&chan->desc_lock, flags);
+
 		/* Halt the DMA engine */
 		dma_halt(chan);
 
-		spin_lock_irqsave(&chan->desc_lock, flags);
-
 		/* Remove and free all of the descriptors in the LD queue */
 		fsldma_free_desc_list(chan, &chan->ld_pending);
 		fsldma_free_desc_list(chan, &chan->ld_running);
+		chan->idle = true;
 
 		spin_unlock_irqrestore(&chan->desc_lock, flags);
 		return 0;
@@ -805,76 +806,43 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
 	}
 
 /**
- * fsl_dma_update_completed_cookie - Update the completed cookie.
+ * fsl_chan_ld_cleanup - Clean up link descriptors
  * @chan : Freescale DMA channel
  *
- * CONTEXT: hardirq
+ * This function is run after the queue of running descriptors has been
+ * executed by the DMA engine. It will run any callbacks, and then free
+ * the descriptors.
+ *
+ * HARDWARE STATE: idle
 */
-static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
+static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
 {
-	struct fsl_desc_sw *desc;
+	struct fsl_desc_sw *desc, *_desc;
 	unsigned long flags;
-	dma_cookie_t cookie;
 
 	spin_lock_irqsave(&chan->desc_lock, flags);
 
+	/* if the ld_running list is empty, there is nothing to do */
 	if (list_empty(&chan->ld_running)) {
-		chan_dbg(chan, "no running descriptors\n");
+		chan_dbg(chan, "no descriptors to cleanup\n");
 		goto out_unlock;
 	}
 
-	/* Get the last descriptor, update the cookie to that */
+	/*
+	 * Get the last descriptor, update the cookie to it
+	 *
+	 * This is done before callbacks run so that clients can check the
+	 * status of their DMA transfer inside the callback.
+	 */
 	desc = to_fsl_desc(chan->ld_running.prev);
-	if (dma_is_idle(chan))
-		cookie = desc->async_tx.cookie;
-	else {
-		cookie = desc->async_tx.cookie - 1;
-		if (unlikely(cookie < DMA_MIN_COOKIE))
-			cookie = DMA_MAX_COOKIE;
-	}
-
-	chan->completed_cookie = cookie;
-
-out_unlock:
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
-}
-
-/**
- * fsldma_desc_status - Check the status of a descriptor
- * @chan: Freescale DMA channel
- * @desc: DMA SW descriptor
- *
- * This function will return the status of the given descriptor
- */
-static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
-					  struct fsl_desc_sw *desc)
-{
-	return dma_async_is_complete(desc->async_tx.cookie,
-				     chan->completed_cookie,
-				     chan->common.cookie);
-}
-
-/**
- * fsl_chan_ld_cleanup - Clean up link descriptors
- * @chan : Freescale DMA channel
- *
- * This function clean up the ld_queue of DMA channel.
- */
-static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
-{
-	struct fsl_desc_sw *desc, *_desc;
-	unsigned long flags;
-
-	spin_lock_irqsave(&chan->desc_lock, flags);
+	chan->completed_cookie = desc->async_tx.cookie;
+	chan_dbg(chan, "completed_cookie = %d\n", chan->completed_cookie);
 
-	chan_dbg(chan, "chan completed_cookie = %d\n", chan->completed_cookie);
+	/* Run the callback for each descriptor, in order */
 	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
 		dma_async_tx_callback callback;
 		void *callback_param;
 
-		if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
-			break;
-
 		/* Remove from the list of running transactions */
 		list_del(&desc->node);
 
@@ -898,6 +866,7 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
 		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
 	}
 
+out_unlock:
 	spin_unlock_irqrestore(&chan->desc_lock, flags);
 }
 
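As the new kernel-doc comment notes, completed_cookie is advanced before the callbacks run, so a client may check the status of its own transfer from inside its callback. A minimal client-side sketch of that pattern (the callback name and completion variable are hypothetical, not part of this patch):

	#include <linux/completion.h>
	#include <linux/dmaengine.h>

	/* hypothetical client callback, invoked from fsl_chan_ld_cleanup() */
	static void my_memcpy_done(void *param)
	{
		struct completion *done = param;

		/* by this point the cookie for this transfer already
		 * reports complete, because completed_cookie was updated
		 * before the callbacks ran */
		complete(done);
	}

	/* at submit time: */
	tx->callback = my_memcpy_done;
	tx->callback_param = &done_completion;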
@@ -905,10 +874,7 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
  * fsl_chan_xfer_ld_queue - transfer any pending transactions
  * @chan : Freescale DMA channel
  *
- * This will make sure that any pending transactions will be run.
- * If the DMA controller is idle, it will be started. Otherwise,
- * the DMA controller's interrupt handler will start any pending
- * transactions when it becomes idle.
+ * HARDWARE STATE: idle
 */
 static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 {
@@ -927,23 +893,16 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 	}
 
 	/*
-	 * The DMA controller is not idle, which means the interrupt
-	 * handler will start any queued transactions when it runs
-	 * at the end of the current transaction
+	 * The DMA controller is not idle, which means that the interrupt
+	 * handler will start any queued transactions when it runs after
+	 * this transaction finishes
 	 */
-	if (!dma_is_idle(chan)) {
+	if (!chan->idle) {
 		chan_dbg(chan, "DMA controller still busy\n");
 		goto out_unlock;
 	}
 
 	/*
-	 * TODO:
-	 * make sure the dma_halt() function really un-wedges the
-	 * controller as much as possible
-	 */
-	dma_halt(chan);
-
-	/*
 	 * If there are some link descriptors which have not been
 	 * transferred, we need to start the controller
 	 */
@@ -952,15 +911,32 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 	 * Move all elements from the queue of pending transactions
 	 * onto the list of running transactions
 	 */
+	chan_dbg(chan, "idle, starting controller\n");
 	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
 	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
 
 	/*
+	 * The 85xx DMA controller doesn't clear the channel start bit
+	 * automatically at the end of a transfer. Therefore we must clear
+	 * it in software before starting the transfer.
+	 */
+	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+		u32 mode;
+
+		mode = DMA_IN(chan, &chan->regs->mr, 32);
+		mode &= ~FSL_DMA_MR_CS;
+		DMA_OUT(chan, &chan->regs->mr, mode, 32);
+	}
+
+	/*
 	 * Program the descriptor's address into the DMA controller,
 	 * then start the DMA transaction
 	 */
 	set_cdar(chan, desc->async_tx.phys);
+	get_cdar(chan);
+
 	dma_start(chan);
+	chan->idle = false;
 
 out_unlock:
 	spin_unlock_irqrestore(&chan->desc_lock, flags);
@@ -985,16 +961,18 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
 					struct dma_tx_state *txstate)
 {
 	struct fsldma_chan *chan = to_fsl_chan(dchan);
-	dma_cookie_t last_used;
 	dma_cookie_t last_complete;
+	dma_cookie_t last_used;
+	unsigned long flags;
 
-	fsl_chan_ld_cleanup(chan);
+	spin_lock_irqsave(&chan->desc_lock, flags);
 
-	last_used = dchan->cookie;
 	last_complete = chan->completed_cookie;
+	last_used = dchan->cookie;
 
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
 
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
 	return dma_async_is_complete(cookie, last_complete, last_used);
 }
 
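fsl_tx_status() now only snapshots the two cookies under the lock; the cleanup work has moved to the tasklet. From a client's point of view nothing changes: status is still polled through the usual dmaengine helper, which ends up in this function. A sketch, assuming a chan and cookie obtained at submit time (DMA_SUCCESS was the name for a finished transfer in kernels of this vintage):

	#include <linux/dmaengine.h>

	enum dma_status status;

	/* resolves to fsl_tx_status() for an fsldma channel */
	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
	if (status == DMA_SUCCESS) {
		/* transfer finished: cookie is within completed_cookie */
	}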
@@ -1005,8 +983,6 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
 static irqreturn_t fsldma_chan_irq(int irq, void *data)
 {
 	struct fsldma_chan *chan = data;
-	int update_cookie = 0;
-	int xfer_ld_q = 0;
 	u32 stat;
 
 	/* save and clear the status register */
@@ -1014,6 +990,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	set_sr(chan, stat);
 	chan_dbg(chan, "irq: stat = 0x%x\n", stat);
 
+	/* check that this was really our device */
 	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
 	if (!stat)
 		return IRQ_NONE;
@@ -1028,28 +1005,9 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	 */
 	if (stat & FSL_DMA_SR_PE) {
 		chan_dbg(chan, "irq: Programming Error INT\n");
-		if (get_bcr(chan) == 0) {
-			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
-			 * Now, update the completed cookie, and continue the
-			 * next uncompleted transfer.
-			 */
-			update_cookie = 1;
-			xfer_ld_q = 1;
-		}
 		stat &= ~FSL_DMA_SR_PE;
-	}
-
-	/*
-	 * If the link descriptor segment transfer finishes,
-	 * we will recycle the used descriptor.
-	 */
-	if (stat & FSL_DMA_SR_EOSI) {
-		chan_dbg(chan, "irq: End-of-segments INT\n");
-		chan_dbg(chan, "irq: clndar 0x%llx, nlndar 0x%llx\n",
-			(unsigned long long)get_cdar(chan),
-			(unsigned long long)get_ndar(chan));
-		stat &= ~FSL_DMA_SR_EOSI;
-		update_cookie = 1;
+		if (get_bcr(chan) != 0)
+			chan_err(chan, "Programming Error!\n");
 	}
 
 	/*
@@ -1059,8 +1017,6 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	if (stat & FSL_DMA_SR_EOCDI) {
 		chan_dbg(chan, "irq: End-of-Chain link INT\n");
 		stat &= ~FSL_DMA_SR_EOCDI;
-		update_cookie = 1;
-		xfer_ld_q = 1;
 	}
 
 	/*
@@ -1071,25 +1027,44 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	if (stat & FSL_DMA_SR_EOLNI) {
 		chan_dbg(chan, "irq: End-of-link INT\n");
 		stat &= ~FSL_DMA_SR_EOLNI;
-		xfer_ld_q = 1;
 	}
 
-	if (update_cookie)
-		fsl_dma_update_completed_cookie(chan);
-	if (xfer_ld_q)
-		fsl_chan_xfer_ld_queue(chan);
+	/* check that the DMA controller is really idle */
+	if (!dma_is_idle(chan))
+		chan_err(chan, "irq: controller not idle!\n");
+
+	/* check that we handled all of the bits */
 	if (stat)
-		chan_dbg(chan, "irq: unhandled sr 0x%08x\n", stat);
+		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);
 
-	chan_dbg(chan, "irq: Exit\n");
+	/*
+	 * Schedule the tasklet to handle all cleanup of the current
+	 * transaction. It will start a new transaction if there is
+	 * one pending.
+	 */
 	tasklet_schedule(&chan->tasklet);
+	chan_dbg(chan, "irq: Exit\n");
 	return IRQ_HANDLED;
 }
 
 static void dma_do_tasklet(unsigned long data)
 {
 	struct fsldma_chan *chan = (struct fsldma_chan *)data;
+	unsigned long flags;
+
+	chan_dbg(chan, "tasklet entry\n");
+
+	/* run all callbacks, free all used descriptors */
 	fsl_chan_ld_cleanup(chan);
+
+	/* the channel is now idle */
+	spin_lock_irqsave(&chan->desc_lock, flags);
+	chan->idle = true;
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+	/* start any pending transactions automatically */
+	fsl_chan_xfer_ld_queue(chan);
+	chan_dbg(chan, "tasklet exit\n");
 }
 
 static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
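The interrupt handler is now a thin top half: it acknowledges and sanity-checks the hardware, then defers all descriptor cleanup and queue restarting to the tasklet. The generic shape of that split, as a minimal sketch using the old-style tasklet API this driver uses (all names here are hypothetical):

	#include <linux/interrupt.h>

	struct my_chan {
		struct tasklet_struct tasklet;
		/* ... registers, locks, descriptor lists ... */
	};

	static void my_do_tasklet(unsigned long data)
	{
		struct my_chan *chan = (struct my_chan *)data;

		/* slow work: run callbacks, free descriptors, restart queue */
	}

	static irqreturn_t my_chan_irq(int irq, void *data)
	{
		struct my_chan *chan = data;

		/* fast work only: ack the device, then defer the rest */
		tasklet_schedule(&chan->tasklet);
		return IRQ_HANDLED;
	}

	/* at probe time:
	 * tasklet_init(&chan->tasklet, my_do_tasklet, (unsigned long)chan);
	 */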
@@ -1269,6 +1244,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
 	spin_lock_init(&chan->desc_lock);
 	INIT_LIST_HEAD(&chan->ld_pending);
 	INIT_LIST_HEAD(&chan->ld_running);
+	chan->idle = true;
 
 	chan->common.device = &fdev->common;
 
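With chan->idle initialized to true at probe, the first issue-pending call from a client can start the controller immediately; afterwards the flag is owned by the tasklet and DMA_TERMINATE_ALL. A hedged client-side sketch of the path that exercises this, assuming chan came from dma_request_channel() and dst/src are DMA-mapped addresses:

	#include <linux/dmaengine.h>

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT);
	cookie = tx->tx_submit(tx);	/* descriptor goes onto ld_pending */
	dma_async_issue_pending(chan);	/* -> fsl_chan_xfer_ld_queue() */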