Diffstat (limited to 'drivers/dma/dw_dmac.c')
-rw-r--r--  drivers/dma/dw_dmac.c  182
1 file changed, 82 insertions(+), 100 deletions(-)
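
As context for the diff below: the patch replaces several open-coded "widest aligned transfer width" calculations with a single dwc_fast_fls() helper. The stand-alone C sketch that follows is not part of the patch; the test harness and the sample address/length values are illustrative only. It reproduces the helper's logic so it can be compiled and exercised outside the kernel:

/*
 * Stand-alone sketch of the dwc_fast_fls() helper introduced by this patch.
 * It returns the largest transfer width (as log2 of bytes, capped at 3,
 * i.e. 8 bytes) that src, dest and len are all aligned to.
 */
#include <stdio.h>

static unsigned int dwc_fast_fls(unsigned long long v)
{
        /* Same cascade as in the patch: 8-, 4-, then 2-byte alignment. */
        if (!(v & 7))
                return 3;
        else if (!(v & 3))
                return 2;
        else if (!(v & 1))
                return 1;
        return 0;
}

int main(void)
{
        /* Hypothetical address/length triples, for illustration only. */
        unsigned long long cases[][3] = {
                { 0x1000, 0x2000, 64 },  /* all 8-byte aligned -> width 3 */
                { 0x1004, 0x2000, 64 },  /* 4-byte aligned src -> width 2 */
                { 0x1001, 0x2000, 64 },  /* unaligned src      -> width 0 */
        };

        for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
                unsigned long long src = cases[i][0];
                unsigned long long dest = cases[i][1];
                unsigned long long len = cases[i][2];

                printf("src=0x%llx dest=0x%llx len=%llu -> width %u\n",
                       src, dest, len, dwc_fast_fls(src | dest | len));
        }
        return 0;
}
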
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 72129615757..d3c5a5a88f1 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -105,13 +105,13 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 
         spin_lock_irqsave(&dwc->lock, flags);
         list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
+                i++;
                 if (async_tx_test_ack(&desc->txd)) {
                         list_del(&desc->desc_node);
                         ret = desc;
                         break;
                 }
                 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
-                i++;
         }
         spin_unlock_irqrestore(&dwc->lock, flags);
 
@@ -191,6 +191,42 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 
 /*----------------------------------------------------------------------*/
 
+static inline unsigned int dwc_fast_fls(unsigned long long v)
+{
+        /*
+         * We can be a lot more clever here, but this should take care
+         * of the most common optimization.
+         */
+        if (!(v & 7))
+                return 3;
+        else if (!(v & 3))
+                return 2;
+        else if (!(v & 1))
+                return 1;
+        return 0;
+}
+
+static void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
+{
+        dev_err(chan2dev(&dwc->chan),
+                " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+                channel_readl(dwc, SAR),
+                channel_readl(dwc, DAR),
+                channel_readl(dwc, LLP),
+                channel_readl(dwc, CTL_HI),
+                channel_readl(dwc, CTL_LO));
+}
+
+
+static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
+{
+        channel_clear_bit(dw, CH_EN, dwc->mask);
+        while (dma_readl(dw, CH_EN) & dwc->mask)
+                cpu_relax();
+}
+
+/*----------------------------------------------------------------------*/
+
 /* Called with dwc->lock held and bh disabled */
 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 {
@@ -200,13 +236,7 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
         if (dma_readl(dw, CH_EN) & dwc->mask) {
                 dev_err(chan2dev(&dwc->chan),
                         "BUG: Attempted to start non-idle channel\n");
-                dev_err(chan2dev(&dwc->chan),
-                        " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
-                        channel_readl(dwc, SAR),
-                        channel_readl(dwc, DAR),
-                        channel_readl(dwc, LLP),
-                        channel_readl(dwc, CTL_HI),
-                        channel_readl(dwc, CTL_LO));
+                dwc_dump_chan_regs(dwc);
 
                 /* The tasklet will hopefully advance the queue... */
                 return;
@@ -290,9 +320,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
                         "BUG: XFER bit set, but channel not idle!\n");
 
         /* Try to continue after resetting the channel... */
-        channel_clear_bit(dw, CH_EN, dwc->mask);
-        while (dma_readl(dw, CH_EN) & dwc->mask)
-                cpu_relax();
+        dwc_chan_disable(dw, dwc);
 }
 
 /*
@@ -337,7 +365,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
                 return;
         }
 
-        dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
+        dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
+                        (unsigned long long)llp);
 
         list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
                 /* check first descriptors addr */
@@ -373,9 +402,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
                 "BUG: All descriptors done, but channel not idle!\n");
 
         /* Try to continue after resetting the channel... */
-        channel_clear_bit(dw, CH_EN, dwc->mask);
-        while (dma_readl(dw, CH_EN) & dwc->mask)
-                cpu_relax();
+        dwc_chan_disable(dw, dwc);
 
         if (!list_empty(&dwc->queue)) {
                 list_move(dwc->queue.next, &dwc->active_list);
@@ -384,12 +411,11 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
         spin_unlock_irqrestore(&dwc->lock, flags);
 }
 
-static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
+static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
 {
         dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
                         " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
-                        lli->sar, lli->dar, lli->llp,
-                        lli->ctlhi, lli->ctllo);
+                        lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
 }
 
 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -487,17 +513,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 
                 spin_lock_irqsave(&dwc->lock, flags);
 
-                dev_err(chan2dev(&dwc->chan),
-                        " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
-                        channel_readl(dwc, SAR),
-                        channel_readl(dwc, DAR),
-                        channel_readl(dwc, LLP),
-                        channel_readl(dwc, CTL_HI),
-                        channel_readl(dwc, CTL_LO));
+                dwc_dump_chan_regs(dwc);
 
-                channel_clear_bit(dw, CH_EN, dwc->mask);
-                while (dma_readl(dw, CH_EN) & dwc->mask)
-                        cpu_relax();
+                dwc_chan_disable(dw, dwc);
 
                 /* make sure DMA does not restart by loading a new list */
                 channel_writel(dwc, LLP, 0);
@@ -527,7 +545,7 @@ static void dw_dma_tasklet(unsigned long data)
         status_xfer = dma_readl(dw, RAW.XFER);
         status_err = dma_readl(dw, RAW.ERROR);
 
-        dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err);
+        dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
 
         for (i = 0; i < dw->dma.chancnt; i++) {
                 dwc = &dw->chan[i];
@@ -551,7 +569,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
         struct dw_dma *dw = dev_id;
         u32 status;
 
-        dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
+        dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
                         dma_readl(dw, STATUS_INT));
 
         /*
@@ -597,12 +615,12 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
          * for DMA. But this is hard to do in a race-free manner.
          */
         if (list_empty(&dwc->active_list)) {
-                dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
+                dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
                                 desc->txd.cookie);
                 list_add_tail(&desc->desc_node, &dwc->active_list);
                 dwc_dostart(dwc, dwc_first_active(dwc));
         } else {
-                dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
+                dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
                                 desc->txd.cookie);
 
                 list_add_tail(&desc->desc_node, &dwc->queue);
@@ -627,26 +645,17 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
         unsigned int dst_width;
         u32 ctllo;
 
-        dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
-                        dest, src, len, flags);
+        dev_vdbg(chan2dev(chan),
+                        "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
+                        (unsigned long long)dest, (unsigned long long)src,
+                        len, flags);
 
         if (unlikely(!len)) {
-                dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
+                dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
                 return NULL;
         }
 
-        /*
-         * We can be a lot more clever here, but this should take care
-         * of the most common optimization.
-         */
-        if (!((src | dest | len) & 7))
-                src_width = dst_width = 3;
-        else if (!((src | dest | len) & 3))
-                src_width = dst_width = 2;
-        else if (!((src | dest | len) & 1))
-                src_width = dst_width = 1;
-        else
-                src_width = dst_width = 0;
+        src_width = dst_width = dwc_fast_fls(src | dest | len);
 
         ctllo = DWC_DEFAULT_CTLLO(chan)
                         | DWC_CTLL_DST_WIDTH(dst_width)
@@ -720,7 +729,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
         struct scatterlist *sg;
         size_t total_len = 0;
 
-        dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
+        dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
         if (unlikely(!dws || !sg_len))
                 return NULL;
@@ -746,14 +755,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                         mem = sg_dma_address(sg);
                         len = sg_dma_len(sg);
 
-                        if (!((mem | len) & 7))
-                                mem_width = 3;
-                        else if (!((mem | len) & 3))
-                                mem_width = 2;
-                        else if (!((mem | len) & 1))
-                                mem_width = 1;
-                        else
-                                mem_width = 0;
+                        mem_width = dwc_fast_fls(mem | len);
 
 slave_sg_todev_fill_desc:
                         desc = dwc_desc_get(dwc);
@@ -813,14 +815,7 @@ slave_sg_todev_fill_desc:
                         mem = sg_dma_address(sg);
                         len = sg_dma_len(sg);
 
-                        if (!((mem | len) & 7))
-                                mem_width = 3;
-                        else if (!((mem | len) & 3))
-                                mem_width = 2;
-                        else if (!((mem | len) & 1))
-                                mem_width = 1;
-                        else
-                                mem_width = 0;
+                        mem_width = dwc_fast_fls(mem | len);
 
 slave_sg_fromdev_fill_desc:
                         desc = dwc_desc_get(dwc);
@@ -950,9 +945,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
         } else if (cmd == DMA_TERMINATE_ALL) {
                 spin_lock_irqsave(&dwc->lock, flags);
 
-                channel_clear_bit(dw, CH_EN, dwc->mask);
-                while (dma_readl(dw, CH_EN) & dwc->mask)
-                        cpu_relax();
+                dwc_chan_disable(dw, dwc);
 
                 dwc->paused = false;
 
@@ -1014,7 +1007,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
         int i;
         unsigned long flags;
 
-        dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
+        dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
         /* ASSERT: channel is idle */
         if (dma_readl(dw, CH_EN) & dwc->mask) {
@@ -1057,8 +1050,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 
         spin_unlock_irqrestore(&dwc->lock, flags);
 
-        dev_dbg(chan2dev(chan),
-                "alloc_chan_resources allocated %d descriptors\n", i);
+        dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
 
         return i;
 }
@@ -1071,7 +1063,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
         unsigned long flags;
         LIST_HEAD(list);
 
-        dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
+        dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
                         dwc->descs_allocated);
 
         /* ASSERT: channel is idle */
@@ -1097,7 +1089,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
                 kfree(desc);
         }
 
-        dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
+        dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
 }
 
 /* --------------------- Cyclic DMA API extensions -------------------- */
@@ -1126,13 +1118,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
         if (dma_readl(dw, CH_EN) & dwc->mask) {
                 dev_err(chan2dev(&dwc->chan),
                         "BUG: Attempted to start non-idle channel\n");
-                dev_err(chan2dev(&dwc->chan),
-                        " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
-                        channel_readl(dwc, SAR),
-                        channel_readl(dwc, DAR),
-                        channel_readl(dwc, LLP),
-                        channel_readl(dwc, CTL_HI),
-                        channel_readl(dwc, CTL_LO));
+                dwc_dump_chan_regs(dwc);
                 spin_unlock_irqrestore(&dwc->lock, flags);
                 return -EBUSY;
         }
@@ -1167,9 +1153,7 @@ void dw_dma_cyclic_stop(struct dma_chan *chan)
 
         spin_lock_irqsave(&dwc->lock, flags);
 
-        channel_clear_bit(dw, CH_EN, dwc->mask);
-        while (dma_readl(dw, CH_EN) & dwc->mask)
-                cpu_relax();
+        dwc_chan_disable(dw, dwc);
 
         spin_unlock_irqrestore(&dwc->lock, flags);
 }
@@ -1308,9 +1292,9 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
         dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
                         sizeof(last->lli), DMA_TO_DEVICE);
 
-        dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
-                        "period %zu periods %d\n", buf_addr, buf_len,
-                        period_len, periods);
+        dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
+                        "period %zu periods %d\n", (unsigned long long)buf_addr,
+                        buf_len, period_len, periods);
 
         cdesc->periods = periods;
         dwc->cdesc = cdesc;
@@ -1340,16 +1324,14 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
         int i;
         unsigned long flags;
 
-        dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
+        dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
 
         if (!cdesc)
                 return;
 
         spin_lock_irqsave(&dwc->lock, flags);
 
-        channel_clear_bit(dw, CH_EN, dwc->mask);
-        while (dma_readl(dw, CH_EN) & dwc->mask)
-                cpu_relax();
+        dwc_chan_disable(dw, dwc);
 
         dma_writel(dw, CLEAR.ERROR, dwc->mask);
         dma_writel(dw, CLEAR.XFER, dwc->mask);
@@ -1386,7 +1368,7 @@ static void dw_dma_off(struct dw_dma *dw)
                 dw->chan[i].initialized = false;
 }
 
-static int __init dw_probe(struct platform_device *pdev)
+static int __devinit dw_probe(struct platform_device *pdev)
 {
         struct dw_dma_platform_data *pdata;
         struct resource *io;
@@ -1432,9 +1414,15 @@ static int __init dw_probe(struct platform_device *pdev)
         }
         clk_prepare_enable(dw->clk);
 
+        /* Calculate all channel mask before DMA setup */
+        dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
+
         /* force dma off, just in case */
         dw_dma_off(dw);
 
+        /* disable BLOCK interrupts as well */
+        channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+
         err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
         if (err)
                 goto err_irq;
@@ -1443,8 +1431,6 @@ static int __init dw_probe(struct platform_device *pdev)
 
         tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
 
-        dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
-
         INIT_LIST_HEAD(&dw->dma.channels);
         for (i = 0; i < pdata->nr_channels; i++) {
                 struct dw_dma_chan *dwc = &dw->chan[i];
@@ -1474,17 +1460,13 @@ static int __init dw_probe(struct platform_device *pdev)
                 channel_clear_bit(dw, CH_EN, dwc->mask);
         }
 
-        /* Clear/disable all interrupts on all channels. */
+        /* Clear all interrupts on all channels. */
         dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
+        dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
         dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
         dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
         dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
 
-        channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
-        channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
-        channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
-        channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
-
         dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
         dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
         if (pdata->is_private)
@@ -1523,7 +1505,7 @@ err_kfree:
         return err;
 }
 
-static int __exit dw_remove(struct platform_device *pdev)
+static int __devexit dw_remove(struct platform_device *pdev)
 {
         struct dw_dma *dw = platform_get_drvdata(pdev);
         struct dw_dma_chan *dwc, *_dwc;
@@ -1602,7 +1584,7 @@ MODULE_DEVICE_TABLE(of, dw_dma_id_table);
 #endif
 
 static struct platform_driver dw_driver = {
-        .remove         = __exit_p(dw_remove),
+        .remove         = __devexit_p(dw_remove),
         .shutdown       = dw_shutdown,
         .driver = {
                 .name = "dw_dmac",