Diffstat (limited to 'drivers/dma/dw_dmac.c')
 -rw-r--r--  drivers/dma/dw_dmac.c  228
 1 file changed, 128 insertions(+), 100 deletions(-)
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 9b592b02b5f4..7439079f5eed 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -9,6 +9,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/dmaengine.h>
@@ -22,6 +23,7 @@
 #include <linux/slab.h>
 
 #include "dw_dmac_regs.h"
+#include "dmaengine.h"
 
 /*
  * This supports the Synopsys "DesignWare AHB Central DMA Controller",
@@ -33,19 +35,23 @@
  * which does not support descriptor writeback.
  */
 
-#define DWC_DEFAULT_CTLLO(private) ({				\
-		struct dw_dma_slave *__slave = (private);	\
-		int dms = __slave ? __slave->dst_master : 0;	\
-		int sms = __slave ? __slave->src_master : 1;	\
-		u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \
-		u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \
+#define DWC_DEFAULT_CTLLO(_chan) ({				\
+		struct dw_dma_slave *__slave = (_chan->private);	\
+		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
+		struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
+		int _dms = __slave ? __slave->dst_master : 0;	\
+		int _sms = __slave ? __slave->src_master : 1;	\
+		u8 _smsize = __slave ? _sconfig->src_maxburst :	\
+			DW_DMA_MSIZE_16;			\
+		u8 _dmsize = __slave ? _sconfig->dst_maxburst :	\
+			DW_DMA_MSIZE_16;			\
 								\
-		(DWC_CTLL_DST_MSIZE(dmsize)			\
-		 | DWC_CTLL_SRC_MSIZE(smsize)			\
+		(DWC_CTLL_DST_MSIZE(_dmsize)			\
+		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
 		 | DWC_CTLL_LLP_D_EN				\
 		 | DWC_CTLL_LLP_S_EN				\
-		 | DWC_CTLL_DMS(dms)				\
-		 | DWC_CTLL_SMS(sms));				\
+		 | DWC_CTLL_DMS(_dms)				\
+		 | DWC_CTLL_SMS(_sms));				\
	})
 
 /*
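Note: the reworked macro reads burst sizes from the channel's dma_sconfig rather than from dw_dma_slave. For reference, a minimal sketch of the dma_slave_config a client would now supply (the field values are hypothetical; the maxburst fields are converted in place to the controller's MSIZE encoding by convert_burst(), added further down in this diff):

	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= 0xfffc0040,	/* hypothetical FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,		/* stored as MSIZE encoding 3 */
		.device_fc	= false,
	};
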
@@ -151,21 +157,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 	}
 }
 
-/* Called with dwc->lock held and bh disabled */
-static dma_cookie_t
-dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
-{
-	dma_cookie_t cookie = dwc->chan.cookie;
-
-	if (++cookie < 0)
-		cookie = 1;
-
-	dwc->chan.cookie = cookie;
-	desc->txd.cookie = cookie;
-
-	return cookie;
-}
-
 static void dwc_initialize(struct dw_dma_chan *dwc)
 {
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
@@ -192,7 +183,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 
 	/* Enable interrupts */
 	channel_set_bit(dw, MASK.XFER, dwc->mask);
-	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_set_bit(dw, MASK.ERROR, dwc->mask);
 
 	dwc->initialized = true;
@@ -245,7 +235,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
 	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
 
 	spin_lock_irqsave(&dwc->lock, flags);
-	dwc->completed = txd->cookie;
+	dma_cookie_complete(txd);
 	if (callback_required) {
 		callback = txd->callback;
 		param = txd->callback_param;
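dma_cookie_complete() replaces the driver-private dwc->completed bookkeeping with the completed_cookie field the dmaengine core now keeps on the channel. Roughly, the helper in drivers/dma/dmaengine.h of this era is (reproduced from memory, so treat as approximate):

	static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
	{
		BUG_ON(tx->cookie < DMA_MIN_COOKIE);
		tx->chan->completed_cookie = tx->cookie;
		tx->cookie = 0;
	}
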
@@ -329,12 +319,6 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dwc->lock, flags);
-	/*
-	 * Clear block interrupt flag before scanning so that we don't
-	 * miss any, and read LLP before RAW_XFER to ensure it is
-	 * valid if we decide to scan the list.
-	 */
-	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 	llp = channel_readl(dwc, LLP);
 	status_xfer = dma_readl(dw, RAW.XFER);
 
@@ -470,17 +454,16 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
 
 /* called with dwc->lock held and all DMAC interrupts disabled */
 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
-		u32 status_block, u32 status_err, u32 status_xfer)
+		u32 status_err, u32 status_xfer)
 {
 	unsigned long flags;
 
-	if (status_block & dwc->mask) {
+	if (dwc->mask) {
 		void (*callback)(void *param);
 		void *callback_param;
 
 		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
 				channel_readl(dwc, LLP));
-		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 
 		callback = dwc->cdesc->period_callback;
 		callback_param = dwc->cdesc->period_callback_param;
@@ -520,7 +503,6 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 		channel_writel(dwc, CTL_LO, 0);
 		channel_writel(dwc, CTL_HI, 0);
 
-		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 		dma_writel(dw, CLEAR.ERROR, dwc->mask);
 		dma_writel(dw, CLEAR.XFER, dwc->mask);
 
@@ -537,36 +519,29 @@ static void dw_dma_tasklet(unsigned long data)
 {
 	struct dw_dma *dw = (struct dw_dma *)data;
 	struct dw_dma_chan *dwc;
-	u32 status_block;
 	u32 status_xfer;
 	u32 status_err;
 	int i;
 
-	status_block = dma_readl(dw, RAW.BLOCK);
 	status_xfer = dma_readl(dw, RAW.XFER);
 	status_err = dma_readl(dw, RAW.ERROR);
 
-	dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
-			status_block, status_err);
+	dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err);
 
 	for (i = 0; i < dw->dma.chancnt; i++) {
 		dwc = &dw->chan[i];
 		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
-			dwc_handle_cyclic(dw, dwc, status_block, status_err,
-					status_xfer);
+			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
 		else if (status_err & (1 << i))
 			dwc_handle_error(dw, dwc);
-		else if ((status_block | status_xfer) & (1 << i))
+		else if (status_xfer & (1 << i))
 			dwc_scan_descriptors(dw, dwc);
 	}
 
 	/*
-	 * Re-enable interrupts. Block Complete interrupts are only
-	 * enabled if the INT_EN bit in the descriptor is set. This
-	 * will trigger a scan before the whole list is done.
+	 * Re-enable interrupts.
 	 */
 	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
-	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
 }
 
@@ -583,7 +558,6 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
 	 * softirq handler.
 	 */
 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
-	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
 
 	status = dma_readl(dw, STATUS_INT);
@@ -594,7 +568,6 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
 
 	/* Try to recover */
 	channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
-	channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
 	channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
 	channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
 	channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
@@ -615,7 +588,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dwc->lock, flags);
-	cookie = dwc_assign_cookie(dwc, desc);
+	cookie = dma_cookie_assign(tx);
 
 	/*
 	 * REVISIT: We should attempt to chain as many descriptors as
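dma_cookie_assign() is the core replacement for the dwc_assign_cookie() removed earlier in this diff; it implements the same increment-and-wrap logic once, in drivers/dma/dmaengine.h. Approximately:

	static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
	{
		struct dma_chan *chan = tx->chan;
		dma_cookie_t cookie;

		cookie = chan->cookie + 1;
		if (cookie < DMA_MIN_COOKIE)
			cookie = DMA_MIN_COOKIE;
		tx->cookie = chan->cookie = cookie;

		return cookie;
	}
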
@@ -674,7 +647,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	else
 		src_width = dst_width = 0;
 
-	ctllo = DWC_DEFAULT_CTLLO(chan->private)
+	ctllo = DWC_DEFAULT_CTLLO(chan)
 			| DWC_CTLL_DST_WIDTH(dst_width)
 			| DWC_CTLL_SRC_WIDTH(src_width)
 			| DWC_CTLL_DST_INC
@@ -731,10 +704,11 @@ err_desc_get:
 static struct dma_async_tx_descriptor *
 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_transfer_direction direction,
-		unsigned long flags)
+		unsigned long flags, void *context)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma_slave *dws = chan->private;
+	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
 	struct dw_desc *prev;
 	struct dw_desc *first;
 	u32 ctllo;
@@ -750,25 +724,34 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	if (unlikely(!dws || !sg_len))
 		return NULL;
 
-	reg_width = dws->reg_width;
 	prev = first = NULL;
 
 	switch (direction) {
 	case DMA_MEM_TO_DEV:
-		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
+		reg_width = __fls(sconfig->dst_addr_width);
+		reg = sconfig->dst_addr;
+		ctllo = (DWC_DEFAULT_CTLLO(chan)
 				| DWC_CTLL_DST_WIDTH(reg_width)
 				| DWC_CTLL_DST_FIX
-				| DWC_CTLL_SRC_INC
-				| DWC_CTLL_FC(dws->fc));
-		reg = dws->tx_reg;
+				| DWC_CTLL_SRC_INC);
+
+		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
+			DWC_CTLL_FC(DW_DMA_FC_D_M2P);
+
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc *desc;
 			u32 len, dlen, mem;
 
 			mem = sg_phys(sg);
 			len = sg_dma_len(sg);
-			mem_width = 2;
-			if (unlikely(mem & 3 || len & 3))
+
+			if (!((mem | len) & 7))
+				mem_width = 3;
+			else if (!((mem | len) & 3))
+				mem_width = 2;
+			else if (!((mem | len) & 1))
+				mem_width = 1;
+			else
 				mem_width = 0;
 
 slave_sg_todev_fill_desc:
@@ -812,21 +795,30 @@ slave_sg_todev_fill_desc:
 		}
 		break;
 	case DMA_DEV_TO_MEM:
-		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
+		reg_width = __fls(sconfig->src_addr_width);
+		reg = sconfig->src_addr;
+		ctllo = (DWC_DEFAULT_CTLLO(chan)
 				| DWC_CTLL_SRC_WIDTH(reg_width)
 				| DWC_CTLL_DST_INC
-				| DWC_CTLL_SRC_FIX
-				| DWC_CTLL_FC(dws->fc));
+				| DWC_CTLL_SRC_FIX);
+
+		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
+			DWC_CTLL_FC(DW_DMA_FC_D_P2M);
 
-		reg = dws->rx_reg;
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc *desc;
 			u32 len, dlen, mem;
 
 			mem = sg_phys(sg);
 			len = sg_dma_len(sg);
-			mem_width = 2;
-			if (unlikely(mem & 3 || len & 3))
+
+			if (!((mem | len) & 7))
+				mem_width = 3;
+			else if (!((mem | len) & 3))
+				mem_width = 2;
+			else if (!((mem | len) & 1))
+				mem_width = 1;
+			else
 				mem_width = 0;
 
 slave_sg_fromdev_fill_desc:
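Both scatterlist loops replace the old fixed 32-bit assumption with a ladder that picks the widest transfer the alignment of the buffer address and its length permit. An equivalent standalone sketch (dwc_mem_width() is a hypothetical name, not part of the driver):

	/* Return the SRC/DST_TR_WIDTH encoding (log2 of the transfer size in
	 * bytes) allowed by the alignment of both the address and the length. */
	static unsigned int dwc_mem_width(u32 mem, u32 len)
	{
		u32 both = mem | len;

		if (!(both & 7))
			return 3;	/* 64-bit transfers */
		if (!(both & 3))
			return 2;	/* 32-bit transfers */
		if (!(both & 1))
			return 1;	/* 16-bit transfers */
		return 0;		/* byte transfers */
	}
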
@@ -890,6 +882,39 @@ err_desc_get:
 	return NULL;
 }
 
+/*
+ * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
+ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+ *
+ * NOTE: burst size 2 is not supported by controller.
+ *
+ * This is done by finding the most significant set bit: fls(n) - 2.
+ */
+static inline void convert_burst(u32 *maxburst)
+{
+	if (*maxburst > 1)
+		*maxburst = fls(*maxburst) - 2;
+	else
+		*maxburst = 0;
+}
+
+static int
+set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+
+	/* Check if chan is configured for slave transfers */
+	if (!chan->private)
+		return -EINVAL;
+
+	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+
+	convert_burst(&dwc->dma_sconfig.src_maxburst);
+	convert_burst(&dwc->dma_sconfig.dst_maxburst);
+
+	return 0;
+}
+
 static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		unsigned long arg)
 {
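A quick sanity check of the burst mapping (a user-space sketch; fls() is open-coded with the kernel's semantics, since glibc does not provide it):

	#include <assert.h>

	/* Kernel-style fls(): 1-based index of the most significant set bit. */
	static int fls(unsigned int x)
	{
		int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	static void convert_burst(unsigned int *maxburst)
	{
		if (*maxburst > 1)
			*maxburst = fls(*maxburst) - 2;
		else
			*maxburst = 0;
	}

	int main(void)
	{
		unsigned int in[]   = { 1, 4, 8, 16 };
		unsigned int want[] = { 0, 1, 2, 3 };
		int i;

		for (i = 0; i < 4; i++) {
			convert_burst(&in[i]);
			assert(in[i] == want[i]);
		}
		return 0;
	}
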
@@ -939,8 +964,11 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		/* Flush all pending and queued descriptors */
 		list_for_each_entry_safe(desc, _desc, &list, desc_node)
 			dwc_descriptor_complete(dwc, desc, false);
-	} else
+	} else if (cmd == DMA_SLAVE_CONFIG) {
+		return set_runtime_config(chan, (struct dma_slave_config *)arg);
+	} else {
 		return -ENXIO;
+	}
 
 	return 0;
 }
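Clients do not call dwc_control() directly; they reach it through the dmaengine wrapper, roughly (from include/linux/dmaengine.h of this era):

	static inline int dmaengine_slave_config(struct dma_chan *chan,
						 struct dma_slave_config *config)
	{
		return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
						(unsigned long)config);
	}
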
@@ -951,28 +979,17 @@ dwc_tx_status(struct dma_chan *chan,
 		struct dma_tx_state *txstate)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	dma_cookie_t		last_used;
-	dma_cookie_t		last_complete;
-	int			ret;
-
-	last_complete = dwc->completed;
-	last_used = chan->cookie;
+	enum dma_status ret;
 
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
+	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret != DMA_SUCCESS) {
 		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
 
-		last_complete = dwc->completed;
-		last_used = chan->cookie;
-
-		ret = dma_async_is_complete(cookie, last_complete, last_used);
+		ret = dma_cookie_status(chan, cookie, txstate);
 	}
 
 	if (ret != DMA_SUCCESS)
-		dma_set_tx_state(txstate, last_complete, last_used,
-				dwc_first_active(dwc)->len);
-	else
-		dma_set_tx_state(txstate, last_complete, last_used, 0);
+		dma_set_residue(txstate, dwc_first_active(dwc)->len);
 
 	if (dwc->paused)
 		return DMA_PAUSED;
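dma_cookie_status() folds the old last_complete/last_used bookkeeping and the dma_set_tx_state() call into one core helper. Roughly:

	static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *state)
	{
		dma_cookie_t used, complete;

		used = chan->cookie;
		complete = chan->completed_cookie;
		barrier();
		if (state) {
			state->last = complete;
			state->used = used;
			state->residue = 0;
		}
		return dma_async_is_complete(cookie, complete, used);
	}

Since the helper zeroes the residue itself, the driver only has to override it in the not-complete case, hence the single dma_set_residue() call above.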
@@ -1004,7 +1021,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 		return -EIO;
 	}
 
-	dwc->completed = chan->cookie = 1;
+	dma_cookie_init(chan);
 
 	/*
 	 * NOTE: some controllers may have additional features that we
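dma_cookie_init() is the core equivalent of the removed open-coded initialization; approximately:

	static inline void dma_cookie_init(struct dma_chan *chan)
	{
		chan->cookie = DMA_MIN_COOKIE;
		chan->completed_cookie = DMA_MIN_COOKIE;
	}
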
@@ -1068,7 +1085,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 
 	/* Disable interrupts */
 	channel_clear_bit(dw, MASK.XFER, dwc->mask);
-	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_clear_bit(dw, MASK.ERROR, dwc->mask);
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1120,7 +1136,6 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
 		return -EBUSY;
 	}
 
-	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
 	dma_writel(dw, CLEAR.XFER, dwc->mask);
 
@@ -1175,11 +1190,11 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 		enum dma_transfer_direction direction)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
 	struct dw_cyclic_desc *cdesc;
 	struct dw_cyclic_desc *retval = NULL;
 	struct dw_desc *desc;
 	struct dw_desc *last = NULL;
-	struct dw_dma_slave *dws = chan->private;
 	unsigned long was_cyclic;
 	unsigned int reg_width;
 	unsigned int periods;
@@ -1203,7 +1218,12 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 	}
 
 	retval = ERR_PTR(-EINVAL);
-	reg_width = dws->reg_width;
+
+	if (direction == DMA_MEM_TO_DEV)
+		reg_width = __ffs(sconfig->dst_addr_width);
+	else
+		reg_width = __ffs(sconfig->src_addr_width);
+
 	periods = buf_len / period_len;
 
 	/* Check for too big/unaligned periods and unaligned DMA buffer. */
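enum dma_slave_buswidth values are byte counts (1, 2, 4), and the CTLLO width field wants log2 of that; since the valid values are powers of two, the __ffs() used here and the __fls() used in dwc_prep_slave_sg() yield the same encoding. A user-space check of that equivalence, using compiler builtins as stand-ins for the kernel bitops:

	#include <assert.h>

	int main(void)
	{
		unsigned int w;

		/* __ffs(x) == __builtin_ctz(x); __fls(x) == 31 - __builtin_clz(x) */
		for (w = 1; w <= 4; w <<= 1)
			assert(__builtin_ctz(w) == 31 - __builtin_clz(w));
		return 0;
	}
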
@@ -1236,26 +1256,34 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 
 		switch (direction) {
 		case DMA_MEM_TO_DEV:
-			desc->lli.dar = dws->tx_reg;
+			desc->lli.dar = sconfig->dst_addr;
 			desc->lli.sar = buf_addr + (period_len * i);
-			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
+			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
 					| DWC_CTLL_DST_WIDTH(reg_width)
 					| DWC_CTLL_SRC_WIDTH(reg_width)
 					| DWC_CTLL_DST_FIX
 					| DWC_CTLL_SRC_INC
-					| DWC_CTLL_FC(dws->fc)
 					| DWC_CTLL_INT_EN);
+
+			desc->lli.ctllo |= sconfig->device_fc ?
+				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
+				DWC_CTLL_FC(DW_DMA_FC_D_M2P);
+
 			break;
 		case DMA_DEV_TO_MEM:
 			desc->lli.dar = buf_addr + (period_len * i);
-			desc->lli.sar = dws->rx_reg;
-			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
+			desc->lli.sar = sconfig->src_addr;
+			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
 					| DWC_CTLL_SRC_WIDTH(reg_width)
 					| DWC_CTLL_DST_WIDTH(reg_width)
 					| DWC_CTLL_DST_INC
 					| DWC_CTLL_SRC_FIX
-					| DWC_CTLL_FC(dws->fc)
 					| DWC_CTLL_INT_EN);
+
+			desc->lli.ctllo |= sconfig->device_fc ?
+				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
+				DWC_CTLL_FC(DW_DMA_FC_D_P2M);
+
 			break;
 		default:
 			break;
@@ -1322,7 +1350,6 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
 	while (dma_readl(dw, CH_EN) & dwc->mask)
 		cpu_relax();
 
-	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
 	dma_writel(dw, CLEAR.XFER, dwc->mask);
 
@@ -1347,7 +1374,6 @@ static void dw_dma_off(struct dw_dma *dw)
 	dma_writel(dw, CFG, 0);
 
 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
-	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
@@ -1369,7 +1395,7 @@ static int __init dw_probe(struct platform_device *pdev)
 	int err;
 	int i;
 
-	pdata = pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
 		return -EINVAL;
 
@@ -1423,7 +1449,7 @@ static int __init dw_probe(struct platform_device *pdev)
 		struct dw_dma_chan *dwc = &dw->chan[i];
 
 		dwc->chan.device = &dw->dma;
-		dwc->chan.cookie = dwc->completed = 1;
+		dma_cookie_init(&dwc->chan);
 		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
 			list_add_tail(&dwc->chan.device_node,
 					&dw->dma.channels);
@@ -1432,7 +1458,7 @@ static int __init dw_probe(struct platform_device *pdev)
 
 		/* 7 is highest priority & 0 is lowest. */
 		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
-			dwc->priority = 7 - i;
+			dwc->priority = pdata->nr_channels - i - 1;
 		else
 			dwc->priority = i;
 
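The old code pinned the top priority at 7 regardless of how many channels the instance actually has; the new expression scales with nr_channels. For a hypothetical 5-channel controller:

	/*
	 * CHAN_PRIORITY_ASCENDING, nr_channels = 5:
	 *   old: dwc->priority = 7 - i                -> 7, 6, 5, 4, 3
	 *   new: dwc->priority = nr_channels - i - 1  -> 4, 3, 2, 1, 0
	 * Channel 0 still gets the highest priority, but the values stay
	 * within the range the hardware implements.
	 */
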
@@ -1449,13 +1475,11 @@ static int __init dw_probe(struct platform_device *pdev)
 
 	/* Clear/disable all interrupts on all channels. */
 	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
-	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
 	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
 	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
 	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
 
 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
-	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
@@ -1562,6 +1586,10 @@ static int dw_resume_noirq(struct device *dev)
 static const struct dev_pm_ops dw_dev_pm_ops = {
 	.suspend_noirq = dw_suspend_noirq,
 	.resume_noirq = dw_resume_noirq,
+	.freeze_noirq = dw_suspend_noirq,
+	.thaw_noirq = dw_resume_noirq,
+	.restore_noirq = dw_resume_noirq,
+	.poweroff_noirq = dw_suspend_noirq,
 };
 
 static struct platform_driver dw_driver = {