path: root/drivers/dma
author	Linus Torvalds <torvalds@linux-foundation.org>	2011-08-01 19:46:37 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-08-01 19:46:37 -0400
commit	12ff47e7f5fb64c566f62e6cf6a3b291c51bd337 (patch)
tree	d9fba3780142af380ccfaf90d8b13363e3475bd7 /drivers/dma
parent	73bcbac130a59f236ae78ed70ef7a05b45caa19e (diff)
parent	1ae105aa7416087f2920c35c3cd16831d0d09c9c (diff)
Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma
* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (37 commits)
  Improve slave/cyclic DMA engine documentation
  dmaengine: pl08x: handle the rest of enums in pl08x_width
  DMA: PL08x: cleanup selection of burst size
  DMA: PL08x: avoid recalculating cctl at each prepare
  DMA: PL08x: cleanup selection of buswidth
  DMA: PL08x: constify plchan->cd and plat->slave_channels
  DMA: PL08x: separately store source/destination cctl
  DMA: PL08x: separately store source/destination slave address
  DMA: PL08x: clean up LLI debugging
  DMA: PL08x: select LLI bus only once per LLI setup
  DMA: PL08x: remove unused constants
  ARM: mxs-dma: reset after disable channel
  dma: intel_mid_dma: remove redundant pci_set_drvdata calls
  dma: mxs-dma: fix unterminated platform_device_id table
  dmaengine: pl330: make platform data optional
  dmaengine: imx-sdma: return proper error if kzalloc fails
  pch_dma: Fix CTL register access issue
  dmaengine: mxs-dma: skip request_irq for NO_IRQ
  dmaengine/coh901318: fix slave submission semantics
  dmaengine/ste_dma40: allow memory buswidth/burst to be configured
  ...

Fix trivial whitespace conflict in drivers/dma/mv_xor.c
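Several of these commits rework how slave DMA drivers consume a client's dma_slave_config (PL08x caching per-direction cctl values, ste_dma40 validating both halves, and the TODO below tracking removal of the config's direction field). For orientation, a minimal client-side sketch of that API as it stood in this era; foo_filter and FOO_TX_FIFO_ADDR are hypothetical placeholders, not symbols from this tree:

#include <linux/dmaengine.h>

#define FOO_TX_FIFO_ADDR	0x80001000	/* hypothetical peripheral FIFO address */

static bool foo_filter(struct dma_chan *chan, void *param)
{
	return true;	/* placeholder: accept any DMA_SLAVE-capable channel */
}

static struct dma_chan *foo_request_tx_chan(void)
{
	struct dma_slave_config config = {
		.direction	= DMA_TO_DEVICE,	/* memory -> peripheral */
		.dst_addr	= FOO_TX_FIFO_ADDR,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,			/* words per burst */
	};
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, foo_filter, NULL);
	if (!chan)
		return NULL;

	/* Drivers below translate this into controller registers (cctl etc.) */
	if (dmaengine_slave_config(chan, &config)) {
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}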
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/TODO	1
-rw-r--r--	drivers/dma/amba-pl08x.c	246
-rw-r--r--	drivers/dma/at_hdmac.c	4
-rw-r--r--	drivers/dma/coh901318.c	19
-rw-r--r--	drivers/dma/dmaengine.c	4
-rw-r--r--	drivers/dma/ep93xx_dma.c	2
-rw-r--r--	drivers/dma/imx-sdma.c	4
-rw-r--r--	drivers/dma/intel_mid_dma.c	2
-rw-r--r--	drivers/dma/ipu/ipu_idmac.c	6
-rw-r--r--	drivers/dma/mv_xor.c	3
-rw-r--r--	drivers/dma/mxs-dma.c	13
-rw-r--r--	drivers/dma/pch_dma.c	127
-rw-r--r--	drivers/dma/pl330.c	64
-rw-r--r--	drivers/dma/ste_dma40.c	270
-rw-r--r--	drivers/dma/ste_dma40_ll.h	3
15 files changed, 442 insertions(+), 326 deletions(-)
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
index a4af8589330c..734ed0206cd5 100644
--- a/drivers/dma/TODO
+++ b/drivers/dma/TODO
@@ -9,6 +9,5 @@ TODO for slave dma
 - mxs-dma.c
 - dw_dmac
 - intel_mid_dma
-- ste_dma40
 4. Check other subsystems for dma drivers and merge/move to dmaengine
 5. Remove dma_slave_config's dma direction.
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index e6d7228b1479..196a7378d332 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -156,14 +156,10 @@ struct pl08x_driver_data {
 #define PL08X_BOUNDARY_SHIFT		(10)	/* 1KB 0x400 */
 #define PL08X_BOUNDARY_SIZE		(1 << PL08X_BOUNDARY_SHIFT)
 
-/* Minimum period between work queue runs */
-#define PL08X_WQ_PERIODMIN	20
-
 /* Size (bytes) of each LLI buffer allocated for one transfer */
 # define PL08X_LLI_TSFR_SIZE	0x2000
 
 /* Maximum times we call dma_pool_alloc on this pool without freeing */
-#define PL08X_MAX_ALLOCS	0x40
 #define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
 #define PL08X_ALIGN		8
 
@@ -495,10 +491,10 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
 
 struct pl08x_lli_build_data {
 	struct pl08x_txd *txd;
-	struct pl08x_driver_data *pl08x;
 	struct pl08x_bus_data srcbus;
 	struct pl08x_bus_data dstbus;
 	size_t remainder;
+	u32 lli_bus;
 };
 
 /*
@@ -551,8 +547,7 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
 	llis_va[num_llis].src = bd->srcbus.addr;
 	llis_va[num_llis].dst = bd->dstbus.addr;
 	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
-	if (bd->pl08x->lli_buses & PL08X_AHB2)
-		llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
+	llis_va[num_llis].lli |= bd->lli_bus;
 
 	if (cctl & PL080_CONTROL_SRC_INCR)
 		bd->srcbus.addr += len;
@@ -605,9 +600,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 	cctl = txd->cctl;
 
 	bd.txd = txd;
-	bd.pl08x = pl08x;
 	bd.srcbus.addr = txd->src_addr;
 	bd.dstbus.addr = txd->dst_addr;
+	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
 
 	/* Find maximum width of the source bus */
 	bd.srcbus.maxwidth =
@@ -622,25 +617,15 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 	/* Set up the bus widths to the maximum */
 	bd.srcbus.buswidth = bd.srcbus.maxwidth;
 	bd.dstbus.buswidth = bd.dstbus.maxwidth;
-	dev_vdbg(&pl08x->adev->dev,
-		 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
-		 __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);
-
 
 	/*
 	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
 	 */
 	max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
 		PL080_CONTROL_TRANSFER_SIZE_MASK;
-	dev_vdbg(&pl08x->adev->dev,
-		 "%s max bytes per lli = %zu\n",
-		 __func__, max_bytes_per_lli);
 
 	/* We need to count this down to zero */
 	bd.remainder = txd->len;
-	dev_vdbg(&pl08x->adev->dev,
-		 "%s remainder = %zu\n",
-		 __func__, bd.remainder);
 
 	/*
 	 * Choose bus to align to
@@ -649,6 +634,16 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 	 */
 	pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
 
+	dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n",
+		 bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
+		 bd.srcbus.buswidth,
+		 bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
+		 bd.dstbus.buswidth,
+		 bd.remainder, max_bytes_per_lli);
+	dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
+		 mbus == &bd.srcbus ? "src" : "dst",
+		 sbus == &bd.srcbus ? "src" : "dst");
+
 	if (txd->len < mbus->buswidth) {
 		/* Less than a bus width available - send as single bytes */
 		while (bd.remainder) {
@@ -840,15 +835,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 {
 	int i;
 
+	dev_vdbg(&pl08x->adev->dev,
+		 "%-3s %-9s %-10s %-10s %-10s %s\n",
+		 "lli", "", "csrc", "cdst", "clli", "cctl");
 	for (i = 0; i < num_llis; i++) {
 		dev_vdbg(&pl08x->adev->dev,
-			 "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
-			 i,
-			 &llis_va[i],
-			 llis_va[i].src,
-			 llis_va[i].dst,
-			 llis_va[i].cctl,
-			 llis_va[i].lli
+			 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			 i, &llis_va[i], llis_va[i].src,
+			 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
 			);
 	}
 }
@@ -1054,64 +1048,105 @@ pl08x_dma_tx_status(struct dma_chan *chan,
 
 /* PrimeCell DMA extension */
 struct burst_table {
-	int burstwords;
+	u32 burstwords;
 	u32 reg;
 };
 
 static const struct burst_table burst_sizes[] = {
 	{
 		.burstwords = 256,
-		.reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
-			(PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
+		.reg = PL080_BSIZE_256,
 	},
 	{
 		.burstwords = 128,
-		.reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
-			(PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
+		.reg = PL080_BSIZE_128,
 	},
 	{
 		.burstwords = 64,
-		.reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
-			(PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
+		.reg = PL080_BSIZE_64,
 	},
 	{
 		.burstwords = 32,
-		.reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
-			(PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
+		.reg = PL080_BSIZE_32,
 	},
 	{
 		.burstwords = 16,
-		.reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
-			(PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
+		.reg = PL080_BSIZE_16,
 	},
 	{
 		.burstwords = 8,
-		.reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
-			(PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
+		.reg = PL080_BSIZE_8,
 	},
 	{
 		.burstwords = 4,
-		.reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
-			(PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
+		.reg = PL080_BSIZE_4,
 	},
 	{
-		.burstwords = 1,
-		.reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
-			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
+		.burstwords = 0,
+		.reg = PL080_BSIZE_1,
 	},
 };
 
+/*
+ * Given the source and destination available bus masks, select which
+ * will be routed to each port. We try to have source and destination
+ * on separate ports, but always respect the allowable settings.
+ */
+static u32 pl08x_select_bus(u8 src, u8 dst)
+{
+	u32 cctl = 0;
+
+	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
+		cctl |= PL080_CONTROL_DST_AHB2;
+	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
+		cctl |= PL080_CONTROL_SRC_AHB2;
+
+	return cctl;
+}
+
+static u32 pl08x_cctl(u32 cctl)
+{
+	cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
+		  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
+		  PL080_CONTROL_PROT_MASK);
+
+	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
+	return cctl | PL080_CONTROL_PROT_SYS;
+}
+
+static u32 pl08x_width(enum dma_slave_buswidth width)
+{
+	switch (width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		return PL080_WIDTH_8BIT;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		return PL080_WIDTH_16BIT;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		return PL080_WIDTH_32BIT;
+	default:
+		return ~0;
+	}
+}
+
+static u32 pl08x_burst(u32 maxburst)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
+		if (burst_sizes[i].burstwords <= maxburst)
+			break;
+
+	return burst_sizes[i].reg;
+}
+
 static int dma_set_runtime_config(struct dma_chan *chan,
 				  struct dma_slave_config *config)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
-	struct pl08x_channel_data *cd = plchan->cd;
 	enum dma_slave_buswidth addr_width;
-	dma_addr_t addr;
-	u32 maxburst;
+	u32 width, burst, maxburst;
 	u32 cctl = 0;
-	int i;
 
 	if (!plchan->slave)
 		return -EINVAL;
@@ -1119,11 +1154,9 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 	/* Transfer direction */
 	plchan->runtime_direction = config->direction;
 	if (config->direction == DMA_TO_DEVICE) {
-		addr = config->dst_addr;
 		addr_width = config->dst_addr_width;
 		maxburst = config->dst_maxburst;
 	} else if (config->direction == DMA_FROM_DEVICE) {
-		addr = config->src_addr;
 		addr_width = config->src_addr_width;
 		maxburst = config->src_maxburst;
 	} else {
@@ -1132,46 +1165,40 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 		return -EINVAL;
 	}
 
-	switch (addr_width) {
-	case DMA_SLAVE_BUSWIDTH_1_BYTE:
-		cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
-			(PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
-		break;
-	case DMA_SLAVE_BUSWIDTH_2_BYTES:
-		cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
-			(PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
-		break;
-	case DMA_SLAVE_BUSWIDTH_4_BYTES:
-		cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
-			(PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
-		break;
-	default:
+	width = pl08x_width(addr_width);
+	if (width == ~0) {
 		dev_err(&pl08x->adev->dev,
 			"bad runtime_config: alien address width\n");
 		return -EINVAL;
 	}
 
+	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
+	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
+
 	/*
-	 * Now decide on a maxburst:
 	 * If this channel will only request single transfers, set this
 	 * down to ONE element. Also select one element if no maxburst
 	 * is specified.
 	 */
-	if (plchan->cd->single || maxburst == 0) {
-		cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
-			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
+	if (plchan->cd->single)
+		maxburst = 1;
+
+	burst = pl08x_burst(maxburst);
+	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
+	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
+
+	if (plchan->runtime_direction == DMA_FROM_DEVICE) {
+		plchan->src_addr = config->src_addr;
+		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
+			pl08x_select_bus(plchan->cd->periph_buses,
+					 pl08x->mem_buses);
 	} else {
-		for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
-			if (burst_sizes[i].burstwords <= maxburst)
-				break;
-		cctl |= burst_sizes[i].reg;
+		plchan->dst_addr = config->dst_addr;
+		plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
+			pl08x_select_bus(pl08x->mem_buses,
+					 plchan->cd->periph_buses);
 	}
 
-	plchan->runtime_addr = addr;
-
-	/* Modify the default channel data to fit PrimeCell request */
-	cd->cctl = cctl;
-
 	dev_dbg(&pl08x->adev->dev,
 		"configured channel %s (%s) for %s, data width %d, "
 		"maxburst %d words, LE, CCTL=0x%08x\n",
@@ -1270,23 +1297,6 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 	return 0;
 }
 
-/*
- * Given the source and destination available bus masks, select which
- * will be routed to each port. We try to have source and destination
- * on separate ports, but always respect the allowable settings.
- */
-static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
-{
-	u32 cctl = 0;
-
-	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
-		cctl |= PL080_CONTROL_DST_AHB2;
-	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
-		cctl |= PL080_CONTROL_SRC_AHB2;
-
-	return cctl;
-}
-
 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
 				       unsigned long flags)
 {
@@ -1338,8 +1348,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
 
 	if (pl08x->vd->dualmaster)
-		txd->cctl |= pl08x_select_bus(pl08x,
-			pl08x->mem_buses, pl08x->mem_buses);
+		txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
+					      pl08x->mem_buses);
 
 	ret = pl08x_prep_channel_resources(plchan, txd);
 	if (ret)
@@ -1356,7 +1366,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_txd *txd;
-	u8 src_buses, dst_buses;
 	int ret;
 
 	/*
@@ -1390,42 +1399,22 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	txd->direction = direction;
 	txd->len = sgl->length;
 
-	txd->cctl = plchan->cd->cctl &
-			~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
-			  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
-			  PL080_CONTROL_PROT_MASK);
-
-	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
-	txd->cctl |= PL080_CONTROL_PROT_SYS;
-
 	if (direction == DMA_TO_DEVICE) {
 		txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
-		txd->cctl |= PL080_CONTROL_SRC_INCR;
+		txd->cctl = plchan->dst_cctl;
 		txd->src_addr = sgl->dma_address;
-		if (plchan->runtime_addr)
-			txd->dst_addr = plchan->runtime_addr;
-		else
-			txd->dst_addr = plchan->cd->addr;
-		src_buses = pl08x->mem_buses;
-		dst_buses = plchan->cd->periph_buses;
+		txd->dst_addr = plchan->dst_addr;
 	} else if (direction == DMA_FROM_DEVICE) {
 		txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
-		txd->cctl |= PL080_CONTROL_DST_INCR;
-		if (plchan->runtime_addr)
-			txd->src_addr = plchan->runtime_addr;
-		else
-			txd->src_addr = plchan->cd->addr;
+		txd->cctl = plchan->src_cctl;
+		txd->src_addr = plchan->src_addr;
 		txd->dst_addr = sgl->dma_address;
-		src_buses = plchan->cd->periph_buses;
-		dst_buses = pl08x->mem_buses;
 	} else {
 		dev_err(&pl08x->adev->dev,
 			"%s direction unsupported\n", __func__);
 		return NULL;
 	}
 
-	txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);
-
 	ret = pl08x_prep_channel_resources(plchan, txd);
 	if (ret)
 		return NULL;
@@ -1676,6 +1665,20 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 	return mask ? IRQ_HANDLED : IRQ_NONE;
 }
 
+static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
+{
+	u32 cctl = pl08x_cctl(chan->cd->cctl);
+
+	chan->slave = true;
+	chan->name = chan->cd->bus_id;
+	chan->src_addr = chan->cd->addr;
+	chan->dst_addr = chan->cd->addr;
+	chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
+		pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
+	chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
+		pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
+}
+
 /*
  * Initialise the DMAC memcpy/slave channels.
  * Make a local wrapper to hold required data
@@ -1707,9 +1710,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 		chan->state = PL08X_CHAN_IDLE;
 
 		if (slave) {
-			chan->slave = true;
-			chan->name = pl08x->pd->slave_channels[i].bus_id;
 			chan->cd = &pl08x->pd->slave_channels[i];
+			pl08x_dma_slave_init(chan);
 		} else {
 			chan->cd = &pl08x->pd->memcpy_channel;
 			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
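The hunks above hoist pl08x_select_bus() ahead of its new callers and drop its unused pl08x argument. A standalone illustration of that routing rule, runnable in userspace; the mask and bit values are stand-ins with the same one-bit-per-master layout, not the real PL080 register encoding:

#include <stdio.h>

#define AHB1		(1 << 0)
#define AHB2		(1 << 1)
#define CCTL_SRC_AHB2	(1 << 24)	/* placeholder bit positions */
#define CCTL_DST_AHB2	(1 << 25)

static unsigned int select_bus(unsigned char src, unsigned char dst)
{
	unsigned int cctl = 0;

	/* Prefer putting source and destination on different masters */
	if (!(dst & AHB1) || ((dst & AHB2) && (src & AHB1)))
		cctl |= CCTL_DST_AHB2;
	if (!(src & AHB1) || ((src & AHB2) && !(dst & AHB2)))
		cctl |= CCTL_SRC_AHB2;

	return cctl;
}

int main(void)
{
	/* dual-master memory on both ends: src stays on AHB1, dst moves to AHB2 */
	printf("%x\n", select_bus(AHB1 | AHB2, AHB1 | AHB2));
	/* peripheral reachable only via AHB2 */
	printf("%x\n", select_bus(AHB1, AHB2));
	return 0;
}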
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 36144f88d718..6a483eac7b3f 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1216,7 +1216,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	atdma->dma_common.cap_mask = pdata->cap_mask;
 	atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
 
-	size = io->end - io->start + 1;
+	size = resource_size(io);
 	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
 		err = -EBUSY;
 		goto err_kfree;
@@ -1362,7 +1362,7 @@ static int __exit at_dma_remove(struct platform_device *pdev)
 	atdma->regs = NULL;
 
 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	release_mem_region(io->start, io->end - io->start + 1);
+	release_mem_region(io->start, resource_size(io));
 
 	kfree(atdma);
 
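The at_hdmac hunks (and the ipu_idmac ones further down) swap the open-coded `end - start + 1` for the resource_size() helper, which encapsulates the inclusive-end convention of struct resource. Its definition in <linux/ioport.h> is simply:

static inline resource_size_t resource_size(const struct resource *res)
{
	/* struct resource bounds are inclusive, hence the + 1 */
	return res->end - res->start + 1;
}

Using the helper avoids the classic off-by-one when the `+ 1` is forgotten.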
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index a92d95eac86b..4234f416ef11 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -41,6 +41,8 @@ struct coh901318_desc {
 	struct coh901318_lli *lli;
 	enum dma_data_direction dir;
 	unsigned long flags;
+	u32 head_config;
+	u32 head_ctrl;
 };
 
 struct coh901318_base {
@@ -661,6 +663,9 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
 
 	coh901318_desc_submit(cohc, cohd);
 
+	/* Program the transaction head */
+	coh901318_set_conf(cohc, cohd->head_config);
+	coh901318_set_ctrl(cohc, cohd->head_ctrl);
 	coh901318_prep_linked_list(cohc, cohd->lli);
 
 	/* start dma job on this channel */
@@ -1091,8 +1096,6 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	} else
 		goto err_direction;
 
-	coh901318_set_conf(cohc, config);
-
 	/* The dma only supports transmitting packages up to
 	 * MAX_DMA_PACKET_SIZE. Calculate to total number of
 	 * dma elemts required to send the entire sg list
@@ -1129,16 +1132,18 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	if (ret)
 		goto err_lli_fill;
 
-	/*
-	 * Set the default ctrl for the channel to the one from the lli,
-	 * things may have changed due to odd buffer alignment etc.
-	 */
-	coh901318_set_ctrl(cohc, lli->control);
 
 	COH_DBG(coh901318_list_print(cohc, lli));
 
 	/* Pick a descriptor to handle this transfer */
 	cohd = coh901318_desc_get(cohc);
+	cohd->head_config = config;
+	/*
+	 * Set the default head ctrl for the channel to the one from the
+	 * lli, things may have changed due to odd buffer alignment
+	 * etc.
+	 */
+	cohd->head_ctrl = lli->control;
 	cohd->dir = direction;
 	cohd->flags = flags;
 	cohd->desc.tx_submit = coh901318_tx_submit;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 48694c34d96b..26374b2a55a2 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -510,8 +510,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 				 dma_chan_name(chan));
 			list_del_rcu(&device->global_node);
 		} else if (err)
-			pr_err("dmaengine: failed to get %s: (%d)\n",
+			pr_debug("dmaengine: failed to get %s: (%d)\n",
 			       dma_chan_name(chan), err);
 		else
 			break;
 		if (--device->privatecnt == 0)
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 0766c1e53b1d..5d7a49bd7c26 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -902,7 +902,7 @@ static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
  *
  * Returns a valid DMA descriptor or %NULL in case of failure.
  */
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
 ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
 			   dma_addr_t src, size_t len, unsigned long flags)
 {
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 1eb60ded2f0d..7bd7e98548cd 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1305,8 +1305,10 @@ static int __init sdma_probe(struct platform_device *pdev)
 		goto err_request_irq;
 
 	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
-	if (!sdma->script_addrs)
+	if (!sdma->script_addrs) {
+		ret = -ENOMEM;
 		goto err_alloc;
+	}
 
 	if (of_id)
 		pdev->id_entry = of_id->data;
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index f653517ef744..8a3fdd87db97 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -1351,7 +1351,6 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
 		return -EAGAIN;
 	}
 	device->state = SUSPENDED;
-	pci_set_drvdata(pci, device);
 	pci_save_state(pci);
 	pci_disable_device(pci);
 	pci_set_power_state(pci, PCI_D3hot);
@@ -1380,7 +1379,6 @@ int dma_resume(struct pci_dev *pci)
 	}
 	device->state = RUNNING;
 	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
-	pci_set_drvdata(pci, device);
 	return 0;
 }
 
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index fd7d2b308cf2..6815905a772f 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1706,16 +1706,14 @@ static int __init ipu_probe(struct platform_device *pdev)
 		ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base);
 
 	/* Remap IPU common registers */
-	ipu_data.reg_ipu = ioremap(mem_ipu->start,
-				   mem_ipu->end - mem_ipu->start + 1);
+	ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu));
 	if (!ipu_data.reg_ipu) {
 		ret = -ENOMEM;
 		goto err_ioremap_ipu;
 	}
 
 	/* Remap Image Converter and Image DMA Controller registers */
-	ipu_data.reg_ic = ioremap(mem_ic->start,
-				  mem_ic->end - mem_ic->start + 1);
+	ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic));
 	if (!ipu_data.reg_ic) {
 		ret = -ENOMEM;
 		goto err_ioremap_ic;
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 06f9f27dbe7c..9a353c2216d0 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1304,7 +1304,8 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
 	if (!res)
 		return -ENODEV;
 
-	msp->xor_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
+				     resource_size(res));
 	if (!msp->xor_base)
 		return -EBUSY;
 
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 88aad4f54002..be641cbd36fc 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -327,10 +327,12 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
 
 	memset(mxs_chan->ccw, 0, PAGE_SIZE);
 
-	ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
-			  0, "mxs-dma", mxs_dma);
-	if (ret)
-		goto err_irq;
+	if (mxs_chan->chan_irq != NO_IRQ) {
+		ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
+				  0, "mxs-dma", mxs_dma);
+		if (ret)
+			goto err_irq;
+	}
 
 	ret = clk_enable(mxs_dma->clk);
 	if (ret)
@@ -535,6 +537,7 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	switch (cmd) {
 	case DMA_TERMINATE_ALL:
 		mxs_dma_disable_chan(mxs_chan);
+		mxs_dma_reset_chan(mxs_chan);
 		break;
 	case DMA_PAUSE:
 		mxs_dma_pause_chan(mxs_chan);
@@ -707,6 +710,8 @@ static struct platform_device_id mxs_dma_type[] = {
 	}, {
 		.name = "mxs-dma-apbx",
 		.driver_data = MXS_DMA_APBX,
+	}, {
+		/* end of list */
 	}
 };
 
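The last mxs-dma hunk adds the empty sentinel entry that the platform bus expects: its match loop stops at the first entry whose name is empty, so an unterminated table sends the walk off the end of the array. Roughly how the bus-side match works, simplified from drivers/base/platform.c of this era:

static const struct platform_device_id *
platform_match_id(const struct platform_device_id *id,
		  struct platform_device *pdev)
{
	while (id->name[0]) {		/* empty name == end-of-list sentinel */
		if (strcmp(pdev->name, id->name) == 0) {
			pdev->id_entry = id;
			return id;
		}
		id++;
	}
	return NULL;
}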
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index ff5b38f9d45b..1ac8d4b580b7 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -45,7 +45,8 @@
 #define DMA_STATUS_MASK_BITS		0x3
 #define DMA_STATUS_SHIFT_BITS		16
 #define DMA_STATUS_IRQ(x)		(0x1 << (x))
-#define DMA_STATUS_ERR(x)		(0x1 << ((x) + 8))
+#define DMA_STATUS0_ERR(x)		(0x1 << ((x) + 8))
+#define DMA_STATUS2_ERR(x)		(0x1 << (x))
 
 #define DMA_DESC_WIDTH_SHIFT_BITS	12
 #define DMA_DESC_WIDTH_1_BYTE		(0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
@@ -61,6 +62,9 @@
 
 #define MAX_CHAN_NR			8
 
+#define DMA_MASK_CTL0_MODE	0x33333333
+#define DMA_MASK_CTL2_MODE	0x00003333
+
 static unsigned int init_nr_desc_per_channel = 64;
 module_param(init_nr_desc_per_channel, uint, 0644);
 MODULE_PARM_DESC(init_nr_desc_per_channel,
@@ -133,6 +137,7 @@ struct pch_dma {
 #define PCH_DMA_CTL3	0x0C
 #define PCH_DMA_STS0	0x10
 #define PCH_DMA_STS1	0x14
+#define PCH_DMA_STS2	0x18
 
 #define dma_readl(pd, name) \
 	readl((pd)->membase + PCH_DMA_##name)
@@ -183,13 +188,19 @@ static void pdc_enable_irq(struct dma_chan *chan, int enable)
 {
 	struct pch_dma *pd = to_pd(chan->device);
 	u32 val;
+	int pos;
+
+	if (chan->chan_id < 8)
+		pos = chan->chan_id;
+	else
+		pos = chan->chan_id + 8;
 
 	val = dma_readl(pd, CTL2);
 
 	if (enable)
-		val |= 0x1 << chan->chan_id;
+		val |= 0x1 << pos;
 	else
-		val &= ~(0x1 << chan->chan_id);
+		val &= ~(0x1 << pos);
 
 	dma_writel(pd, CTL2, val);
 
@@ -202,10 +213,17 @@ static void pdc_set_dir(struct dma_chan *chan)
 	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 	struct pch_dma *pd = to_pd(chan->device);
 	u32 val;
+	u32 mask_mode;
+	u32 mask_ctl;
 
 	if (chan->chan_id < 8) {
 		val = dma_readl(pd, CTL0);
 
+		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
+					(DMA_CTL0_BITS_PER_CH * chan->chan_id);
+		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+				       (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+		val &= mask_mode;
 		if (pd_chan->dir == DMA_TO_DEVICE)
 			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
 				       DMA_CTL0_DIR_SHIFT_BITS);
@@ -213,18 +231,24 @@ static void pdc_set_dir(struct dma_chan *chan)
 			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
 					 DMA_CTL0_DIR_SHIFT_BITS));
 
+		val |= mask_ctl;
 		dma_writel(pd, CTL0, val);
 	} else {
 		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
 		val = dma_readl(pd, CTL3);
 
+		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
+					(DMA_CTL0_BITS_PER_CH * ch);
+		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+						 (DMA_CTL0_BITS_PER_CH * ch));
+		val &= mask_mode;
 		if (pd_chan->dir == DMA_TO_DEVICE)
 			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
 				       DMA_CTL0_DIR_SHIFT_BITS);
 		else
 			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
 					 DMA_CTL0_DIR_SHIFT_BITS));
-
+		val |= mask_ctl;
 		dma_writel(pd, CTL3, val);
 	}
 
@@ -236,33 +260,37 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode)
 {
 	struct pch_dma *pd = to_pd(chan->device);
 	u32 val;
+	u32 mask_ctl;
+	u32 mask_dir;
 
 	if (chan->chan_id < 8) {
+		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+			   (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +\
+				 DMA_CTL0_DIR_SHIFT_BITS);
 		val = dma_readl(pd, CTL0);
-
-		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
-			(DMA_CTL0_BITS_PER_CH * chan->chan_id));
+		val &= mask_dir;
 		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
-
+		val |= mask_ctl;
 		dma_writel(pd, CTL0, val);
 	} else {
 		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
-
+		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+						 (DMA_CTL0_BITS_PER_CH * ch));
+		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +\
+				 DMA_CTL0_DIR_SHIFT_BITS);
 		val = dma_readl(pd, CTL3);
-
-		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
-			(DMA_CTL0_BITS_PER_CH * ch));
+		val &= mask_dir;
 		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
-
+		val |= mask_ctl;
 		dma_writel(pd, CTL3, val);
-
 	}
 
 	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
 		chan->chan_id, val);
 }
 
-static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
+static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
 {
 	struct pch_dma *pd = to_pd(pd_chan->chan.device);
 	u32 val;
@@ -272,9 +300,27 @@ static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
 			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
 }
 
+static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
+{
+	struct pch_dma *pd = to_pd(pd_chan->chan.device);
+	u32 val;
+
+	val = dma_readl(pd, STS2);
+	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
+			DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
+}
+
 static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
 {
-	if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE)
+	u32 sts;
+
+	if (pd_chan->chan.chan_id < 8)
+		sts = pdc_get_status0(pd_chan);
+	else
+		sts = pdc_get_status2(pd_chan);
+
+
+	if (sts == DMA_STATUS_IDLE)
 		return true;
 	else
 		return false;
@@ -495,11 +541,11 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
 		list_add_tail(&desc->desc_node, &tmp_list);
 	}
 
-	spin_lock_bh(&pd_chan->lock);
+	spin_lock_irq(&pd_chan->lock);
 	list_splice(&tmp_list, &pd_chan->free_list);
 	pd_chan->descs_allocated = i;
 	pd_chan->completed_cookie = chan->cookie = 1;
-	spin_unlock_bh(&pd_chan->lock);
+	spin_unlock_irq(&pd_chan->lock);
 
 	pdc_enable_irq(chan, 1);
 
@@ -517,10 +563,10 @@ static void pd_free_chan_resources(struct dma_chan *chan)
 	BUG_ON(!list_empty(&pd_chan->active_list));
 	BUG_ON(!list_empty(&pd_chan->queue));
 
-	spin_lock_bh(&pd_chan->lock);
+	spin_lock_irq(&pd_chan->lock);
 	list_splice_init(&pd_chan->free_list, &tmp_list);
 	pd_chan->descs_allocated = 0;
-	spin_unlock_bh(&pd_chan->lock);
+	spin_unlock_irq(&pd_chan->lock);
 
 	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
 		pci_pool_free(pd->pool, desc, desc->txd.phys);
@@ -536,10 +582,10 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	dma_cookie_t last_completed;
 	int ret;
 
-	spin_lock_bh(&pd_chan->lock);
+	spin_lock_irq(&pd_chan->lock);
 	last_completed = pd_chan->completed_cookie;
 	last_used = chan->cookie;
-	spin_unlock_bh(&pd_chan->lock);
+	spin_unlock_irq(&pd_chan->lock);
 
 	ret = dma_async_is_complete(cookie, last_completed, last_used);
 
@@ -654,7 +700,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	if (cmd != DMA_TERMINATE_ALL)
 		return -ENXIO;
 
-	spin_lock_bh(&pd_chan->lock);
+	spin_lock_irq(&pd_chan->lock);
 
 	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
 
@@ -664,7 +710,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	list_for_each_entry_safe(desc, _d, &list, desc_node)
 		pdc_chain_complete(pd_chan, desc);
 
-	spin_unlock_bh(&pd_chan->lock);
+	spin_unlock_irq(&pd_chan->lock);
 
 	return 0;
 }
@@ -693,30 +739,45 @@ static irqreturn_t pd_irq(int irq, void *devid)
 	struct pch_dma *pd = (struct pch_dma *)devid;
 	struct pch_dma_chan *pd_chan;
 	u32 sts0;
+	u32 sts2;
 	int i;
-	int ret = IRQ_NONE;
+	int ret0 = IRQ_NONE;
+	int ret2 = IRQ_NONE;
 
 	sts0 = dma_readl(pd, STS0);
+	sts2 = dma_readl(pd, STS2);
 
 	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);
 
 	for (i = 0; i < pd->dma.chancnt; i++) {
 		pd_chan = &pd->channels[i];
 
-		if (sts0 & DMA_STATUS_IRQ(i)) {
-			if (sts0 & DMA_STATUS_ERR(i))
-				set_bit(0, &pd_chan->err_status);
+		if (i < 8) {
+			if (sts0 & DMA_STATUS_IRQ(i)) {
+				if (sts0 & DMA_STATUS0_ERR(i))
+					set_bit(0, &pd_chan->err_status);
 
-			tasklet_schedule(&pd_chan->tasklet);
-			ret = IRQ_HANDLED;
-		}
+				tasklet_schedule(&pd_chan->tasklet);
+				ret0 = IRQ_HANDLED;
+			}
+		} else {
+			if (sts2 & DMA_STATUS_IRQ(i - 8)) {
+				if (sts2 & DMA_STATUS2_ERR(i))
+					set_bit(0, &pd_chan->err_status);
+
+				tasklet_schedule(&pd_chan->tasklet);
+				ret2 = IRQ_HANDLED;
+			}
+		}
 	}
 
 	/* clear interrupt bits in status register */
-	dma_writel(pd, STS0, sts0);
+	if (ret0)
+		dma_writel(pd, STS0, sts0);
+	if (ret2)
+		dma_writel(pd, STS2, sts2);
 
-	return ret;
+	return ret0 | ret2;
 }
 
 #ifdef CONFIG_PM
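The pch_dma changes split channel status across two registers: channels 0-7 stay in STS0, channels 8-11 move to STS2, with a few status bits per channel starting at bit 16. A userspace sketch of the extraction, with register reads replaced by plain values; the constants mirror the driver's, assuming DMA_STATUS_BITS_PER_CH is 2:

#include <stdint.h>
#include <stdio.h>

#define STATUS_MASK_BITS	0x3
#define STATUS_SHIFT_BITS	16
#define STATUS_BITS_PER_CH	2	/* assumed, matches the 0x3 mask */

static uint32_t chan_status(int chan_id, uint32_t sts0, uint32_t sts2)
{
	if (chan_id < 8)
		return STATUS_MASK_BITS & (sts0 >> (STATUS_SHIFT_BITS +
				STATUS_BITS_PER_CH * chan_id));
	return STATUS_MASK_BITS & (sts2 >> (STATUS_SHIFT_BITS +
			STATUS_BITS_PER_CH * (chan_id - 8)));
}

int main(void)
{
	uint32_t sts0 = 0x2u << (16 + 2 * 3);	/* fake: channel 3 status = 2 */
	uint32_t sts2 = 0x1u << (16 + 2 * 1);	/* fake: channel 9 status = 1 */

	printf("ch3=%u ch9=%u\n", chan_status(3, sts0, sts2),
	       chan_status(9, sts0, sts2));
	return 0;
}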
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 6abe1ec1f2ce..00eee59e8b33 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -82,7 +82,7 @@ struct dma_pl330_dmac {
 	spinlock_t pool_lock;
 
 	/* Peripheral channels connected to this DMAC */
-	struct dma_pl330_chan peripherals[0]; /* keep at end */
+	struct dma_pl330_chan *peripherals; /* keep at end */
 };
 
 struct dma_pl330_desc {
@@ -451,8 +451,13 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
 	desc->txd.cookie = 0;
 	async_tx_ack(&desc->txd);
 
-	desc->req.rqtype = peri->rqtype;
-	desc->req.peri = peri->peri_id;
+	if (peri) {
+		desc->req.rqtype = peri->rqtype;
+		desc->req.peri = peri->peri_id;
+	} else {
+		desc->req.rqtype = MEMTOMEM;
+		desc->req.peri = 0;
+	}
 
 	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
 
@@ -529,10 +534,10 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 	struct pl330_info *pi;
 	int burst;
 
-	if (unlikely(!pch || !len || !peri))
+	if (unlikely(!pch || !len))
 		return NULL;
 
-	if (peri->rqtype != MEMTOMEM)
+	if (peri && peri->rqtype != MEMTOMEM)
 		return NULL;
 
 	pi = &pch->dmac->pif;
@@ -577,7 +582,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	int i, burst_size;
 	dma_addr_t addr;
 
-	if (unlikely(!pch || !sgl || !sg_len))
+	if (unlikely(!pch || !sgl || !sg_len || !peri))
 		return NULL;
 
 	/* Make sure the direction is consistent */
@@ -666,17 +671,12 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	struct dma_device *pd;
 	struct resource *res;
 	int i, ret, irq;
+	int num_chan;
 
 	pdat = adev->dev.platform_data;
 
-	if (!pdat || !pdat->nr_valid_peri) {
-		dev_err(&adev->dev, "platform data missing\n");
-		return -ENODEV;
-	}
-
 	/* Allocate a new DMAC and its Channels */
-	pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch)
-		+ sizeof(*pdmac), GFP_KERNEL);
+	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
 	if (!pdmac) {
 		dev_err(&adev->dev, "unable to allocate mem\n");
 		return -ENOMEM;
@@ -685,7 +685,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	pi = &pdmac->pif;
 	pi->dev = &adev->dev;
 	pi->pl330_data = NULL;
-	pi->mcbufsz = pdat->mcbuf_sz;
+	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
 
 	res = &adev->res;
 	request_mem_region(res->start, resource_size(res), "dma-pl330");
@@ -717,27 +717,35 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	INIT_LIST_HEAD(&pd->channels);
 
 	/* Initialize channel parameters */
-	for (i = 0; i < pdat->nr_valid_peri; i++) {
-		struct dma_pl330_peri *peri = &pdat->peri[i];
-		pch = &pdmac->peripherals[i];
+	num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
+	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
 
-		switch (peri->rqtype) {
-		case MEMTOMEM:
+	for (i = 0; i < num_chan; i++) {
+		pch = &pdmac->peripherals[i];
+		if (pdat) {
+			struct dma_pl330_peri *peri = &pdat->peri[i];
+
+			switch (peri->rqtype) {
+			case MEMTOMEM:
+				dma_cap_set(DMA_MEMCPY, pd->cap_mask);
+				break;
+			case MEMTODEV:
+			case DEVTOMEM:
+				dma_cap_set(DMA_SLAVE, pd->cap_mask);
+				break;
+			default:
+				dev_err(&adev->dev, "DEVTODEV Not Supported\n");
+				continue;
+			}
+			pch->chan.private = peri;
+		} else {
 			dma_cap_set(DMA_MEMCPY, pd->cap_mask);
-			break;
-		case MEMTODEV:
-		case DEVTOMEM:
-			dma_cap_set(DMA_SLAVE, pd->cap_mask);
-			break;
-		default:
-			dev_err(&adev->dev, "DEVTODEV Not Supported\n");
-			continue;
+			pch->chan.private = NULL;
 		}
 
 		INIT_LIST_HEAD(&pch->work_list);
 		spin_lock_init(&pch->lock);
 		pch->pl330_chid = NULL;
-		pch->chan.private = peri;
 		pch->chan.device = pd;
 		pch->chan.chan_id = i;
 		pch->dmac = pdmac;
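The pl330 probe rework above stops sizing the DMAC allocation by a trailing channel array and instead allocates the channel array separately, sized by the larger of the board-declared and hardware-reported channel counts. The shape of that change as a small userspace sketch; hypothetical types, with calloc standing in for kzalloc:

#include <stdlib.h>

struct chan { int id; };

struct dmac {
	int num_chan;
	struct chan *channels;	/* was: struct chan channels[0]; at the end */
};

static struct dmac *dmac_alloc(int pdat_chans, int hw_chans)
{
	struct dmac *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	/* use the larger of what the board declares and what the IP reports */
	d->num_chan = pdat_chans > hw_chans ? pdat_chans : hw_chans;
	d->channels = calloc(d->num_chan, sizeof(*d->channels));
	if (!d->channels) {
		free(d);
		return NULL;
	}
	return d;
}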
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 29d1addbe0cf..cd3a7c726bf8 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/err.h>
+#include <linux/amba/bus.h>
 
 #include <plat/ste_dma40.h>
 
@@ -45,9 +46,6 @@
 #define D40_ALLOC_PHY		(1 << 30)
 #define D40_ALLOC_LOG_FREE	0
 
-/* Hardware designer of the block */
-#define D40_HW_DESIGNER 0x8
-
 /**
  * enum 40_command - The different commands and/or statuses.
  *
@@ -186,6 +184,8 @@ struct d40_base;
  * @log_def: Default logical channel settings.
  * @lcla: Space for one dst src pair for logical channel transfers.
  * @lcpa: Pointer to dst and src lcpa settings.
+ * @runtime_addr: runtime configured address.
+ * @runtime_direction: runtime configured direction.
  *
  * This struct can either "be" a logical or a physical channel.
  */
@@ -200,6 +200,7 @@ struct d40_chan {
 	struct dma_chan chan;
 	struct tasklet_struct tasklet;
 	struct list_head client;
+	struct list_head pending_queue;
 	struct list_head active;
 	struct list_head queue;
 	struct stedma40_chan_cfg dma_cfg;
@@ -645,7 +646,20 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
 
 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
 {
-	list_add_tail(&desc->node, &d40c->queue);
+	list_add_tail(&desc->node, &d40c->pending_queue);
+}
+
+static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
+{
+	struct d40_desc *d;
+
+	if (list_empty(&d40c->pending_queue))
+		return NULL;
+
+	d = list_first_entry(&d40c->pending_queue,
+			     struct d40_desc,
+			     node);
+	return d;
 }
 
 static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
@@ -802,6 +816,11 @@ static void d40_term_all(struct d40_chan *d40c)
 		d40_desc_free(d40c, d40d);
 	}
 
+	/* Release pending descriptors */
+	while ((d40d = d40_first_pending(d40c))) {
+		d40_desc_remove(d40d);
+		d40_desc_free(d40c, d40d);
+	}
 
 	d40c->pending_tx = 0;
 	d40c->busy = false;
@@ -2092,7 +2111,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
 	struct scatterlist *sg;
 	int i;
 
-	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL);
+	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
 	for (i = 0; i < periods; i++) {
 		sg_dma_address(&sg[i]) = dma_addr;
 		sg_dma_len(&sg[i]) = period_len;
@@ -2152,24 +2171,87 @@ static void d40_issue_pending(struct dma_chan *chan)
 
 	spin_lock_irqsave(&d40c->lock, flags);
 
-	/* Busy means that pending jobs are already being processed */
+	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
+
+	/* Busy means that queued jobs are already being processed */
 	if (!d40c->busy)
 		(void) d40_queue_start(d40c);
 
 	spin_unlock_irqrestore(&d40c->lock, flags);
 }
 
+static int
+dma40_config_to_halfchannel(struct d40_chan *d40c,
+			    struct stedma40_half_channel_info *info,
+			    enum dma_slave_buswidth width,
+			    u32 maxburst)
+{
+	enum stedma40_periph_data_width addr_width;
+	int psize;
+
+	switch (width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		addr_width = STEDMA40_BYTE_WIDTH;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		addr_width = STEDMA40_HALFWORD_WIDTH;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		addr_width = STEDMA40_WORD_WIDTH;
+		break;
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
+		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
+		break;
+	default:
+		dev_err(d40c->base->dev,
+			"illegal peripheral address width "
+			"requested (%d)\n",
+			width);
+		return -EINVAL;
+	}
+
+	if (chan_is_logical(d40c)) {
+		if (maxburst >= 16)
+			psize = STEDMA40_PSIZE_LOG_16;
+		else if (maxburst >= 8)
+			psize = STEDMA40_PSIZE_LOG_8;
+		else if (maxburst >= 4)
+			psize = STEDMA40_PSIZE_LOG_4;
+		else
+			psize = STEDMA40_PSIZE_LOG_1;
+	} else {
+		if (maxburst >= 16)
+			psize = STEDMA40_PSIZE_PHY_16;
+		else if (maxburst >= 8)
+			psize = STEDMA40_PSIZE_PHY_8;
+		else if (maxburst >= 4)
+			psize = STEDMA40_PSIZE_PHY_4;
+		else
+			psize = STEDMA40_PSIZE_PHY_1;
+	}
+
+	info->data_width = addr_width;
+	info->psize = psize;
+	info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
+
+	return 0;
+}
+
 /* Runtime reconfiguration extension */
-static void d40_set_runtime_config(struct dma_chan *chan,
-				   struct dma_slave_config *config)
+static int d40_set_runtime_config(struct dma_chan *chan,
+				  struct dma_slave_config *config)
 {
 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
 	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
-	enum dma_slave_buswidth config_addr_width;
+	enum dma_slave_buswidth src_addr_width, dst_addr_width;
 	dma_addr_t config_addr;
-	u32 config_maxburst;
-	enum stedma40_periph_data_width addr_width;
-	int psize;
+	u32 src_maxburst, dst_maxburst;
+	int ret;
+
+	src_addr_width = config->src_addr_width;
+	src_maxburst = config->src_maxburst;
+	dst_addr_width = config->dst_addr_width;
+	dst_maxburst = config->dst_maxburst;
 
 	if (config->direction == DMA_FROM_DEVICE) {
 		dma_addr_t dev_addr_rx =
@@ -2188,8 +2270,11 @@ static void d40_set_runtime_config(struct dma_chan *chan,
2188 cfg->dir); 2270 cfg->dir);
2189 cfg->dir = STEDMA40_PERIPH_TO_MEM; 2271 cfg->dir = STEDMA40_PERIPH_TO_MEM;
2190 2272
2191 config_addr_width = config->src_addr_width; 2273 /* Configure the memory side */
2192 config_maxburst = config->src_maxburst; 2274 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2275 dst_addr_width = src_addr_width;
2276 if (dst_maxburst == 0)
2277 dst_maxburst = src_maxburst;
2193 2278
2194 } else if (config->direction == DMA_TO_DEVICE) { 2279 } else if (config->direction == DMA_TO_DEVICE) {
2195 dma_addr_t dev_addr_tx = 2280 dma_addr_t dev_addr_tx =
@@ -2208,68 +2293,39 @@ static void d40_set_runtime_config(struct dma_chan *chan,
2208 cfg->dir); 2293 cfg->dir);
2209 cfg->dir = STEDMA40_MEM_TO_PERIPH; 2294 cfg->dir = STEDMA40_MEM_TO_PERIPH;
2210 2295
2211 config_addr_width = config->dst_addr_width; 2296 /* Configure the memory side */
2212 config_maxburst = config->dst_maxburst; 2297 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2213 2298 src_addr_width = dst_addr_width;
2299 if (src_maxburst == 0)
2300 src_maxburst = dst_maxburst;
2214 } else { 2301 } else {
2215 dev_err(d40c->base->dev, 2302 dev_err(d40c->base->dev,
2216 "unrecognized channel direction %d\n", 2303 "unrecognized channel direction %d\n",
2217 config->direction); 2304 config->direction);
2218 return; 2305 return -EINVAL;
2219 } 2306 }
2220 2307
2221 switch (config_addr_width) { 2308 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2222 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2223 addr_width = STEDMA40_BYTE_WIDTH;
2224 break;
2225 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2226 addr_width = STEDMA40_HALFWORD_WIDTH;
2227 break;
2228 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2229 addr_width = STEDMA40_WORD_WIDTH;
2230 break;
2231 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2232 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2233 break;
2234 default:
2235 dev_err(d40c->base->dev, 2309 dev_err(d40c->base->dev,
2236 "illegal peripheral address width " 2310 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2237 "requested (%d)\n", 2311 src_maxburst,
2238 config->src_addr_width); 2312 src_addr_width,
2239 return; 2313 dst_maxburst,
2314 dst_addr_width);
2315 return -EINVAL;
2240 } 2316 }
2241 2317
2242 if (chan_is_logical(d40c)) { 2318 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2243 if (config_maxburst >= 16) 2319 src_addr_width,
2244 psize = STEDMA40_PSIZE_LOG_16; 2320 src_maxburst);
2245 else if (config_maxburst >= 8) 2321 if (ret)
2246 psize = STEDMA40_PSIZE_LOG_8; 2322 return ret;
2247 else if (config_maxburst >= 4)
2248 psize = STEDMA40_PSIZE_LOG_4;
2249 else
2250 psize = STEDMA40_PSIZE_LOG_1;
2251 } else {
2252 if (config_maxburst >= 16)
2253 psize = STEDMA40_PSIZE_PHY_16;
2254 else if (config_maxburst >= 8)
2255 psize = STEDMA40_PSIZE_PHY_8;
2256 else if (config_maxburst >= 4)
2257 psize = STEDMA40_PSIZE_PHY_4;
2258 else if (config_maxburst >= 2)
2259 psize = STEDMA40_PSIZE_PHY_2;
2260 else
2261 psize = STEDMA40_PSIZE_PHY_1;
2262 }
2263 2323
2264 /* Set up all the endpoint configs */ 2324 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2265 cfg->src_info.data_width = addr_width; 2325 dst_addr_width,
2266 cfg->src_info.psize = psize; 2326 dst_maxburst);
2267 cfg->src_info.big_endian = false; 2327 if (ret)
2268 cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; 2328 return ret;
2269 cfg->dst_info.data_width = addr_width;
2270 cfg->dst_info.psize = psize;
2271 cfg->dst_info.big_endian = false;
2272 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2273 2329
2274 /* Fill in register values */ 2330 /* Fill in register values */
2275 if (chan_is_logical(d40c)) 2331 if (chan_is_logical(d40c))
@@ -2282,12 +2338,14 @@ static void d40_set_runtime_config(struct dma_chan *chan,
2282 d40c->runtime_addr = config_addr; 2338 d40c->runtime_addr = config_addr;
2283 d40c->runtime_direction = config->direction; 2339 d40c->runtime_direction = config->direction;
2284 dev_dbg(d40c->base->dev, 2340 dev_dbg(d40c->base->dev,
2285 "configured channel %s for %s, data width %d, " 2341 "configured channel %s for %s, data width %d/%d, "
2286 "maxburst %d bytes, LE, no flow control\n", 2342 "maxburst %d/%d elements, LE, no flow control\n",
2287 dma_chan_name(chan), 2343 dma_chan_name(chan),
2288 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", 2344 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
2289 config_addr_width, 2345 src_addr_width, dst_addr_width,
2290 config_maxburst); 2346 src_maxburst, dst_maxburst);
2347
2348 return 0;
2291} 2349}
2292 2350
2293static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 2351static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
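After this rework d40_set_runtime_config() configures both halves of the transfer, lets the
memory side default to the device side when the client leaves it unset (width of
DMA_SLAVE_BUSWIDTH_UNDEFINED or maxburst of 0), and only insists that the two sides move the
same number of bytes per burst. A hypothetical client configuration that satisfies the new
check (the foo_* naming is made up; dmaengine_slave_config() is the standard wrapper around
DMA_SLAVE_CONFIG):

    #include <linux/dmaengine.h>

    static int foo_configure_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
    {
            struct dma_slave_config cfg = {
                    .direction      = DMA_FROM_DEVICE,
                    .src_addr       = fifo_addr,
                    /* device side: byte-wide FIFO, 16-element bursts */
                    .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
                    .src_maxburst   = 16,
                    /* memory side: 4 * 4 == 16 * 1 bytes, so accepted */
                    .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                    .dst_maxburst   = 4,
            };

            return dmaengine_slave_config(chan, &cfg);
    }

Leaving the dst_* fields zeroed would also work; the driver would then mirror the source
parameters onto the memory side.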
@@ -2308,9 +2366,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2308 case DMA_RESUME: 2366 case DMA_RESUME:
2309 return d40_resume(d40c); 2367 return d40_resume(d40c);
2310 case DMA_SLAVE_CONFIG: 2368 case DMA_SLAVE_CONFIG:
2311 d40_set_runtime_config(chan, 2369 return d40_set_runtime_config(chan,
2312 (struct dma_slave_config *) arg); 2370 (struct dma_slave_config *) arg);
2313 return 0;
2314 default: 2371 default:
2315 break; 2372 break;
2316 } 2373 }
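Because DMA_SLAVE_CONFIG now forwards d40_set_runtime_config()'s return value rather than
unconditionally returning 0, a mismatched configuration is finally visible to the caller.
A sketch of the client-side check (hypothetical helper):

    #include <linux/device.h>
    #include <linux/dmaengine.h>

    static int foo_apply_config(struct device *dev, struct dma_chan *chan,
                                struct dma_slave_config *cfg)
    {
            int ret = dmaengine_slave_config(chan, cfg);

            if (ret)        /* e.g. -EINVAL on a width/maxburst mismatch */
                    dev_err(dev, "slave config rejected: %d\n", ret);
            return ret;
    }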
@@ -2341,6 +2398,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2341 2398
2342 INIT_LIST_HEAD(&d40c->active); 2399 INIT_LIST_HEAD(&d40c->active);
2343 INIT_LIST_HEAD(&d40c->queue); 2400 INIT_LIST_HEAD(&d40c->queue);
2401 INIT_LIST_HEAD(&d40c->pending_queue);
2344 INIT_LIST_HEAD(&d40c->client); 2402 INIT_LIST_HEAD(&d40c->client);
2345 2403
2346 tasklet_init(&d40c->tasklet, dma_tasklet, 2404 tasklet_init(&d40c->tasklet, dma_tasklet,
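The pending_queue initialized above pairs with the list_splice_tail_init() added to
d40_issue_pending() earlier in this diff: submitted descriptors accumulate on pending_queue
and are only handed to the hardware-facing queue once the client calls
dma_async_issue_pending(), as the dmaengine API requires. A simplified sketch of the
two-stage pattern under hypothetical foo_* names:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct foo_chan {
            spinlock_t lock;
            bool busy;
            struct list_head pending_queue; /* submitted, not yet issued */
            struct list_head queue;         /* issued to the queue runner */
    };

    static void foo_queue_start(struct foo_chan *c)
    {
            /* hypothetical: program the hardware from the head of c->queue */
    }

    /* tx_submit(): descriptors only accumulate... */
    static void foo_submit(struct foo_chan *c, struct list_head *node)
    {
            unsigned long flags;

            spin_lock_irqsave(&c->lock, flags);
            list_add_tail(node, &c->pending_queue);
            spin_unlock_irqrestore(&c->lock, flags);
    }

    /* ...until issue_pending() splices the whole batch over in O(1) */
    static void foo_issue_pending(struct foo_chan *c)
    {
            unsigned long flags;

            spin_lock_irqsave(&c->lock, flags);
            list_splice_tail_init(&c->pending_queue, &c->queue);
            if (!c->busy)
                    foo_queue_start(c);
            spin_unlock_irqrestore(&c->lock, flags);
    }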
@@ -2502,25 +2560,6 @@ static int __init d40_phy_res_init(struct d40_base *base)
2502 2560
2503static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) 2561static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2504{ 2562{
2505 static const struct d40_reg_val dma_id_regs[] = {
2506 /* Peripheral Id */
2507 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2508 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2509 /*
2510 * D40_DREG_PERIPHID2 Depends on HW revision:
2511 * DB8500ed has 0x0008,
2512 * ? has 0x0018,
2513 * DB8500v1 has 0x0028
2514 * DB8500v2 has 0x0038
2515 */
2516 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2517
2518 /* PCell Id */
2519 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2520 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2521 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2522 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2523 };
2524 struct stedma40_platform_data *plat_data; 2563 struct stedma40_platform_data *plat_data;
2525 struct clk *clk = NULL; 2564 struct clk *clk = NULL;
2526 void __iomem *virtbase = NULL; 2565 void __iomem *virtbase = NULL;
@@ -2529,8 +2568,9 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2529 int num_log_chans = 0; 2568 int num_log_chans = 0;
2530 int num_phy_chans; 2569 int num_phy_chans;
2531 int i; 2570 int i;
2532 u32 val; 2571 u32 pid;
2533 u32 rev; 2572 u32 cid;
2573 u8 rev;
2534 2574
2535 clk = clk_get(&pdev->dev, NULL); 2575 clk = clk_get(&pdev->dev, NULL);
2536 2576
@@ -2554,32 +2594,32 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2554 if (!virtbase) 2594 if (!virtbase)
2555 goto failure; 2595 goto failure;
2556 2596
2557 /* HW version check */ 2597 /* This is just a regular AMBA PrimeCell ID actually */
2558 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { 2598 for (pid = 0, i = 0; i < 4; i++)
2559 if (dma_id_regs[i].val != 2599 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
2560 readl(virtbase + dma_id_regs[i].reg)) { 2600 & 255) << (i * 8);
2561 d40_err(&pdev->dev, 2601 for (cid = 0, i = 0; i < 4; i++)
2562 "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", 2602 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
2563 dma_id_regs[i].val, 2603 & 255) << (i * 8);
2564 dma_id_regs[i].reg,
2565 readl(virtbase + dma_id_regs[i].reg));
2566 goto failure;
2567 }
2568 }
2569 2604
2570 /* Get silicon revision and designer */ 2605 if (cid != AMBA_CID) {
2571 val = readl(virtbase + D40_DREG_PERIPHID2); 2606 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
2572 2607 goto failure;
2573 if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != 2608 }
2574 D40_HW_DESIGNER) { 2609 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
2575 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", 2610 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
2576 val & D40_DREG_PERIPHID2_DESIGNER_MASK, 2611 AMBA_MANF_BITS(pid),
2577 D40_HW_DESIGNER); 2612 AMBA_VENDOR_ST);
2578 goto failure; 2613 goto failure;
2579 } 2614 }
2580 2615 /*
2581 rev = (val & D40_DREG_PERIPHID2_REV_MASK) >> 2616 * HW revision:
2582 D40_DREG_PERIPHID2_REV_POS; 2617 * DB8500ed has revision 0
2618 * ? has revision 1
2619 * DB8500v1 has revision 2
2620 * DB8500v2 has revision 3
2621 */
2622 rev = AMBA_REV_BITS(pid);
2583 2623
2584 /* The number of physical channels on this HW */ 2624 /* The number of physical channels on this HW */
2585 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; 2625 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
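The probe rework above drops the exact-match table of ID register values in favour of the
generic AMBA PrimeCell scheme: the peripheral ID and cell ID each occupy the low byte of
four consecutive registers at the top of the 4K register window (size - 0x20 and
size - 0x10, matching the 0xFE0/0xFF0 offsets in ste_dma40_ll.h below). A self-contained
sketch of the same composition:

    #include <linux/amba/bus.h>
    #include <linux/io.h>
    #include <linux/types.h>

    /* compose one 32-bit PrimeCell ID from four byte-wide registers */
    static u32 primecell_id(void __iomem *base, resource_size_t size,
                            unsigned int top_offset)
    {
            u32 id = 0;
            int i;

            for (i = 0; i < 4; i++)
                    id |= (readl(base + size - top_offset + 4 * i) & 255)
                            << (i * 8);
            return id;
    }

    /*
     * Usage, mirroring the probe code above:
     *      pid = primecell_id(virtbase, resource_size(res), 0x20);
     *      cid = primecell_id(virtbase, resource_size(res), 0x10);
     * cid must equal AMBA_CID (0xb105f00d); AMBA_MANF_BITS(pid) and
     * AMBA_REV_BITS(pid) then yield the designer and silicon revision.
     */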
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 195ee65ee7f3..b44c455158de 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -184,9 +184,6 @@
184#define D40_DREG_PERIPHID0 0xFE0 184#define D40_DREG_PERIPHID0 0xFE0
185#define D40_DREG_PERIPHID1 0xFE4 185#define D40_DREG_PERIPHID1 0xFE4
186#define D40_DREG_PERIPHID2 0xFE8 186#define D40_DREG_PERIPHID2 0xFE8
187#define D40_DREG_PERIPHID2_REV_POS 4
188#define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS)
189#define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf
190#define D40_DREG_PERIPHID3 0xFEC 187#define D40_DREG_PERIPHID3 0xFEC
191#define D40_DREG_CELLID0 0xFF0 188#define D40_DREG_CELLID0 0xFF0
192#define D40_DREG_CELLID1 0xFF4 189#define D40_DREG_CELLID1 0xFF4