aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRobin Gong <b38343@freescale.com>2012-11-23 01:36:04 -0500
committerNitin Garg <nitin.garg@freescale.com>2014-04-16 09:58:20 -0400
commit9ad243853cfe238558fae6614e2a84d5b861a301 (patch)
tree4ed63cf04d98e480662a399a6609ca3a79c971d6
parent8d2a9dfb83a5bf5bca083118c1ca59dda78bd11f (diff)
ENGR00308001 dma: imx-sdma: add support for sdma memory copy
This patch uses the more common DMA interfaces, namely "device_prep_dma_memcpy" and "device_prep_dma_sg". The approach in v3.0.35 reused "device_prep_slave_sg" and required calling it twice to communicate the destination DMA address for a single memory copy (ENGR00233569). That approach was somewhat tricky, so the original patch is dropped. In this patch, "device_prep_dma_memcpy" supports memory copy from a buffer and "device_prep_dma_sg" supports memory copy from a scatter-list. Example code is available in 'linux-test/module_test/mxc_sdma_memcopy_test.' Signed-off-by: Robin Gong <b38343@freescale.com>
-rw-r--r--drivers/dma/imx-sdma.c182
1 files changed, 162 insertions, 20 deletions
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 973d3048c4cf..8c026cbd57e0 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -233,6 +233,7 @@ struct sdma_context_data {
233} __attribute__ ((packed)); 233} __attribute__ ((packed));
234 234
235#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor)) 235#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
236#define SDMA_BD_MAX_CNT (0xfffc) /* align with 4 bytes */
236 237
237struct sdma_engine; 238struct sdma_engine;
238 239
@@ -276,6 +277,7 @@ struct sdma_channel {
276 unsigned int pc_from_device, pc_to_device; 277 unsigned int pc_from_device, pc_to_device;
277 unsigned int device_to_device; 278 unsigned int device_to_device;
278 unsigned int other_script; 279 unsigned int other_script;
280 unsigned int pc_to_pc;
279 enum sdma_mode mode; 281 enum sdma_mode mode;
280 dma_addr_t per_address, per_address2; 282 dma_addr_t per_address, per_address2;
281 unsigned long event_mask[2]; 283 unsigned long event_mask[2];
@@ -639,6 +641,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
639 sdmac->pc_to_device = 0; 641 sdmac->pc_to_device = 0;
640 sdmac->device_to_device = 0; 642 sdmac->device_to_device = 0;
641 sdmac->other_script = 0; 643 sdmac->other_script = 0;
644 sdmac->pc_to_pc = 0;
642 645
643 switch (peripheral_type) { 646 switch (peripheral_type) {
644 case IMX_DMATYPE_MEMORY: 647 case IMX_DMATYPE_MEMORY:
@@ -713,6 +716,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
713 sdmac->pc_to_device = emi_2_per; 716 sdmac->pc_to_device = emi_2_per;
714 sdmac->device_to_device = per_2_per; 717 sdmac->device_to_device = per_2_per;
715 sdmac->other_script = other; 718 sdmac->other_script = other;
719 sdmac->pc_to_pc = emi_2_emi;
716} 720}
717 721
718static int sdma_load_context(struct sdma_channel *sdmac) 722static int sdma_load_context(struct sdma_channel *sdmac)
@@ -731,6 +735,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
731 load_address = sdmac->device_to_device; 735 load_address = sdmac->device_to_device;
732 else if (sdmac->direction == DMA_MEM_TO_DEV) 736 else if (sdmac->direction == DMA_MEM_TO_DEV)
733 load_address = sdmac->pc_to_device; 737 load_address = sdmac->pc_to_device;
738 else if (sdmac->direction == DMA_MEM_TO_MEM)
739 load_address = sdmac->pc_to_pc;
734 else 740 else
735 load_address = sdmac->other_script; 741 load_address = sdmac->other_script;
736 742
@@ -1047,16 +1053,120 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
1047 clk_disable(sdma->clk_ahb); 1053 clk_disable(sdma->clk_ahb);
1048} 1054}
1049 1055
1050static struct dma_async_tx_descriptor *sdma_prep_slave_sg( 1056static struct dma_async_tx_descriptor *sdma_prep_memcpy(
1051 struct dma_chan *chan, struct scatterlist *sgl, 1057 struct dma_chan *chan, dma_addr_t dma_dst,
1052 unsigned int sg_len, enum dma_transfer_direction direction, 1058 dma_addr_t dma_src, size_t len, unsigned long flags)
1053 unsigned long flags, void *context) 1059{
1060 struct sdma_channel *sdmac = to_sdma_chan(chan);
1061 struct sdma_engine *sdma = sdmac->sdma;
1062 int channel = sdmac->channel;
1063 size_t count;
1064 int i = 0, param, ret;
1065 struct sdma_buffer_descriptor *bd;
1066
1067 if (!chan || !len || sdmac->status == DMA_IN_PROGRESS)
1068 return NULL;
1069
1070 if (len >= NUM_BD * SDMA_BD_MAX_CNT) {
1071 dev_err(sdma->dev, "channel%d: maximum bytes exceeded:%d > %d\n"
1072 , channel, len, NUM_BD * SDMA_BD_MAX_CNT);
1073 goto err_out;
1074 }
1075
1076 sdmac->status = DMA_IN_PROGRESS;
1077
1078 sdmac->mode = SDMA_MODE_NORMAL;
1079
1080 sdmac->buf_tail = 0;
1081
1082 dev_dbg(sdma->dev, "memcpy: %x->%x, len=%d, channel=%d.\n",
1083 dma_src, dma_dst, len, channel);
1084
1085 sdmac->direction = DMA_MEM_TO_MEM;
1086
1087 ret = sdma_load_context(sdmac);
1088 if (ret)
1089 goto err_out;
1090
1091 sdmac->chn_count = 0;
1092
1093 do {
1094 count = min(len, (size_t)SDMA_BD_MAX_CNT);
1095 bd = &sdmac->bd[i];
1096 bd->buffer_addr = dma_src;
1097 bd->ext_buffer_addr = dma_dst;
1098 bd->mode.count = count;
1099
1100 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
1101 ret = -EINVAL;
1102 goto err_out;
1103 }
1104
1105 switch (sdmac->word_size) {
1106 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1107 bd->mode.command = 0;
1108 if (count & 3 || dma_dst & 3 || dma_src & 3)
1109 return NULL;
1110 break;
1111 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1112 bd->mode.command = 2;
1113 if (count & 1 || dma_dst & 1 || dma_src & 1)
1114 return NULL;
1115 break;
1116 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1117 bd->mode.command = 1;
1118 break;
1119 default:
1120 return NULL;
1121 }
1122
1123 dma_src += count;
1124 dma_dst += count;
1125 len -= count;
1126 i++;
1127
1128 param = BD_DONE | BD_EXTD | BD_CONT;
1129 /* last bd */
1130 if (!len) {
1131 param |= BD_INTR;
1132 param |= BD_LAST;
1133 param &= ~BD_CONT;
1134 }
1135
1136 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
1137 i, count, bd->buffer_addr,
1138 param & BD_WRAP ? "wrap" : "",
1139 param & BD_INTR ? " intr" : "");
1140
1141 bd->mode.status = param;
1142 sdmac->chn_count += count;
1143 } while (len);
1144
1145 sdmac->num_bd = i;
1146 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1147
1148 return &sdmac->desc;
1149err_out:
1150 sdmac->status = DMA_ERROR;
1151 return NULL;
1152}
1153
1154/*
1155 * Please ensure dst_nents is no smaller than src_nents, and that the sg_len
1156 * of each dst_sg node is no smaller than that of the corresponding src_sg
1157 * node. To simplify things, please use the same size for dst_sg as src_sg.
1158 */
1159static struct dma_async_tx_descriptor *sdma_prep_sg(
1160 struct dma_chan *chan,
1161 struct scatterlist *dst_sg, unsigned int dst_nents,
1162 struct scatterlist *src_sg, unsigned int src_nents,
1163 enum dma_transfer_direction direction)
1054{ 1164{
1055 struct sdma_channel *sdmac = to_sdma_chan(chan); 1165 struct sdma_channel *sdmac = to_sdma_chan(chan);
1056 struct sdma_engine *sdma = sdmac->sdma; 1166 struct sdma_engine *sdma = sdmac->sdma;
1057 int ret, i, count; 1167 int ret, i, count;
1058 int channel = sdmac->channel; 1168 int channel = sdmac->channel;
1059 struct scatterlist *sg; 1169 struct scatterlist *sg_src = src_sg, *sg_dst = dst_sg;
1060 1170
1061 if (sdmac->status == DMA_IN_PROGRESS) 1171 if (sdmac->status == DMA_IN_PROGRESS)
1062 return NULL; 1172 return NULL;
@@ -1067,32 +1177,38 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1067 sdmac->buf_tail = 0; 1177 sdmac->buf_tail = 0;
1068 1178
1069 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", 1179 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
1070 sg_len, channel); 1180 src_nents, channel);
1071 1181
1072 sdmac->direction = direction; 1182 sdmac->direction = direction;
1183
1073 ret = sdma_load_context(sdmac); 1184 ret = sdma_load_context(sdmac);
1074 if (ret) 1185 if (ret)
1075 goto err_out; 1186 goto err_out;
1076 1187
1077 if (sg_len > NUM_BD) { 1188 if (src_nents > NUM_BD) {
1078 dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n", 1189 dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
1079 channel, sg_len, NUM_BD); 1190 channel, src_nents, NUM_BD);
1080 ret = -EINVAL; 1191 ret = -EINVAL;
1081 goto err_out; 1192 goto err_out;
1082 } 1193 }
1083 1194
1084 sdmac->chn_count = 0; 1195 sdmac->chn_count = 0;
1085 for_each_sg(sgl, sg, sg_len, i) { 1196 for_each_sg(src_sg, sg_src, src_nents, i) {
1086 struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; 1197 struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
1087 int param; 1198 int param;
1088 1199
1089 bd->buffer_addr = sg->dma_address; 1200 bd->buffer_addr = sg_src->dma_address;
1201
1202 if (direction == DMA_MEM_TO_MEM) {
1203 BUG_ON(!sg_dst);
1204 bd->ext_buffer_addr = sg_dst->dma_address;
1205 }
1090 1206
1091 count = sg_dma_len(sg); 1207 count = sg_dma_len(sg_src);
1092 1208
1093 if (count > 0xffff) { 1209 if (count > SDMA_BD_MAX_CNT) {
1094 dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n", 1210 dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
1095 channel, count, 0xffff); 1211 channel, count, SDMA_BD_MAX_CNT);
1096 ret = -EINVAL; 1212 ret = -EINVAL;
1097 goto err_out; 1213 goto err_out;
1098 } 1214 }
@@ -1108,12 +1224,14 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1108 switch (sdmac->word_size) { 1224 switch (sdmac->word_size) {
1109 case DMA_SLAVE_BUSWIDTH_4_BYTES: 1225 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1110 bd->mode.command = 0; 1226 bd->mode.command = 0;
1111 if (count & 3 || sg->dma_address & 3) 1227 if (count & 3 || sg_src->dma_address & 3 ||
1228 (sg_dst && (sg_dst->dma_address & 3)))
1112 return NULL; 1229 return NULL;
1113 break; 1230 break;
1114 case DMA_SLAVE_BUSWIDTH_2_BYTES: 1231 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1115 bd->mode.command = 2; 1232 bd->mode.command = 2;
1116 if (count & 1 || sg->dma_address & 1) 1233 if (count & 1 || sg_src->dma_address & 1 ||
1234 (sg_dst && (sg_dst->dma_address & 1)))
1117 return NULL; 1235 return NULL;
1118 break; 1236 break;
1119 case DMA_SLAVE_BUSWIDTH_1_BYTE: 1237 case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -1125,21 +1243,23 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1125 1243
1126 param = BD_DONE | BD_EXTD | BD_CONT; 1244 param = BD_DONE | BD_EXTD | BD_CONT;
1127 1245
1128 if (i + 1 == sg_len) { 1246 if (i + 1 == src_nents) {
1129 param |= BD_INTR; 1247 param |= BD_INTR;
1130 param |= BD_LAST; 1248 param |= BD_LAST;
1131 param &= ~BD_CONT; 1249 param &= ~BD_CONT;
1132 } 1250 }
1133 1251
1134 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", 1252 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
1135 i, count, sg->dma_address, 1253 i, count, sg_src->dma_address,
1136 param & BD_WRAP ? "wrap" : "", 1254 param & BD_WRAP ? "wrap" : "",
1137 param & BD_INTR ? " intr" : ""); 1255 param & BD_INTR ? " intr" : "");
1138 1256
1139 bd->mode.status = param; 1257 bd->mode.status = param;
1258 if (direction == DMA_MEM_TO_MEM)
1259 sg_dst = sg_next(sg_dst);
1140 } 1260 }
1141 1261
1142 sdmac->num_bd = sg_len; 1262 sdmac->num_bd = src_nents;
1143 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; 1263 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1144 1264
1145 return &sdmac->desc; 1265 return &sdmac->desc;
@@ -1148,6 +1268,23 @@ err_out:
1148 return NULL; 1268 return NULL;
1149} 1269}
1150 1270
1271static struct dma_async_tx_descriptor *sdma_prep_memcpy_sg(
1272 struct dma_chan *chan,
1273 struct scatterlist *dst_sg, unsigned int dst_nents,
1274 struct scatterlist *src_sg, unsigned int src_nents,
1275 unsigned long flags)
1276{
1277 return sdma_prep_sg(chan, dst_sg, dst_nents, src_sg, src_nents, DMA_MEM_TO_MEM);
1278}
1279
1280static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1281 struct dma_chan *chan, struct scatterlist *sgl,
1282 unsigned int sg_len, enum dma_transfer_direction direction,
1283 unsigned long flags, void *context)
1284{
1285 return sdma_prep_sg(chan, NULL, 0, sgl, sg_len, direction);
1286}
1287
1151static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( 1288static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1152 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 1289 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
1153 size_t period_len, enum dma_transfer_direction direction, 1290 size_t period_len, enum dma_transfer_direction direction,
@@ -1201,9 +1338,9 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1201 goto err_out; 1338 goto err_out;
1202 } 1339 }
1203 1340
1204 if (period_len > 0xffff) { 1341 if (period_len > SDMA_BD_MAX_CNT) {
1205 dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n", 1342 dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n",
1206 channel, period_len, 0xffff); 1343 channel, period_len, SDMA_BD_MAX_CNT);
1207 goto err_out; 1344 goto err_out;
1208 } 1345 }
1209 1346
@@ -1278,6 +1415,8 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1278 sdmac->watermark_level = dmaengine_cfg->dst_maxburst * 1415 sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
1279 dmaengine_cfg->dst_addr_width; 1416 dmaengine_cfg->dst_addr_width;
1280 sdmac->word_size = dmaengine_cfg->dst_addr_width; 1417 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1418 } else if (dmaengine_cfg->direction == DMA_MEM_TO_MEM) {
1419 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1281 } 1420 }
1282 sdmac->direction = dmaengine_cfg->direction; 1421 sdmac->direction = dmaengine_cfg->direction;
1283 if (dmaengine_cfg->dma_request0) 1422 if (dmaengine_cfg->dma_request0)
@@ -1653,6 +1792,7 @@ static int __init sdma_probe(struct platform_device *pdev)
1653 1792
1654 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); 1793 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
1655 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); 1794 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
1795 dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
1656 1796
1657 INIT_LIST_HEAD(&sdma->dma_device.channels); 1797 INIT_LIST_HEAD(&sdma->dma_device.channels);
1658 /* Initialize channel parameters */ 1798 /* Initialize channel parameters */
@@ -1722,6 +1862,8 @@ static int __init sdma_probe(struct platform_device *pdev)
1722 sdma->dma_device.device_tx_status = sdma_tx_status; 1862 sdma->dma_device.device_tx_status = sdma_tx_status;
1723 sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; 1863 sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
1724 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; 1864 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
1865 sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
1866 sdma->dma_device.device_prep_dma_sg = sdma_prep_memcpy_sg;
1725 sdma->dma_device.device_control = sdma_control; 1867 sdma->dma_device.device_control = sdma_control;
1726 sdma->dma_device.device_issue_pending = sdma_issue_pending; 1868 sdma->dma_device.device_issue_pending = sdma_issue_pending;
1727 sdma->dma_device.dev->dma_parms = &sdma->dma_parms; 1869 sdma->dma_device.dev->dma_parms = &sdma->dma_parms;