aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSascha Hauer <s.hauer@pengutronix.de>2018-06-19 12:56:59 -0400
committerVinod Koul <vkoul@kernel.org>2018-07-02 08:54:47 -0400
commit76c33d27073e29bd98d1c975265e1cbe0889fc53 (patch)
tree8d486cb3cafb84539afdbc220d6dc932d98d895b
parentce397d215ccd07b8ae3f71db689aedb85d56ab40 (diff)
dmaengine: imx-sdma: factor out a struct sdma_desc from struct sdma_channel
This is a preparation step to make the adding of virt-dma easier. We create a struct sdma_desc, move some fields from struct sdma_channel there and add a pointer from the former to the latter. For now we allocate the data statically in struct sdma_channel, but with virt-dma support it will be dynamically allocated. Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de> Signed-off-by: Robin Gong <yibin.gong@nxp.com> Reviewed-by: Sascha Hauer <s.hauer@pengutronix.de> Tested-by: Lucas Stach <l.stach@pengutronix.de> Signed-off-by: Vinod Koul <vkoul@kernel.org>
-rw-r--r--drivers/dma/imx-sdma.c137
1 file changed, 83 insertions(+), 54 deletions(-)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f077992635c2..19c351f3b4bc 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -289,6 +289,30 @@ struct sdma_context_data {
289struct sdma_engine; 289struct sdma_engine;
290 290
291/** 291/**
292 * struct sdma_desc - descriptor structure for one transfer
293 * @vd descriptor for virt dma
294 * @num_bd max NUM_BD. number of descriptors currently handling
295 * @buf_tail ID of the buffer that was processed
296 * @buf_ptail ID of the previous buffer that was processed
297 * @period_len period length, used in cyclic.
298 * @chn_real_count the real count updated from bd->mode.count
299 * @chn_count the transfer count that was set up
300 * @sdmac sdma_channel pointer
301 * @bd pointer to the allocated buffer descriptors
302 */
303struct sdma_desc {
304 unsigned int num_bd;
305 dma_addr_t bd_phys;
306 unsigned int buf_tail;
307 unsigned int buf_ptail;
308 unsigned int period_len;
309 unsigned int chn_real_count;
310 unsigned int chn_count;
311 struct sdma_channel *sdmac;
312 struct sdma_buffer_descriptor *bd;
313};
314
315/**
292 * struct sdma_channel - housekeeping for a SDMA channel 316 * struct sdma_channel - housekeeping for a SDMA channel
293 * 317 *
294 * @sdma pointer to the SDMA engine for this channel 318 * @sdma pointer to the SDMA engine for this channel
@@ -298,11 +322,10 @@ struct sdma_engine;
298 * @event_id0 aka dma request line 322 * @event_id0 aka dma request line
299 * @event_id1 for channels that use 2 events 323 * @event_id1 for channels that use 2 events
300 * @word_size peripheral access size 324 * @word_size peripheral access size
301 * @buf_tail ID of the buffer that was processed
302 * @buf_ptail ID of the previous buffer that was processed
303 * @num_bd max NUM_BD. number of descriptors currently handling
304 */ 325 */
305struct sdma_channel { 326struct sdma_channel {
327 struct sdma_desc *desc;
328 struct sdma_desc _desc;
306 struct sdma_engine *sdma; 329 struct sdma_engine *sdma;
307 unsigned int channel; 330 unsigned int channel;
308 enum dma_transfer_direction direction; 331 enum dma_transfer_direction direction;
@@ -310,12 +333,6 @@ struct sdma_channel {
310 unsigned int event_id0; 333 unsigned int event_id0;
311 unsigned int event_id1; 334 unsigned int event_id1;
312 enum dma_slave_buswidth word_size; 335 enum dma_slave_buswidth word_size;
313 unsigned int buf_tail;
314 unsigned int buf_ptail;
315 unsigned int num_bd;
316 unsigned int period_len;
317 struct sdma_buffer_descriptor *bd;
318 dma_addr_t bd_phys;
319 unsigned int pc_from_device, pc_to_device; 336 unsigned int pc_from_device, pc_to_device;
320 unsigned int device_to_device; 337 unsigned int device_to_device;
321 unsigned long flags; 338 unsigned long flags;
@@ -325,10 +342,8 @@ struct sdma_channel {
325 u32 shp_addr, per_addr; 342 u32 shp_addr, per_addr;
326 struct dma_chan chan; 343 struct dma_chan chan;
327 spinlock_t lock; 344 spinlock_t lock;
328 struct dma_async_tx_descriptor desc; 345 struct dma_async_tx_descriptor txdesc;
329 enum dma_status status; 346 enum dma_status status;
330 unsigned int chn_count;
331 unsigned int chn_real_count;
332 struct tasklet_struct tasklet; 347 struct tasklet_struct tasklet;
333 struct imx_dma_data data; 348 struct imx_dma_data data;
334 bool enabled; 349 bool enabled;
@@ -391,6 +406,8 @@ struct sdma_engine {
391 u32 spba_start_addr; 406 u32 spba_start_addr;
392 u32 spba_end_addr; 407 u32 spba_end_addr;
393 unsigned int irq; 408 unsigned int irq;
409 dma_addr_t bd0_phys;
410 struct sdma_buffer_descriptor *bd0;
394}; 411};
395 412
396static struct sdma_driver_data sdma_imx31 = { 413static struct sdma_driver_data sdma_imx31 = {
@@ -625,7 +642,7 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
625static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, 642static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
626 u32 address) 643 u32 address)
627{ 644{
628 struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; 645 struct sdma_buffer_descriptor *bd0 = sdma->bd0;
629 void *buf_virt; 646 void *buf_virt;
630 dma_addr_t buf_phys; 647 dma_addr_t buf_phys;
631 int ret; 648 int ret;
@@ -700,7 +717,9 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
700 * call callback function. 717 * call callback function.
701 */ 718 */
702 while (1) { 719 while (1) {
703 bd = &sdmac->bd[sdmac->buf_tail]; 720 struct sdma_desc *desc = sdmac->desc;
721
722 bd = &desc->bd[desc->buf_tail];
704 723
705 if (bd->mode.status & BD_DONE) 724 if (bd->mode.status & BD_DONE)
706 break; 725 break;
@@ -716,11 +735,11 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
716 * the number of bytes present in the current buffer descriptor. 735 * the number of bytes present in the current buffer descriptor.
717 */ 736 */
718 737
719 sdmac->chn_real_count = bd->mode.count; 738 desc->chn_real_count = bd->mode.count;
720 bd->mode.status |= BD_DONE; 739 bd->mode.status |= BD_DONE;
721 bd->mode.count = sdmac->period_len; 740 bd->mode.count = desc->period_len;
722 sdmac->buf_ptail = sdmac->buf_tail; 741 desc->buf_ptail = desc->buf_tail;
723 sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd; 742 desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
724 743
725 /* 744 /*
726 * The callback is called from the interrupt context in order 745 * The callback is called from the interrupt context in order
@@ -729,7 +748,7 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
729 * executed. 748 * executed.
730 */ 749 */
731 750
732 dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL); 751 dmaengine_desc_get_callback_invoke(&sdmac->txdesc, NULL);
733 752
734 if (error) 753 if (error)
735 sdmac->status = old_status; 754 sdmac->status = old_status;
@@ -742,17 +761,17 @@ static void mxc_sdma_handle_channel_normal(unsigned long data)
742 struct sdma_buffer_descriptor *bd; 761 struct sdma_buffer_descriptor *bd;
743 int i, error = 0; 762 int i, error = 0;
744 763
745 sdmac->chn_real_count = 0; 764 sdmac->desc->chn_real_count = 0;
746 /* 765 /*
747 * non loop mode. Iterate over all descriptors, collect 766 * non loop mode. Iterate over all descriptors, collect
748 * errors and call callback function 767 * errors and call callback function
749 */ 768 */
750 for (i = 0; i < sdmac->num_bd; i++) { 769 for (i = 0; i < sdmac->desc->num_bd; i++) {
751 bd = &sdmac->bd[i]; 770 bd = &sdmac->desc->bd[i];
752 771
753 if (bd->mode.status & (BD_DONE | BD_RROR)) 772 if (bd->mode.status & (BD_DONE | BD_RROR))
754 error = -EIO; 773 error = -EIO;
755 sdmac->chn_real_count += bd->mode.count; 774 sdmac->desc->chn_real_count += bd->mode.count;
756 } 775 }
757 776
758 if (error) 777 if (error)
@@ -760,9 +779,9 @@ static void mxc_sdma_handle_channel_normal(unsigned long data)
760 else 779 else
761 sdmac->status = DMA_COMPLETE; 780 sdmac->status = DMA_COMPLETE;
762 781
763 dma_cookie_complete(&sdmac->desc); 782 dma_cookie_complete(&sdmac->txdesc);
764 783
765 dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL); 784 dmaengine_desc_get_callback_invoke(&sdmac->txdesc, NULL);
766} 785}
767 786
768static irqreturn_t sdma_int_handler(int irq, void *dev_id) 787static irqreturn_t sdma_int_handler(int irq, void *dev_id)
@@ -890,7 +909,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
890 int channel = sdmac->channel; 909 int channel = sdmac->channel;
891 int load_address; 910 int load_address;
892 struct sdma_context_data *context = sdma->context; 911 struct sdma_context_data *context = sdma->context;
893 struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; 912 struct sdma_buffer_descriptor *bd0 = sdma->bd0;
894 int ret; 913 int ret;
895 unsigned long flags; 914 unsigned long flags;
896 915
@@ -1093,18 +1112,22 @@ static int sdma_set_channel_priority(struct sdma_channel *sdmac,
1093static int sdma_request_channel(struct sdma_channel *sdmac) 1112static int sdma_request_channel(struct sdma_channel *sdmac)
1094{ 1113{
1095 struct sdma_engine *sdma = sdmac->sdma; 1114 struct sdma_engine *sdma = sdmac->sdma;
1115 struct sdma_desc *desc;
1096 int channel = sdmac->channel; 1116 int channel = sdmac->channel;
1097 int ret = -EBUSY; 1117 int ret = -EBUSY;
1098 1118
1099 sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, 1119 sdmac->desc = &sdmac->_desc;
1120 desc = sdmac->desc;
1121
1122 desc->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &desc->bd_phys,
1100 GFP_KERNEL); 1123 GFP_KERNEL);
1101 if (!sdmac->bd) { 1124 if (!desc->bd) {
1102 ret = -ENOMEM; 1125 ret = -ENOMEM;
1103 goto out; 1126 goto out;
1104 } 1127 }
1105 1128
1106 sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; 1129 sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
1107 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; 1130 sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
1108 1131
1109 sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY); 1132 sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
1110 return 0; 1133 return 0;
@@ -1169,10 +1192,10 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
1169 if (ret) 1192 if (ret)
1170 goto disable_clk_ahb; 1193 goto disable_clk_ahb;
1171 1194
1172 dma_async_tx_descriptor_init(&sdmac->desc, chan); 1195 dma_async_tx_descriptor_init(&sdmac->txdesc, chan);
1173 sdmac->desc.tx_submit = sdma_tx_submit; 1196 sdmac->txdesc.tx_submit = sdma_tx_submit;
1174 /* txd.flags will be overwritten in prep funcs */ 1197 /* txd.flags will be overwritten in prep funcs */
1175 sdmac->desc.flags = DMA_CTRL_ACK; 1198 sdmac->txdesc.flags = DMA_CTRL_ACK;
1176 1199
1177 return 0; 1200 return 0;
1178 1201
@@ -1187,6 +1210,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
1187{ 1210{
1188 struct sdma_channel *sdmac = to_sdma_chan(chan); 1211 struct sdma_channel *sdmac = to_sdma_chan(chan);
1189 struct sdma_engine *sdma = sdmac->sdma; 1212 struct sdma_engine *sdma = sdmac->sdma;
1213 struct sdma_desc *desc = sdmac->desc;
1190 1214
1191 sdma_disable_channel(chan); 1215 sdma_disable_channel(chan);
1192 1216
@@ -1200,7 +1224,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
1200 1224
1201 sdma_set_channel_priority(sdmac, 0); 1225 sdma_set_channel_priority(sdmac, 0);
1202 1226
1203 dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys); 1227 dma_free_coherent(NULL, PAGE_SIZE, desc->bd, desc->bd_phys);
1204 1228
1205 clk_disable(sdma->clk_ipg); 1229 clk_disable(sdma->clk_ipg);
1206 clk_disable(sdma->clk_ahb); 1230 clk_disable(sdma->clk_ahb);
@@ -1216,6 +1240,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1216 int ret, i, count; 1240 int ret, i, count;
1217 int channel = sdmac->channel; 1241 int channel = sdmac->channel;
1218 struct scatterlist *sg; 1242 struct scatterlist *sg;
1243 struct sdma_desc *desc = sdmac->desc;
1219 1244
1220 if (sdmac->status == DMA_IN_PROGRESS) 1245 if (sdmac->status == DMA_IN_PROGRESS)
1221 return NULL; 1246 return NULL;
@@ -1223,9 +1248,9 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1223 1248
1224 sdmac->flags = 0; 1249 sdmac->flags = 0;
1225 1250
1226 sdmac->buf_tail = 0; 1251 desc->buf_tail = 0;
1227 sdmac->buf_ptail = 0; 1252 desc->buf_ptail = 0;
1228 sdmac->chn_real_count = 0; 1253 desc->chn_real_count = 0;
1229 1254
1230 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", 1255 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
1231 sg_len, channel); 1256 sg_len, channel);
@@ -1242,9 +1267,9 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1242 goto err_out; 1267 goto err_out;
1243 } 1268 }
1244 1269
1245 sdmac->chn_count = 0; 1270 desc->chn_count = 0;
1246 for_each_sg(sgl, sg, sg_len, i) { 1271 for_each_sg(sgl, sg, sg_len, i) {
1247 struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; 1272 struct sdma_buffer_descriptor *bd = &desc->bd[i];
1248 int param; 1273 int param;
1249 1274
1250 bd->buffer_addr = sg->dma_address; 1275 bd->buffer_addr = sg->dma_address;
@@ -1259,7 +1284,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1259 } 1284 }
1260 1285
1261 bd->mode.count = count; 1286 bd->mode.count = count;
1262 sdmac->chn_count += count; 1287 desc->chn_count += count;
1263 1288
1264 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) { 1289 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
1265 ret = -EINVAL; 1290 ret = -EINVAL;
@@ -1300,10 +1325,10 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1300 bd->mode.status = param; 1325 bd->mode.status = param;
1301 } 1326 }
1302 1327
1303 sdmac->num_bd = sg_len; 1328 desc->num_bd = sg_len;
1304 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; 1329 sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
1305 1330
1306 return &sdmac->desc; 1331 return &sdmac->txdesc;
1307err_out: 1332err_out:
1308 sdmac->status = DMA_ERROR; 1333 sdmac->status = DMA_ERROR;
1309 return NULL; 1334 return NULL;
@@ -1319,6 +1344,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1319 int num_periods = buf_len / period_len; 1344 int num_periods = buf_len / period_len;
1320 int channel = sdmac->channel; 1345 int channel = sdmac->channel;
1321 int ret, i = 0, buf = 0; 1346 int ret, i = 0, buf = 0;
1347 struct sdma_desc *desc = sdmac->desc;
1322 1348
1323 dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); 1349 dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
1324 1350
@@ -1327,10 +1353,10 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1327 1353
1328 sdmac->status = DMA_IN_PROGRESS; 1354 sdmac->status = DMA_IN_PROGRESS;
1329 1355
1330 sdmac->buf_tail = 0; 1356 desc->buf_tail = 0;
1331 sdmac->buf_ptail = 0; 1357 desc->buf_ptail = 0;
1332 sdmac->chn_real_count = 0; 1358 desc->chn_real_count = 0;
1333 sdmac->period_len = period_len; 1359 desc->period_len = period_len;
1334 1360
1335 sdmac->flags |= IMX_DMA_SG_LOOP; 1361 sdmac->flags |= IMX_DMA_SG_LOOP;
1336 sdmac->direction = direction; 1362 sdmac->direction = direction;
@@ -1351,7 +1377,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1351 } 1377 }
1352 1378
1353 while (buf < buf_len) { 1379 while (buf < buf_len) {
1354 struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; 1380 struct sdma_buffer_descriptor *bd = &desc->bd[i];
1355 int param; 1381 int param;
1356 1382
1357 bd->buffer_addr = dma_addr; 1383 bd->buffer_addr = dma_addr;
@@ -1382,10 +1408,10 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1382 i++; 1408 i++;
1383 } 1409 }
1384 1410
1385 sdmac->num_bd = num_periods; 1411 desc->num_bd = num_periods;
1386 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; 1412 sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
1387 1413
1388 return &sdmac->desc; 1414 return &sdmac->txdesc;
1389err_out: 1415err_out:
1390 sdmac->status = DMA_ERROR; 1416 sdmac->status = DMA_ERROR;
1391 return NULL; 1417 return NULL;
@@ -1424,13 +1450,14 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
1424 struct dma_tx_state *txstate) 1450 struct dma_tx_state *txstate)
1425{ 1451{
1426 struct sdma_channel *sdmac = to_sdma_chan(chan); 1452 struct sdma_channel *sdmac = to_sdma_chan(chan);
1453 struct sdma_desc *desc = sdmac->desc;
1427 u32 residue; 1454 u32 residue;
1428 1455
1429 if (sdmac->flags & IMX_DMA_SG_LOOP) 1456 if (sdmac->flags & IMX_DMA_SG_LOOP)
1430 residue = (sdmac->num_bd - sdmac->buf_ptail) * 1457 residue = (desc->num_bd - desc->buf_ptail) *
1431 sdmac->period_len - sdmac->chn_real_count; 1458 desc->period_len - desc->chn_real_count;
1432 else 1459 else
1433 residue = sdmac->chn_count - sdmac->chn_real_count; 1460 residue = desc->chn_count - desc->chn_real_count;
1434 1461
1435 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 1462 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
1436 residue); 1463 residue);
@@ -1654,6 +1681,8 @@ static int sdma_init(struct sdma_engine *sdma)
1654 if (ret) 1681 if (ret)
1655 goto err_dma_alloc; 1682 goto err_dma_alloc;
1656 1683
1684 sdma->bd0 = sdma->channel[0].desc->bd;
1685
1657 sdma_config_ownership(&sdma->channel[0], false, true, false); 1686 sdma_config_ownership(&sdma->channel[0], false, true, false);
1658 1687
1659 /* Set Command Channel (Channel Zero) */ 1688 /* Set Command Channel (Channel Zero) */