author     John Ogness <john.ogness@linutronix.de>   2016-04-06 06:01:47 -0400
committer  Vinod Koul <vinod.koul@intel.com>         2016-04-06 10:29:49 -0400
commit     a482f4e0d848d0914ff119ef32fe1d11434d570c
tree       2271c092fe186cf9bcfb4436751c1e27c380a0b4
parent     23f49fd2ea9bc8e1c8cff0126cd71b071ea9e91f
dmaengine: edma: special case slot limit workaround
Currently drivers are limited to 19 slots for cyclic transfers. However,
if the DMA burst size is the same as the period size, the period size can
be changed to the full buffer size and intermediate interrupts activated.
Since intermediate interrupts trigger for each burst, and the burst size
is the same as the period size, the driver still gets an interrupt each
period as expected. This provides the functionality of many more slots
while using only 2.

This workaround is only active if more than 19 slots are needed and the
burst size matches the period size.

Acked-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sekhar Nori <nsekhar@ti.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
 drivers/dma/edma.c | 25 +++++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 13b6a23dc06b..04070baab78a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1238,6 +1238,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 	struct edma_desc *edesc;
 	dma_addr_t src_addr, dst_addr;
 	enum dma_slave_buswidth dev_width;
+	bool use_intermediate = false;
 	u32 burst;
 	int i, ret, nslots;
 
@@ -1279,8 +1280,21 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 	 * but the synchronization is difficult to achieve with Cyclic and
 	 * cannot be guaranteed, so we error out early.
 	 */
-	if (nslots > MAX_NR_SG)
-		return NULL;
+	if (nslots > MAX_NR_SG) {
+		/*
+		 * If the burst and period sizes are the same, we can put
+		 * the full buffer into a single period and activate
+		 * intermediate interrupts. This will produce interrupts
+		 * after each burst, which is also after each desired period.
+		 */
+		if (burst == period_len) {
+			period_len = buf_len;
+			nslots = 2;
+			use_intermediate = true;
+		} else {
+			return NULL;
+		}
+	}
 
 	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
 			GFP_ATOMIC);
@@ -1358,8 +1372,13 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 		/*
 		 * Enable period interrupt only if it is requested
 		 */
-		if (tx_flags & DMA_PREP_INTERRUPT)
+		if (tx_flags & DMA_PREP_INTERRUPT) {
 			edesc->pset[i].param.opt |= TCINTEN;
+
+			/* Also enable intermediate interrupts if necessary */
+			if (use_intermediate)
+				edesc->pset[i].param.opt |= ITCINTEN;
+		}
 	}
 
 	/* Place the cyclic channel to highest priority queue */
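
For context, the sketch below shows a hypothetical dmaengine client that
would hit the path changed above: a cyclic transfer split into more than
19 periods, with the slave config's maxburst chosen so it matches the
period length and the workaround can apply. The function name, buffer and
period sizes, 1-byte bus width and DMA_DEV_TO_MEM direction are
assumptions made for illustration and are not part of this patch; only
the dmaengine calls themselves are standard kernel API.

/*
 * Hypothetical client sketch (not part of this patch). Names and sizes
 * are assumptions; only the dmaengine API calls are standard.
 */
#include <linux/dmaengine.h>
#include <linux/errno.h>

static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len,
			   dma_addr_t dev_fifo)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= dev_fifo,
		/* 1-byte width so src_maxburst equals period_len, as checked above */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst	= period_len,
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/*
	 * e.g. buf_len = 32 * period_len needs more than 19 slots;
	 * previously this returned NULL, now it is folded into 2 slots
	 * with intermediate interrupts enabled.
	 */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	/* A real client would set desc->callback for per-period handling. */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}

With such a configuration the prepare call that previously failed now
succeeds using two slots, and ITCINTEN delivers one interrupt per burst,
which is one per desired period.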