aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSebastian Andrzej Siewior <bigeasy@linutronix.de>2016-10-14 01:00:47 -0400
committerVinod Koul <vinod.koul@intel.com>2016-10-18 10:52:01 -0400
commit9934075471dcc6de9bdce1f3c1e16f1afbd711a8 (patch)
tree84ebad10d13bff3d3ee5fb7a39905ac6e500af2f
parent1001354ca34179f3db924eb66672442a173147dc (diff)
dmaengine: omap-dma: add support for pause of non-cyclic transfers
This DMA driver is used by 8250-omap on DRA7-evm. There is one requirement that is to pause a transfer. This is currently used on the RX side. It is possible that the UART HW aborted the RX (UART's RX-timeout) but the DMA controller starts the transfer shortly after. Before we can manually purge the FIFO we need to pause the transfer, check how many bytes it already received and terminate the transfer without it making any progress. From testing on the TX side it seems that it is possible that we invoke pause once the transfer has completed which is indicated by the missing CCR_ENABLE bit but before the interrupt has been noticed. In that case the interrupt will come even after disabling it. The AM572x manual says that we have to wait for the CCR_RD_ACTIVE & CCR_WR_ACTIVE bits to be gone before programming it again; hence the drain loop. Also it looks like without the drain the TX-transfer sometimes makes progress. One note: The pause + resume combo is broken because after resume the complete transfer will be programmed again. That means the already transferred bytes (until the pause event) will be sent again. This is currently not important for my UART user because it does only pause + terminate. v3…v4: - update subject line. v2…v3: - rephrase the comment based on Russell's information / feedback. v1…v2: - move the drain loop into omap_dma_drain_chan() instead of having it twice. - allow pause only for DMA_DEV_TO_MEM transfers if non-cyclic. Add a comment why DMA_MEM_TO_DEV not allowed. - clear pause on terminate_all. Otherwise pause() + terminate_all() will keep the pause bit set and we can't pause the following transfer. Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> [vigneshr@ti.com: drain channel only when buffering is on, rebase to v4.8] Signed-off-by: Vignesh R <vigneshr@ti.com> Acked-by: Peter Ujfalusi <peter.ujfalusi@ti.com> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--drivers/dma/omap-dma.c124
1 files changed, 89 insertions, 35 deletions
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 7ca27d4b1c54..fd6b9e6834ad 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -422,7 +422,30 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
422 c->running = true; 422 c->running = true;
423} 423}
424 424
425static void omap_dma_stop(struct omap_chan *c) 425static void omap_dma_drain_chan(struct omap_chan *c)
426{
427 int i;
428 u32 val;
429
430 /* Wait for sDMA FIFO to drain */
431 for (i = 0; ; i++) {
432 val = omap_dma_chan_read(c, CCR);
433 if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
434 break;
435
436 if (i > 100)
437 break;
438
439 udelay(5);
440 }
441
442 if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
443 dev_err(c->vc.chan.device->dev,
444 "DMA drain did not complete on lch %d\n",
445 c->dma_ch);
446}
447
448static int omap_dma_stop(struct omap_chan *c)
426{ 449{
427 struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); 450 struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
428 uint32_t val; 451 uint32_t val;
@@ -435,7 +458,6 @@ static void omap_dma_stop(struct omap_chan *c)
435 val = omap_dma_chan_read(c, CCR); 458 val = omap_dma_chan_read(c, CCR);
436 if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) { 459 if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
437 uint32_t sysconfig; 460 uint32_t sysconfig;
438 unsigned i;
439 461
440 sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG); 462 sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
441 val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK; 463 val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
@@ -446,27 +468,19 @@ static void omap_dma_stop(struct omap_chan *c)
446 val &= ~CCR_ENABLE; 468 val &= ~CCR_ENABLE;
447 omap_dma_chan_write(c, CCR, val); 469 omap_dma_chan_write(c, CCR, val);
448 470
449 /* Wait for sDMA FIFO to drain */ 471 if (!(c->ccr & CCR_BUFFERING_DISABLE))
450 for (i = 0; ; i++) { 472 omap_dma_drain_chan(c);
451 val = omap_dma_chan_read(c, CCR);
452 if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
453 break;
454
455 if (i > 100)
456 break;
457
458 udelay(5);
459 }
460
461 if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
462 dev_err(c->vc.chan.device->dev,
463 "DMA drain did not complete on lch %d\n",
464 c->dma_ch);
465 473
466 omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig); 474 omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
467 } else { 475 } else {
476 if (!(val & CCR_ENABLE))
477 return -EINVAL;
478
468 val &= ~CCR_ENABLE; 479 val &= ~CCR_ENABLE;
469 omap_dma_chan_write(c, CCR, val); 480 omap_dma_chan_write(c, CCR, val);
481
482 if (!(c->ccr & CCR_BUFFERING_DISABLE))
483 omap_dma_drain_chan(c);
470 } 484 }
471 485
472 mb(); 486 mb();
@@ -481,8 +495,8 @@ static void omap_dma_stop(struct omap_chan *c)
481 495
482 omap_dma_chan_write(c, CLNK_CTRL, val); 496 omap_dma_chan_write(c, CLNK_CTRL, val);
483 } 497 }
484
485 c->running = false; 498 c->running = false;
499 return 0;
486} 500}
487 501
488static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d) 502static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
@@ -836,6 +850,8 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
836 } else { 850 } else {
837 txstate->residue = 0; 851 txstate->residue = 0;
838 } 852 }
853 if (ret == DMA_IN_PROGRESS && c->paused)
854 ret = DMA_PAUSED;
839 spin_unlock_irqrestore(&c->vc.lock, flags); 855 spin_unlock_irqrestore(&c->vc.lock, flags);
840 856
841 return ret; 857 return ret;
@@ -1247,10 +1263,8 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
1247 omap_dma_stop(c); 1263 omap_dma_stop(c);
1248 } 1264 }
1249 1265
1250 if (c->cyclic) { 1266 c->cyclic = false;
1251 c->cyclic = false; 1267 c->paused = false;
1252 c->paused = false;
1253 }
1254 1268
1255 vchan_get_all_descriptors(&c->vc, &head); 1269 vchan_get_all_descriptors(&c->vc, &head);
1256 spin_unlock_irqrestore(&c->vc.lock, flags); 1270 spin_unlock_irqrestore(&c->vc.lock, flags);
@@ -1269,28 +1283,66 @@ static void omap_dma_synchronize(struct dma_chan *chan)
1269static int omap_dma_pause(struct dma_chan *chan) 1283static int omap_dma_pause(struct dma_chan *chan)
1270{ 1284{
1271 struct omap_chan *c = to_omap_dma_chan(chan); 1285 struct omap_chan *c = to_omap_dma_chan(chan);
1286 struct omap_dmadev *od = to_omap_dma_dev(chan->device);
1287 unsigned long flags;
1288 int ret = -EINVAL;
1289 bool can_pause;
1272 1290
1273 /* Pause/Resume only allowed with cyclic mode */ 1291 spin_lock_irqsave(&od->irq_lock, flags);
1274 if (!c->cyclic)
1275 return -EINVAL;
1276 1292
1277 if (!c->paused) { 1293 if (!c->desc)
1278 omap_dma_stop(c); 1294 goto out;
1279 c->paused = true; 1295
1296 if (c->cyclic)
1297 can_pause = true;
1298
1299 /*
1300 * We do not allow DMA_MEM_TO_DEV transfers to be paused.
1301 * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
1302 * "When a channel is disabled during a transfer, the channel undergoes
1303 * an abort, unless it is hardware-source-synchronized …".
1304 * A source-synchronised channel is one where the fetching of data is
1305 * under control of the device. In other words, a device-to-memory
1306 * transfer. So, a destination-synchronised channel (which would be a
1307 * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
1308 * bit is cleared.
1309 * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
1310 * aborts immediately after completion of current read/write
1311 * transactions and then the FIFO is cleaned up." The term "cleaned up"
1312 * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
1313 * are both clear _before_ disabling the channel, otherwise data loss
1314 * will occur.
1315 * The problem is that if the channel is active, then device activity
1316 * can result in DMA activity starting between reading those as both
1317 * clear and the write to DMA_CCR to clear the enable bit hitting the
1318 * hardware. If the DMA hardware can't drain the data in its FIFO to the
1319 * destination, then data loss "might" occur (say if we write to a UART
1320 * and the UART is not accepting any further data).
1321 */
1322 else if (c->desc->dir == DMA_DEV_TO_MEM)
1323 can_pause = true;
1324
1325 if (can_pause && !c->paused) {
1326 ret = omap_dma_stop(c);
1327 if (!ret)
1328 c->paused = true;
1280 } 1329 }
1330out:
1331 spin_unlock_irqrestore(&od->irq_lock, flags);
1281 1332
1282 return 0; 1333 return ret;
1283} 1334}
1284 1335
1285static int omap_dma_resume(struct dma_chan *chan) 1336static int omap_dma_resume(struct dma_chan *chan)
1286{ 1337{
1287 struct omap_chan *c = to_omap_dma_chan(chan); 1338 struct omap_chan *c = to_omap_dma_chan(chan);
1339 struct omap_dmadev *od = to_omap_dma_dev(chan->device);
1340 unsigned long flags;
1341 int ret = -EINVAL;
1288 1342
1289 /* Pause/Resume only allowed with cyclic mode */ 1343 spin_lock_irqsave(&od->irq_lock, flags);
1290 if (!c->cyclic)
1291 return -EINVAL;
1292 1344
1293 if (c->paused) { 1345 if (c->paused && c->desc) {
1294 mb(); 1346 mb();
1295 1347
1296 /* Restore channel link register */ 1348 /* Restore channel link register */
@@ -1298,9 +1350,11 @@ static int omap_dma_resume(struct dma_chan *chan)
1298 1350
1299 omap_dma_start(c, c->desc); 1351 omap_dma_start(c, c->desc);
1300 c->paused = false; 1352 c->paused = false;
1353 ret = 0;
1301 } 1354 }
1355 spin_unlock_irqrestore(&od->irq_lock, flags);
1302 1356
1303 return 0; 1357 return ret;
1304} 1358}
1305 1359
1306static int omap_dma_chan_init(struct omap_dmadev *od) 1360static int omap_dma_chan_init(struct omap_dmadev *od)