about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Vinod Koul <vinod.koul@intel.com> 2016-04-16 13:22:03 -0400
committer Vinod Koul <vinod.koul@intel.com> 2016-04-16 13:22:03 -0400
commit956e6c8e18fa666ccc118c85fb32f92ebde3baf1 (patch)
treeb17916aa336dfad422b46a9f1e58d097e8f074ca
parent1cc3334e2e0263b02163a9edd43d0448ac00770c (diff)
parenta482f4e0d848d0914ff119ef32fe1d11434d570c (diff)
Merge branch 'fix/edma' into fixes
-rw-r--r-- drivers/dma/edma.c | 63
1 file changed, 25 insertions(+), 38 deletions(-)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index ee3463e774f8..04070baab78a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1238,6 +1238,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
1238 struct edma_desc *edesc; 1238 struct edma_desc *edesc;
1239 dma_addr_t src_addr, dst_addr; 1239 dma_addr_t src_addr, dst_addr;
1240 enum dma_slave_buswidth dev_width; 1240 enum dma_slave_buswidth dev_width;
1241 bool use_intermediate = false;
1241 u32 burst; 1242 u32 burst;
1242 int i, ret, nslots; 1243 int i, ret, nslots;
1243 1244
@@ -1279,8 +1280,21 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
1279 * but the synchronization is difficult to achieve with Cyclic and 1280 * but the synchronization is difficult to achieve with Cyclic and
1280 * cannot be guaranteed, so we error out early. 1281 * cannot be guaranteed, so we error out early.
1281 */ 1282 */
1282 if (nslots > MAX_NR_SG) 1283 if (nslots > MAX_NR_SG) {
1283 return NULL; 1284 /*
1285 * If the burst and period sizes are the same, we can put
1286 * the full buffer into a single period and activate
1287 * intermediate interrupts. This will produce interrupts
1288 * after each burst, which is also after each desired period.
1289 */
1290 if (burst == period_len) {
1291 period_len = buf_len;
1292 nslots = 2;
1293 use_intermediate = true;
1294 } else {
1295 return NULL;
1296 }
1297 }
1284 1298
1285 edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), 1299 edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
1286 GFP_ATOMIC); 1300 GFP_ATOMIC);
@@ -1358,8 +1372,13 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
1358 /* 1372 /*
1359 * Enable period interrupt only if it is requested 1373 * Enable period interrupt only if it is requested
1360 */ 1374 */
1361 if (tx_flags & DMA_PREP_INTERRUPT) 1375 if (tx_flags & DMA_PREP_INTERRUPT) {
1362 edesc->pset[i].param.opt |= TCINTEN; 1376 edesc->pset[i].param.opt |= TCINTEN;
1377
1378 /* Also enable intermediate interrupts if necessary */
1379 if (use_intermediate)
1380 edesc->pset[i].param.opt |= ITCINTEN;
1381 }
1363 } 1382 }
1364 1383
1365 /* Place the cyclic channel to highest priority queue */ 1384 /* Place the cyclic channel to highest priority queue */
@@ -1570,32 +1589,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
1570 return IRQ_HANDLED; 1589 return IRQ_HANDLED;
1571} 1590}
1572 1591
1573static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
1574{
1575 struct platform_device *tc_pdev;
1576 int ret;
1577
1578 if (!IS_ENABLED(CONFIG_OF) || !tc)
1579 return;
1580
1581 tc_pdev = of_find_device_by_node(tc->node);
1582 if (!tc_pdev) {
1583 pr_err("%s: TPTC device is not found\n", __func__);
1584 return;
1585 }
1586 if (!pm_runtime_enabled(&tc_pdev->dev))
1587 pm_runtime_enable(&tc_pdev->dev);
1588
1589 if (enable)
1590 ret = pm_runtime_get_sync(&tc_pdev->dev);
1591 else
1592 ret = pm_runtime_put_sync(&tc_pdev->dev);
1593
1594 if (ret < 0)
1595 pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
1596 enable ? "get" : "put", dev_name(&tc_pdev->dev));
1597}
1598
1599/* Alloc channel resources */ 1592/* Alloc channel resources */
1600static int edma_alloc_chan_resources(struct dma_chan *chan) 1593static int edma_alloc_chan_resources(struct dma_chan *chan)
1601{ 1594{
@@ -1632,8 +1625,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
1632 EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id, 1625 EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
1633 echan->hw_triggered ? "HW" : "SW"); 1626 echan->hw_triggered ? "HW" : "SW");
1634 1627
1635 edma_tc_set_pm_state(echan->tc, true);
1636
1637 return 0; 1628 return 0;
1638 1629
1639err_slot: 1630err_slot:
@@ -1670,7 +1661,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
1670 echan->alloced = false; 1661 echan->alloced = false;
1671 } 1662 }
1672 1663
1673 edma_tc_set_pm_state(echan->tc, false);
1674 echan->tc = NULL; 1664 echan->tc = NULL;
1675 echan->hw_triggered = false; 1665 echan->hw_triggered = false;
1676 1666
@@ -2417,10 +2407,8 @@ static int edma_pm_suspend(struct device *dev)
2417 int i; 2407 int i;
2418 2408
2419 for (i = 0; i < ecc->num_channels; i++) { 2409 for (i = 0; i < ecc->num_channels; i++) {
2420 if (echan[i].alloced) { 2410 if (echan[i].alloced)
2421 edma_setup_interrupt(&echan[i], false); 2411 edma_setup_interrupt(&echan[i], false);
2422 edma_tc_set_pm_state(echan[i].tc, false);
2423 }
2424 } 2412 }
2425 2413
2426 return 0; 2414 return 0;
@@ -2450,8 +2438,6 @@ static int edma_pm_resume(struct device *dev)
2450 2438
2451 /* Set up channel -> slot mapping for the entry slot */ 2439 /* Set up channel -> slot mapping for the entry slot */
2452 edma_set_chmap(&echan[i], echan[i].slot[0]); 2440 edma_set_chmap(&echan[i], echan[i].slot[0]);
2453
2454 edma_tc_set_pm_state(echan[i].tc, true);
2455 } 2441 }
2456 } 2442 }
2457 2443
@@ -2475,7 +2461,8 @@ static struct platform_driver edma_driver = {
2475 2461
2476static int edma_tptc_probe(struct platform_device *pdev) 2462static int edma_tptc_probe(struct platform_device *pdev)
2477{ 2463{
2478 return 0; 2464 pm_runtime_enable(&pdev->dev);
2465 return pm_runtime_get_sync(&pdev->dev);
2479} 2466}
2480 2467
2481static struct platform_driver edma_tptc_driver = { 2468static struct platform_driver edma_tptc_driver = {