author		Andy Shevchenko <andriy.shevchenko@linux.intel.com>	2014-09-23 10:18:14 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2014-10-15 11:01:05 -0400
commit		99d9bf4ed27c63d5559e31d112f71af655c7182b
tree		f0fc2e9d1053d98b265f38453e01cba00d1b7bd2	/drivers/dma/dw/core.c
parent		2540f74b187e3ec0fe106b7427c4a84c955dc140
dmaengine: dw: enable and disable controller when needed
Enable the controller automatically when the first user requests a channel, and disable it when the last user is gone.

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
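In effect, the patch replaces the unconditional dw_dma_on() call in probe with reference counting by channel mask: dwc_alloc_chan_resources() sets the channel's bit in dw->in_use and powers the controller on when the mask was previously empty, while dwc_free_chan_resources() clears the bit and powers the controller off when the mask drops back to zero. A minimal userspace sketch of the same first-user/last-user pattern follows; struct ctrl, ctrl_on(), ctrl_off(), channel_alloc(), and channel_free() are hypothetical stand-ins for illustration, not driver API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the controller state. */
struct ctrl {
	uint32_t in_use;	/* one bit per channel that holds the controller */
	bool enabled;
};

static void ctrl_on(struct ctrl *c)  { c->enabled = true; }
static void ctrl_off(struct ctrl *c) { c->enabled = false; }

/* First user powers the controller on, as dwc_alloc_chan_resources() does. */
static void channel_alloc(struct ctrl *c, unsigned int chan)
{
	if (!c->in_use)
		ctrl_on(c);
	c->in_use |= 1u << chan;
}

/* Last user powers it off, as dwc_free_chan_resources() does. */
static void channel_free(struct ctrl *c, unsigned int chan)
{
	c->in_use &= ~(1u << chan);
	if (!c->in_use)
		ctrl_off(c);
}

int main(void)
{
	struct ctrl c = { 0 };

	channel_alloc(&c, 0);	/* first user: controller turns on */
	channel_alloc(&c, 3);
	channel_free(&c, 0);	/* one user still left: stays on */
	channel_free(&c, 3);	/* last user gone: controller turns off */
	printf("enabled=%d\n", c.enabled);	/* prints enabled=0 */
	return 0;
}

Because dw->in_use is a per-channel bitmask rather than a plain counter, freeing the same channel twice cannot underflow the count; clearing an already-clear bit is a no-op.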
Diffstat (limited to 'drivers/dma/dw/core.c')
-rw-r--r--	drivers/dma/dw/core.c	60
1 file changed, 35 insertions(+), 25 deletions(-)
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 48126380e036..244722170410 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1094,6 +1094,31 @@ static void dwc_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&dwc->lock, flags);
 }
 
+/*----------------------------------------------------------------------*/
+
+static void dw_dma_off(struct dw_dma *dw)
+{
+	int i;
+
+	dma_writel(dw, CFG, 0);
+
+	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+
+	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
+		cpu_relax();
+
+	for (i = 0; i < dw->dma.chancnt; i++)
+		dw->chan[i].initialized = false;
+}
+
+static void dw_dma_on(struct dw_dma *dw)
+{
+	dma_writel(dw, CFG, DW_CFG_DMA_EN);
+}
+
 static int dwc_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
@@ -1118,6 +1143,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	 * doesn't mean what you think it means), and status writeback.
 	 */
 
+	/* Enable controller here if needed */
+	if (!dw->in_use)
+		dw_dma_on(dw);
+	dw->in_use |= dwc->mask;
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	i = dwc->descs_allocated;
 	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
@@ -1182,6 +1212,11 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
+	/* Disable controller in case it was a last user */
+	dw->in_use &= ~dwc->mask;
+	if (!dw->in_use)
+		dw_dma_off(dw);
+
 	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
 		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
 		dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
@@ -1452,29 +1487,6 @@ EXPORT_SYMBOL(dw_dma_cyclic_free);
 
 /*----------------------------------------------------------------------*/
 
-static void dw_dma_off(struct dw_dma *dw)
-{
-	int i;
-
-	dma_writel(dw, CFG, 0);
-
-	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
-	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
-	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
-	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
-
-	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
-		cpu_relax();
-
-	for (i = 0; i < dw->dma.chancnt; i++)
-		dw->chan[i].initialized = false;
-}
-
-static void dw_dma_on(struct dw_dma *dw)
-{
-	dma_writel(dw, CFG, DW_CFG_DMA_EN);
-}
-
 int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 {
 	struct dw_dma *dw;
@@ -1648,8 +1660,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	dw->dma.device_tx_status = dwc_tx_status;
 	dw->dma.device_issue_pending = dwc_issue_pending;
 
-	dw_dma_on(dw);
-
 	err = dma_async_device_register(&dw->dma);
 	if (err)
 		goto err_dma_register;
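One detail worth noting in the relocated dw_dma_off(): after writing 0 to CFG, it spins on dma_readl(dw, CFG) & DW_CFG_DMA_EN with cpu_relax() until the hardware confirms the engine is really off, then marks every channel uninitialized so it gets reprogrammed on next use. A standalone sketch of that poll-until-clear idea, with a bounded retry count added for illustration; CFG_EN, read_cfg(), and wait_disabled() are hypothetical names, not the driver's own:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CFG_EN (1u << 0)	/* hypothetical enable bit */

/* Simulated CFG register: deasserts its enable bit after a few reads,
 * the way real hardware does once in-flight activity drains. */
static uint32_t cfg = CFG_EN;

static uint32_t read_cfg(void)
{
	static int reads;

	if (++reads >= 3)
		cfg &= ~CFG_EN;
	return cfg;
}

/* Poll until the enable bit clears, giving up after max_polls reads. */
static bool wait_disabled(unsigned int max_polls)
{
	while (max_polls--) {
		if (!(read_cfg() & CFG_EN))
			return true;	/* hardware confirms it is off */
	}
	return false;	/* still enabled; caller decides how to recover */
}

int main(void)
{
	printf("disabled=%d\n", wait_disabled(16));	/* prints disabled=1 */
	return 0;
}

The driver itself spins without a bound, relying on the hardware to deassert DW_CFG_DMA_EN once the disable takes effect.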