about summary refs log tree commit diff stats
path: root/drivers/dma
diff options
context:
space:
mode:
authorGuennadi Liakhovetski <g.liakhovetski@gmx.de>2012-01-04 09:34:17 -0500
committerVinod Koul <vinod.koul@linux.intel.com>2012-01-06 01:10:11 -0500
commitc11b46c32c8a9bf05fdb76d70d8dc74fcbfd02d1 (patch)
tree970a85e6a577d878c5ee57f31a1ee3df0fb8ad00 /drivers/dma
parentf69f2e264f6388df6d3cae45dd67ddfd52aaa14b (diff)
dma: shdma: fix runtime PM: clear channel buffers on reset
On platforms supporting power domains, if the domain containing a DMAC instance is powered down, the driver fails to resume correctly. On those platforms DMAC channels have an additional CHCLR register for clearing channel buffers. Using this register during runtime resume fixes the problem. Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de> Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/shdma.c47
1 file changed, 31 insertions(+), 16 deletions(-)
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 592304fb41a6..54043cd831c8 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -56,6 +56,15 @@ static LIST_HEAD(sh_dmae_devices);
56static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)]; 56static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
57 57
58static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); 58static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
59static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
60
61static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
62{
63 struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
64
65 __raw_writel(data, shdev->chan_reg +
66 shdev->pdata->channel[sh_dc->id].chclr_offset);
67}
59 68
60static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) 69static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
61{ 70{
@@ -128,6 +137,15 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
128 137
129 dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); 138 dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
130 139
140 if (shdev->pdata->chclr_present) {
141 int i;
142 for (i = 0; i < shdev->pdata->channel_num; i++) {
143 struct sh_dmae_chan *sh_chan = shdev->chan[i];
144 if (sh_chan)
145 chclr_write(sh_chan, 0);
146 }
147 }
148
131 dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); 149 dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
132 150
133 dmaor = dmaor_read(shdev); 151 dmaor = dmaor_read(shdev);
@@ -138,6 +156,10 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
138 dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n"); 156 dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
139 return -EIO; 157 return -EIO;
140 } 158 }
159 if (shdev->pdata->dmaor_init & ~dmaor)
160 dev_warn(shdev->common.dev,
161 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
162 dmaor, shdev->pdata->dmaor_init);
141 return 0; 163 return 0;
142} 164}
143 165
@@ -258,8 +280,6 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
258 return 0; 280 return 0;
259} 281}
260 282
261static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
262
263static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) 283static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
264{ 284{
265 struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; 285 struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
@@ -339,6 +359,8 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
339 sh_chan_xfer_ld_queue(sh_chan); 359 sh_chan_xfer_ld_queue(sh_chan);
340 sh_chan->pm_state = DMAE_PM_ESTABLISHED; 360 sh_chan->pm_state = DMAE_PM_ESTABLISHED;
341 } 361 }
362 } else {
363 sh_chan->pm_state = DMAE_PM_PENDING;
342 } 364 }
343 365
344 spin_unlock_irq(&sh_chan->desc_lock); 366 spin_unlock_irq(&sh_chan->desc_lock);
@@ -1224,6 +1246,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1224 1246
1225 platform_set_drvdata(pdev, shdev); 1247 platform_set_drvdata(pdev, shdev);
1226 1248
1249 shdev->common.dev = &pdev->dev;
1250
1227 pm_runtime_enable(&pdev->dev); 1251 pm_runtime_enable(&pdev->dev);
1228 pm_runtime_get_sync(&pdev->dev); 1252 pm_runtime_get_sync(&pdev->dev);
1229 1253
@@ -1253,7 +1277,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1253 shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; 1277 shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
1254 shdev->common.device_control = sh_dmae_control; 1278 shdev->common.device_control = sh_dmae_control;
1255 1279
1256 shdev->common.dev = &pdev->dev;
1257 /* Default transfer size of 32 bytes requires 32-byte alignment */ 1280 /* Default transfer size of 32 bytes requires 32-byte alignment */
1258 shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; 1281 shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
1259 1282
@@ -1434,22 +1457,17 @@ static int sh_dmae_runtime_resume(struct device *dev)
1434#ifdef CONFIG_PM 1457#ifdef CONFIG_PM
1435static int sh_dmae_suspend(struct device *dev) 1458static int sh_dmae_suspend(struct device *dev)
1436{ 1459{
1437 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1438 int i;
1439
1440 for (i = 0; i < shdev->pdata->channel_num; i++) {
1441 struct sh_dmae_chan *sh_chan = shdev->chan[i];
1442 if (sh_chan->descs_allocated)
1443 sh_chan->pm_error = pm_runtime_put_sync(dev);
1444 }
1445
1446 return 0; 1460 return 0;
1447} 1461}
1448 1462
1449static int sh_dmae_resume(struct device *dev) 1463static int sh_dmae_resume(struct device *dev)
1450{ 1464{
1451 struct sh_dmae_device *shdev = dev_get_drvdata(dev); 1465 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1452 int i; 1466 int i, ret;
1467
1468 ret = sh_dmae_rst(shdev);
1469 if (ret < 0)
1470 dev_err(dev, "Failed to reset!\n");
1453 1471
1454 for (i = 0; i < shdev->pdata->channel_num; i++) { 1472 for (i = 0; i < shdev->pdata->channel_num; i++) {
1455 struct sh_dmae_chan *sh_chan = shdev->chan[i]; 1473 struct sh_dmae_chan *sh_chan = shdev->chan[i];
@@ -1458,9 +1476,6 @@ static int sh_dmae_resume(struct device *dev)
1458 if (!sh_chan->descs_allocated) 1476 if (!sh_chan->descs_allocated)
1459 continue; 1477 continue;
1460 1478
1461 if (!sh_chan->pm_error)
1462 pm_runtime_get_sync(dev);
1463
1464 if (param) { 1479 if (param) {
1465 const struct sh_dmae_slave_config *cfg = param->config; 1480 const struct sh_dmae_slave_config *cfg = param->config;
1466 dmae_set_dmars(sh_chan, cfg->mid_rid); 1481 dmae_set_dmars(sh_chan, cfg->mid_rid);