author	Andy Shevchenko <andriy.shevchenko@linux.intel.com>	2014-05-08 05:01:49 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2014-05-22 06:07:24 -0400
commit	8be4f523b48087765defd18483c66b268b3286e5 (patch)
tree	75296fcfd7bd96e50329c4f6e6ef2433e25b6357 /drivers/dma
parent	d2f78e95e42a9130002c76f1a1f76e657a4b4004 (diff)
dmaengine: dw: fix regression in dw_probe() function
The commit dbde5c29 ("dw_dmac: use devm_* functions to simplify code") converted the probe function to use devm_* helpers and simultaneously introduced a regression: we have to 1) call clk_disable_unprepare() on the error path, and 2) check the error code of clk_prepare_enable(). The first part was done in the original code; the second one is new.

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
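For illustration only, a minimal sketch of the pattern the fix applies, using a hypothetical simplified driver context (my_chip, my_probe and the handler parameter are stand-ins, not part of dw_dmac): the return value of clk_prepare_enable() is checked, and every failure after the clock is enabled unwinds through a label that calls clk_disable_unprepare().

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>

/* Hypothetical context used only for this sketch. */
struct my_chip {
	struct device *dev;
	struct clk *clk;
	int irq;
};

static int my_probe(struct my_chip *chip, irq_handler_t handler)
{
	int err;

	chip->clk = devm_clk_get(chip->dev, "hclk");
	if (IS_ERR(chip->clk))
		return PTR_ERR(chip->clk);

	/* Part 2 of the fix: check the error code of clk_prepare_enable(). */
	err = clk_prepare_enable(chip->clk);
	if (err)
		return err;

	err = request_irq(chip->irq, handler, IRQF_SHARED, "my_dev", chip);
	if (err)
		goto err_clk;

	return 0;

err_clk:
	/* Part 1 of the fix: undo the clock enable on the error path. */
	clk_disable_unprepare(chip->clk);
	return err;
}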
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/dw/core.c	33
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 009dc62f9437..d539019fbe60 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1496,7 +1496,9 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	dw->clk = devm_clk_get(chip->dev, "hclk");
 	if (IS_ERR(dw->clk))
 		return PTR_ERR(dw->clk);
-	clk_prepare_enable(dw->clk);
+	err = clk_prepare_enable(dw->clk);
+	if (err)
+		return err;
 
 	dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
 	autocfg = dw_params >> DW_PARAMS_EN & 0x1;
@@ -1505,15 +1507,19 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 
 	if (!pdata && autocfg) {
 		pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
-		if (!pdata)
-			return -ENOMEM;
+		if (!pdata) {
+			err = -ENOMEM;
+			goto err_pdata;
+		}
 
 		/* Fill platform data with the default values */
 		pdata->is_private = true;
 		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
 		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
-	} else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
-		return -EINVAL;
+	} else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
+		err = -EINVAL;
+		goto err_pdata;
+	}
 
 	if (autocfg)
 		nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
@@ -1522,8 +1528,10 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 
 	dw->chan = devm_kcalloc(chip->dev, nr_channels, sizeof(*dw->chan),
 				GFP_KERNEL);
-	if (!dw->chan)
-		return -ENOMEM;
+	if (!dw->chan) {
+		err = -ENOMEM;
+		goto err_pdata;
+	}
 
 	/* Get hardware configuration parameters */
 	if (autocfg) {
@@ -1553,7 +1561,8 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 					 sizeof(struct dw_desc), 4, 0);
 	if (!dw->desc_pool) {
 		dev_err(chip->dev, "No memory for descriptors dma pool\n");
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto err_pdata;
 	}
 
 	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
@@ -1561,7 +1570,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
 			  "dw_dmac", dw);
 	if (err)
-		return err;
+		goto err_pdata;
 
 	INIT_LIST_HEAD(&dw->dma.channels);
 	for (i = 0; i < nr_channels; i++) {
@@ -1656,6 +1665,10 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	dma_async_device_register(&dw->dma);
 
 	return 0;
+
+err_pdata:
+	clk_disable_unprepare(dw->clk);
+	return err;
 }
 EXPORT_SYMBOL_GPL(dw_dma_probe);
 
@@ -1676,6 +1689,8 @@ int dw_dma_remove(struct dw_dma_chip *chip)
 		channel_clear_bit(dw, CH_EN, dwc->mask);
 	}
 
+	clk_disable_unprepare(dw->clk);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dw_dma_remove);
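As a companion to the remove-path hunk above, a hedged sketch of the matching teardown, reusing the hypothetical my_chip context from the earlier sketch: the clock enabled in probe is balanced with clk_disable_unprepare() in remove.

static int my_remove(struct my_chip *chip)
{
	free_irq(chip->irq, chip);

	/* Balance the clk_prepare_enable() done in probe. */
	clk_disable_unprepare(chip->clk);

	return 0;
}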