about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorAnup Patel <anup.patel@broadcom.com>2017-08-22 05:56:59 -0400
committerVinod Koul <vinod.koul@intel.com>2017-08-28 07:14:24 -0400
commiteb67744b9af736073d6dffb7fb139f3f05d7e6a8 (patch)
treeda12ca14aa9536541246be94489b987aa6bb9050
parentf83385142c3a0864e4ac82d852dd0fdb54742ce3 (diff)
dmaengine: bcm-sba-raid: Alloc resources before registering DMA device
We should allocate DMA channel resources before registering the DMA device in sba_probe(), because we can get a DMA request soon after registering the DMA device. If DMA channel resources are not allocated before the first DMA request, then the SBA-RAID driver will crash.

Signed-off-by: Anup Patel <anup.patel@broadcom.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r-- drivers/dma/bcm-sba-raid.c | 30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index 9a9c0ad3689a..67c53c691af1 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -1478,13 +1478,13 @@ static int sba_prealloc_channel_resources(struct sba_device *sba)
 	int i, j, ret = 0;
 	struct sba_request *req = NULL;
 
-	sba->resp_base = dma_alloc_coherent(sba->dma_dev.dev,
+	sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
 					    sba->max_resp_pool_size,
 					    &sba->resp_dma_base, GFP_KERNEL);
 	if (!sba->resp_base)
 		return -ENOMEM;
 
-	sba->cmds_base = dma_alloc_coherent(sba->dma_dev.dev,
+	sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
 					    sba->max_cmds_pool_size,
 					    &sba->cmds_dma_base, GFP_KERNEL);
 	if (!sba->cmds_base) {
@@ -1534,11 +1534,11 @@ static int sba_prealloc_channel_resources(struct sba_device *sba)
 	return 0;
 
 fail_free_cmds_pool:
-	dma_free_coherent(sba->dma_dev.dev,
+	dma_free_coherent(sba->mbox_dev,
 			  sba->max_cmds_pool_size,
 			  sba->cmds_base, sba->cmds_dma_base);
 fail_free_resp_pool:
-	dma_free_coherent(sba->dma_dev.dev,
+	dma_free_coherent(sba->mbox_dev,
 			  sba->max_resp_pool_size,
 			  sba->resp_base, sba->resp_dma_base);
 	return ret;
@@ -1547,9 +1547,9 @@ fail_free_resp_pool:
 static void sba_freeup_channel_resources(struct sba_device *sba)
 {
 	dmaengine_terminate_all(&sba->dma_chan);
-	dma_free_coherent(sba->dma_dev.dev, sba->max_cmds_pool_size,
+	dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
 			  sba->cmds_base, sba->cmds_dma_base);
-	dma_free_coherent(sba->dma_dev.dev, sba->max_resp_pool_size,
+	dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
 			  sba->resp_base, sba->resp_dma_base);
 	sba->resp_base = NULL;
 	sba->resp_dma_base = 0;
@@ -1737,15 +1737,15 @@ static int sba_probe(struct platform_device *pdev)
 		}
 	}
 
-	/* Register DMA device with linux async framework */
-	ret = sba_async_register(sba);
+	/* Prealloc channel resource */
+	ret = sba_prealloc_channel_resources(sba);
 	if (ret)
 		goto fail_free_mchans;
 
-	/* Prealloc channel resource */
-	ret = sba_prealloc_channel_resources(sba);
+	/* Register DMA device with Linux async framework */
+	ret = sba_async_register(sba);
 	if (ret)
-		goto fail_async_dev_unreg;
+		goto fail_free_resources;
 
 	/* Print device info */
 	dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
@@ -1754,8 +1754,8 @@ static int sba_probe(struct platform_device *pdev)
 
 	return 0;
 
-fail_async_dev_unreg:
-	dma_async_device_unregister(&sba->dma_dev);
+fail_free_resources:
+	sba_freeup_channel_resources(sba);
 fail_free_mchans:
 	for (i = 0; i < sba->mchans_count; i++)
 		mbox_free_channel(sba->mchans[i]);
@@ -1767,10 +1767,10 @@ static int sba_remove(struct platform_device *pdev)
 	int i;
 	struct sba_device *sba = platform_get_drvdata(pdev);
 
-	sba_freeup_channel_resources(sba);
-
 	dma_async_device_unregister(&sba->dma_dev);
 
+	sba_freeup_channel_resources(sba);
+
 	for (i = 0; i < sba->mchans_count; i++)
 		mbox_free_channel(sba->mchans[i]);
 