Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/stm32-dma.c    | 71
-rw-r--r--  drivers/dma/stm32-dmamux.c | 58
-rw-r--r--  drivers/dma/stm32-mdma.c   | 56
3 files changed, 155 insertions(+), 30 deletions(-)
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 4903a408fc14..ba239b529fa9 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -23,6 +23,7 @@
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/reset.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -641,12 +642,13 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
 {
 	struct stm32_dma_chan *chan = devid;
 	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-	u32 status, scr;
+	u32 status, scr, sfcr;
 
 	spin_lock(&chan->vchan.lock);
 
 	status = stm32_dma_irq_status(chan);
 	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+	sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
 
 	if (status & STM32_DMA_TCI) {
 		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
@@ -661,10 +663,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
 	if (status & STM32_DMA_FEI) {
 		stm32_dma_irq_clear(chan, STM32_DMA_FEI);
 		status &= ~STM32_DMA_FEI;
-		if (!(scr & STM32_DMA_SCR_EN))
-			dev_err(chan2dev(chan), "FIFO Error\n");
-		else
-			dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
+		if (sfcr & STM32_DMA_SFCR_FEIE) {
+			if (!(scr & STM32_DMA_SCR_EN))
+				dev_err(chan2dev(chan), "FIFO Error\n");
+			else
+				dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
+		}
 	}
 	if (status) {
 		stm32_dma_irq_clear(chan, status);
@@ -1112,15 +1116,14 @@ static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
 	int ret;
 
 	chan->config_init = false;
-	ret = clk_prepare_enable(dmadev->clk);
-	if (ret < 0) {
-		dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
+
+	ret = pm_runtime_get_sync(dmadev->ddev.dev);
+	if (ret < 0)
 		return ret;
-	}
 
 	ret = stm32_dma_disable_chan(chan);
 	if (ret < 0)
-		clk_disable_unprepare(dmadev->clk);
+		pm_runtime_put(dmadev->ddev.dev);
 
 	return ret;
 }
@@ -1140,7 +1143,7 @@ static void stm32_dma_free_chan_resources(struct dma_chan *c)
 		spin_unlock_irqrestore(&chan->vchan.lock, flags);
 	}
 
-	clk_disable_unprepare(dmadev->clk);
+	pm_runtime_put(dmadev->ddev.dev);
 
 	vchan_free_chan_resources(to_virt_chan(c));
 }
@@ -1240,6 +1243,12 @@ static int stm32_dma_probe(struct platform_device *pdev)
 		return PTR_ERR(dmadev->clk);
 	}
 
+	ret = clk_prepare_enable(dmadev->clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
+		return ret;
+	}
+
 	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
 						"st,mem2mem");
 
@@ -1289,7 +1298,7 @@ static int stm32_dma_probe(struct platform_device *pdev)
 
 	ret = dma_async_device_register(dd);
 	if (ret)
-		return ret;
+		goto clk_free;
 
 	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
 		chan = &dmadev->chan[i];
@@ -1321,20 +1330,58 @@ static int stm32_dma_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, dmadev);
 
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_put(&pdev->dev);
+
 	dev_info(&pdev->dev, "STM32 DMA driver registered\n");
 
 	return 0;
 
 err_unregister:
 	dma_async_device_unregister(dd);
+clk_free:
+	clk_disable_unprepare(dmadev->clk);
 
 	return ret;
 }
 
+#ifdef CONFIG_PM
+static int stm32_dma_runtime_suspend(struct device *dev)
+{
+	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(dmadev->clk);
+
+	return 0;
+}
+
+static int stm32_dma_runtime_resume(struct device *dev)
+{
+	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(dmadev->clk);
+	if (ret) {
+		dev_err(dev, "failed to prepare_enable clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_dma_pm_ops = {
+	SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
+			   stm32_dma_runtime_resume, NULL)
+};
+
 static struct platform_driver stm32_dma_driver = {
 	.driver = {
 		.name = "stm32-dma",
 		.of_match_table = stm32_dma_of_match,
+		.pm = &stm32_dma_pm_ops,
 	},
 };
 
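The same pattern recurs in all three files: channel allocation takes a runtime PM reference instead of toggling the clock directly, and the clock is gated only in the runtime suspend/resume callbacks. A minimal sketch of that pairing, using hypothetical foo_* names rather than the driver's actual structures:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical driver data, standing in for stm32_dma_device. */
struct foo_dma_dev {
	struct device *dev;
	struct clk *clk;
};

/*
 * Opening a channel takes a runtime PM reference; if the device was
 * runtime-suspended, the PM core calls foo_runtime_resume() first,
 * which turns the clock back on.
 */
static int foo_alloc_chan(struct foo_dma_dev *fdev)
{
	int ret;

	ret = pm_runtime_get_sync(fdev->dev);
	if (ret < 0)	/* get_sync returns 0 or 1 on success */
		return ret;

	return 0;
}

/*
 * Releasing a channel drops the reference; once the usage count hits
 * zero the core may invoke foo_runtime_suspend() and gate the clock.
 */
static void foo_free_chan(struct foo_dma_dev *fdev)
{
	pm_runtime_put(fdev->dev);
}

static int foo_runtime_suspend(struct device *dev)
{
	struct foo_dma_dev *fdev = dev_get_drvdata(dev);

	clk_disable_unprepare(fdev->clk);

	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	struct foo_dma_dev *fdev = dev_get_drvdata(dev);

	return clk_prepare_enable(fdev->clk);
}

Because the reference is counted, the clock stays on while any channel is in use and is gated automatically once the whole controller goes idle.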
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index b922db90939a..a67119199c45 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -28,6 +28,7 @@
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
+#include <linux/pm_runtime.h>
 #include <linux/reset.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
@@ -79,8 +80,7 @@ static void stm32_dmamux_free(struct device *dev, void *route_data)
 	stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0);
 	clear_bit(mux->chan_id, dmamux->dma_inuse);
 
-	if (!IS_ERR(dmamux->clk))
-		clk_disable(dmamux->clk);
+	pm_runtime_put_sync(dev);
 
 	spin_unlock_irqrestore(&dmamux->lock, flags);
 
@@ -146,13 +146,10 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
 
 	/* Set dma request */
 	spin_lock_irqsave(&dmamux->lock, flags);
-	if (!IS_ERR(dmamux->clk)) {
-		ret = clk_enable(dmamux->clk);
-		if (ret < 0) {
-			spin_unlock_irqrestore(&dmamux->lock, flags);
-			dev_err(&pdev->dev, "clk_prep_enable issue: %d\n", ret);
-			goto error;
-		}
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0) {
+		spin_unlock_irqrestore(&dmamux->lock, flags);
+		goto error;
 	}
 	spin_unlock_irqrestore(&dmamux->lock, flags);
 
@@ -254,6 +251,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
 		dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n",
 			 stm32_dmamux->dmamux_requests);
 	}
+	pm_runtime_get_noresume(&pdev->dev);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	iomem = devm_ioremap_resource(&pdev->dev, res);
@@ -282,6 +280,8 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
 	stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;
 
 	platform_set_drvdata(pdev, stm32_dmamux);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
 
 	if (!IS_ERR(stm32_dmamux->clk)) {
 		ret = clk_prepare_enable(stm32_dmamux->clk);
@@ -291,17 +291,52 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
 		}
 	}
 
+	pm_runtime_get_noresume(&pdev->dev);
+
 	/* Reset the dmamux */
 	for (i = 0; i < stm32_dmamux->dma_requests; i++)
 		stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);
 
-	if (!IS_ERR(stm32_dmamux->clk))
-		clk_disable(stm32_dmamux->clk);
+	pm_runtime_put(&pdev->dev);
 
 	return of_dma_router_register(node, stm32_dmamux_route_allocate,
 				      &stm32_dmamux->dmarouter);
 }
 
+#ifdef CONFIG_PM
+static int stm32_dmamux_runtime_suspend(struct device *dev)
+{
+	struct platform_device *pdev =
+		container_of(dev, struct platform_device, dev);
+	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(stm32_dmamux->clk);
+
+	return 0;
+}
+
+static int stm32_dmamux_runtime_resume(struct device *dev)
+{
+	struct platform_device *pdev =
+		container_of(dev, struct platform_device, dev);
+	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
+	int ret;
+
+	ret = clk_prepare_enable(stm32_dmamux->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to prepare_enable clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_dmamux_pm_ops = {
+	SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend,
+			   stm32_dmamux_runtime_resume, NULL)
+};
+
 static const struct of_device_id stm32_dmamux_match[] = {
 	{ .compatible = "st,stm32h7-dmamux" },
 	{},
@@ -312,6 +347,7 @@ static struct platform_driver stm32_dmamux_driver = {
 	.driver = {
 		.name = "stm32-dmamux",
 		.of_match_table = stm32_dmamux_match,
+		.pm = &stm32_dmamux_pm_ops,
 	},
 };
 
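Both stm32-dma.c and stm32-mdma.c end their probe with the same four pm_runtime calls, and stm32-dmamux.c splits the same steps around its register reset. The sequence looks redundant but each step has a job: the clock was already enabled by hand during probe, so pm_runtime_set_active() tells the PM core the hardware is powered before pm_runtime_enable() switches runtime PM on; the get_noresume()/put() pair then takes and drops a reference without forcing a resume, so the final put triggers an idle check and lets the device (and its clock) suspend as soon as probe finishes. A sketch of the intent, reusing the hypothetical struct foo_dma_dev from the sketch above:

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	struct foo_dma_dev *fdev;
	int ret;

	fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;
	fdev->dev = &pdev->dev;

	fdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(fdev->clk))
		return PTR_ERR(fdev->clk);

	/* The hardware is touched during probe, so enable the clock by hand. */
	ret = clk_prepare_enable(fdev->clk);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, fdev);

	/*
	 * Tell the PM core the device is already powered, then enable
	 * runtime PM. The get_noresume()/put() pair drops the usage count
	 * back to zero with an idle notification, allowing the clock to be
	 * gated as soon as probe returns.
	 */
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_put(&pdev->dev);

	return 0;
}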
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 390e4cae0e1a..4e0eede599a8 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -37,6 +37,7 @@
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/reset.h>
 #include <linux/slab.h>
 
@@ -1456,15 +1457,13 @@ static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
 		return -ENOMEM;
 	}
 
-	ret = clk_prepare_enable(dmadev->clk);
-	if (ret < 0) {
-		dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
+	ret = pm_runtime_get_sync(dmadev->ddev.dev);
+	if (ret < 0)
 		return ret;
-	}
 
 	ret = stm32_mdma_disable_chan(chan);
 	if (ret < 0)
-		clk_disable_unprepare(dmadev->clk);
+		pm_runtime_put(dmadev->ddev.dev);
 
 	return ret;
 }
@@ -1484,7 +1483,7 @@ static void stm32_mdma_free_chan_resources(struct dma_chan *c)
 		spin_unlock_irqrestore(&chan->vchan.lock, flags);
 	}
 
-	clk_disable_unprepare(dmadev->clk);
+	pm_runtime_put(dmadev->ddev.dev);
 	vchan_free_chan_resources(to_virt_chan(c));
 	dmam_pool_destroy(chan->desc_pool);
 	chan->desc_pool = NULL;
@@ -1579,9 +1578,11 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 
 	dmadev->nr_channels = nr_channels;
 	dmadev->nr_requests = nr_requests;
-	device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
-				       dmadev->ahb_addr_masks,
-				       count);
+	ret = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
+					     dmadev->ahb_addr_masks,
+					     count);
+	if (ret)
+		return ret;
 	dmadev->nr_ahb_addr_masks = count;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1597,6 +1598,12 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	ret = clk_prepare_enable(dmadev->clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
+		return ret;
+	}
+
 	dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
 	if (!IS_ERR(dmadev->rst)) {
 		reset_control_assert(dmadev->rst);
@@ -1668,6 +1675,10 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 	}
 
 	platform_set_drvdata(pdev, dmadev);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_put(&pdev->dev);
 
 	dev_info(&pdev->dev, "STM32 MDMA driver registered\n");
 
@@ -1677,11 +1688,42 @@ err_unregister:
 	return ret;
 }
 
+#ifdef CONFIG_PM
+static int stm32_mdma_runtime_suspend(struct device *dev)
+{
+	struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(dmadev->clk);
+
+	return 0;
+}
+
+static int stm32_mdma_runtime_resume(struct device *dev)
+{
+	struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(dmadev->clk);
+	if (ret) {
+		dev_err(dev, "failed to prepare_enable clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_mdma_pm_ops = {
+	SET_RUNTIME_PM_OPS(stm32_mdma_runtime_suspend,
+			   stm32_mdma_runtime_resume, NULL)
+};
+
 static struct platform_driver stm32_mdma_driver = {
 	.probe = stm32_mdma_probe,
 	.driver = {
 		.name = "stm32-mdma",
 		.of_match_table = stm32_mdma_of_match,
+		.pm = &stm32_mdma_pm_ops,
 	},
 };
 
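One detail shared by all three files is the #ifdef CONFIG_PM guard around the runtime callbacks. SET_RUNTIME_PM_OPS() expands to nothing when CONFIG_PM is disabled, so without the guard the callbacks would be defined but never referenced and would trigger unused-function warnings. A sketch of that wiring, again with hypothetical foo_* names:

#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
/* Only compiled when the pm_ops table below actually references them. */
static int foo_runtime_suspend(struct device *dev)
{
	return 0;	/* gate clocks here */
}

static int foo_runtime_resume(struct device *dev)
{
	return 0;	/* ungate clocks here */
}
#endif

static const struct dev_pm_ops foo_pm_ops = {
	/* Expands to .runtime_suspend/.runtime_resume, or to nothing. */
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo-dma",	/* hypothetical */
		.pm	= &foo_pm_ops,
	},
};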