author     Pramod Gurav <pramod.gurav@linaro.org>      2016-06-17 06:26:03 -0400
committer  Vinod Koul <vinod.koul@intel.com>           2016-06-30 00:35:56 -0400
commit     7d2545599f5b09ccf6cdcab9ced58644a9cd038e
tree       799a0b18f5604d15240d5a5969a7d56374620144
parent     1a695a905c18548062509178b98bc91e67510864
dmaengine: qcom-bam-dma: Add pm_runtime support
Add pm_runtime support for BAM DMA so that the clock is enabled only
while a transaction is in progress, which helps save power.
Signed-off-by: Pramod Gurav <pramod.gurav@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--  drivers/dma/qcom/bam_dma.c | 110
1 file changed, 109 insertions(+), 1 deletion(-)
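The pattern the patch applies around every register access is the standard
runtime-PM get/put pair with autosuspend. Below is a minimal sketch of that
pattern; the helper is illustrative and not a function from bam_dma.c, it
simply mirrors the calls added throughout the diff:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/*
 * Illustrative helper (not part of bam_dma.c): wrap a hardware access in
 * the runtime-PM calls the patch adds around each register touch.
 */
static int example_touch_hw(struct device *dev)
{
        int ret;

        /* Resume the device; the runtime_resume callback re-enables the clock. */
        ret = pm_runtime_get_sync(dev);
        if (ret < 0)
                return ret;

        /* ... program registers while the clock is guaranteed to be on ... */

        /*
         * Record activity and drop the reference; the device may runtime-
         * suspend (clock off) once the autosuspend delay expires.
         */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);

        return 0;
}

Because probe calls pm_runtime_irq_safe(), the runtime callbacks may run in
atomic context (for example via the interrupt handler), so they use only the
non-sleeping clk_enable()/clk_disable(); the sleeping clk_prepare()/
clk_unprepare() half is handled in the system sleep hooks instead.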
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 969b48176745..4754891742ad 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -48,6 +48,7 @@
 #include <linux/of_dma.h>
 #include <linux/clk.h>
 #include <linux/dmaengine.h>
+#include <linux/pm_runtime.h>
 
 #include "../dmaengine.h"
 #include "../virt-dma.h"
@@ -58,6 +59,8 @@ struct bam_desc_hw {
 	__le16 flags;
 };
 
+#define BAM_DMA_AUTOSUSPEND_DELAY 100
+
 #define DESC_FLAG_INT BIT(15)
 #define DESC_FLAG_EOT BIT(14)
 #define DESC_FLAG_EOB BIT(13)
@@ -527,12 +530,17 @@ static void bam_free_chan(struct dma_chan *chan)
 	struct bam_device *bdev = bchan->bdev;
 	u32 val;
 	unsigned long flags;
+	int ret;
+
+	ret = pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return;
 
 	vchan_free_chan_resources(to_virt_chan(chan));
 
 	if (bchan->curr_txd) {
 		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
-		return;
+		goto err;
 	}
 
 	spin_lock_irqsave(&bchan->vc.lock, flags);
@@ -550,6 +558,10 @@ static void bam_free_chan(struct dma_chan *chan)
 
 	/* disable irq */
 	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
+
+err:
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
 }
 
 /**
@@ -696,11 +708,18 @@ static int bam_pause(struct dma_chan *chan)
 	struct bam_chan *bchan = to_bam_chan(chan);
 	struct bam_device *bdev = bchan->bdev;
 	unsigned long flag;
+	int ret;
+
+	ret = pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return ret;
 
 	spin_lock_irqsave(&bchan->vc.lock, flag);
 	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
 	bchan->paused = 1;
 	spin_unlock_irqrestore(&bchan->vc.lock, flag);
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
 
 	return 0;
 }
@@ -715,11 +734,18 @@ static int bam_resume(struct dma_chan *chan)
 	struct bam_chan *bchan = to_bam_chan(chan);
 	struct bam_device *bdev = bchan->bdev;
 	unsigned long flag;
+	int ret;
+
+	ret = pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return ret;
 
 	spin_lock_irqsave(&bchan->vc.lock, flag);
 	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
 	bchan->paused = 0;
 	spin_unlock_irqrestore(&bchan->vc.lock, flag);
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
 
 	return 0;
 }
@@ -795,6 +821,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
 {
 	struct bam_device *bdev = data;
 	u32 clr_mask = 0, srcs = 0;
+	int ret;
 
 	srcs |= process_channel_irqs(bdev);
 
@@ -802,6 +829,10 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
 	if (srcs & P_IRQ)
 		tasklet_schedule(&bdev->task);
 
+	ret = pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return ret;
+
 	if (srcs & BAM_IRQ) {
 		clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
 
@@ -814,6 +845,9 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
 		writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
 	}
 
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
+
 	return IRQ_HANDLED;
 }
 
@@ -893,6 +927,7 @@ static void bam_start_dma(struct bam_chan *bchan)
 	struct bam_desc_hw *desc;
 	struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
 					sizeof(struct bam_desc_hw));
+	int ret;
 
 	lockdep_assert_held(&bchan->vc.lock);
 
@@ -904,6 +939,10 @@ static void bam_start_dma(struct bam_chan *bchan)
 	async_desc = container_of(vd, struct bam_async_desc, vd);
 	bchan->curr_txd = async_desc;
 
+	ret = pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return;
+
 	/* on first use, initialize the channel hardware */
 	if (!bchan->initialized)
 		bam_chan_init_hw(bchan, async_desc->dir);
@@ -946,6 +985,9 @@ static void bam_start_dma(struct bam_chan *bchan)
 	wmb();
 	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
 			bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
+
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
 }
 
 /**
@@ -970,6 +1012,7 @@ static void dma_tasklet(unsigned long data)
 		bam_start_dma(bchan);
 		spin_unlock_irqrestore(&bchan->vc.lock, flags);
 	}
+
 }
 
 /**
@@ -1213,6 +1256,13 @@ static int bam_dma_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_unregister_dma;
 
+	pm_runtime_irq_safe(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
 	return 0;
 
 err_unregister_dma:
@@ -1233,6 +1283,8 @@ static int bam_dma_remove(struct platform_device *pdev)
 	struct bam_device *bdev = platform_get_drvdata(pdev);
 	u32 i;
 
+	pm_runtime_force_suspend(&pdev->dev);
+
 	of_dma_controller_free(pdev->dev.of_node);
 	dma_async_device_unregister(&bdev->common);
 
@@ -1260,11 +1312,67 @@ static int bam_dma_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static int bam_dma_runtime_suspend(struct device *dev)
+{
+	struct bam_device *bdev = dev_get_drvdata(dev);
+
+	clk_disable(bdev->bamclk);
+
+	return 0;
+}
+
+static int bam_dma_runtime_resume(struct device *dev)
+{
+	struct bam_device *bdev = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_enable(bdev->bamclk);
+	if (ret < 0) {
+		dev_err(dev, "clk_enable failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+#ifdef CONFIG_PM_SLEEP
+static int bam_dma_suspend(struct device *dev)
+{
+	struct bam_device *bdev = dev_get_drvdata(dev);
+
+	pm_runtime_force_suspend(dev);
+
+	clk_unprepare(bdev->bamclk);
+
+	return 0;
+}
+
+static int bam_dma_resume(struct device *dev)
+{
+	struct bam_device *bdev = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare(bdev->bamclk);
+	if (ret)
+		return ret;
+
+	pm_runtime_force_resume(dev);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops bam_dma_pm_ops = {
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume)
+	SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume,
+			   NULL)
+};
+
 static struct platform_driver bam_dma_driver = {
 	.probe = bam_dma_probe,
 	.remove = bam_dma_remove,
 	.driver = {
 		.name = "bam-dma-engine",
+		.pm = &bam_dma_pm_ops,
 		.of_match_table = bam_of_match,
 	},
 };