author	Vinod Koul <vinod.koul@intel.com>	2016-07-16 10:39:34 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2016-07-16 10:39:34 -0400
commit	69fa54a009e33073f9b3a710d21aac30d460d063 (patch)
tree	e30793a106ebbbe58e785096b963e34f0755d8f9
parent	488bace2f1996ad38a80daf949f83fd3461a1564 (diff)
parent	184f337e6d859c20e0d3c6954980cbb744e014fb (diff)
Merge branch 'topic/bam' into for-linus
-rw-r--r--	drivers/dma/bcm2835-dma.c	5
-rw-r--r--	drivers/dma/qcom/bam_dma.c	109
2 files changed, 111 insertions, 3 deletions
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 6149b27c33ad..c8dd5b00c1b4 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -393,11 +393,12 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
 	unsigned int sg_len)
 {
 	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
-	size_t max_len = bcm2835_dma_max_frame_length(c);
-	unsigned int i, len;
+	size_t len, max_len;
+	unsigned int i;
 	dma_addr_t addr;
 	struct scatterlist *sgent;
 
+	max_len = bcm2835_dma_max_frame_length(c);
 	for_each_sg(sgl, sgent, sg_len, i) {
 		for (addr = sg_dma_address(sgent), len = sg_dma_len(sgent);
 		     len > 0;
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 969b48176745..03c4eb3fd314 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -48,6 +48,7 @@
 #include <linux/of_dma.h>
 #include <linux/clk.h>
 #include <linux/dmaengine.h>
+#include <linux/pm_runtime.h>
 
 #include "../dmaengine.h"
 #include "../virt-dma.h"
@@ -58,6 +59,8 @@ struct bam_desc_hw {
 	__le16 flags;
 };
 
+#define BAM_DMA_AUTOSUSPEND_DELAY 100
+
 #define DESC_FLAG_INT BIT(15)
 #define DESC_FLAG_EOT BIT(14)
 #define DESC_FLAG_EOB BIT(13)
@@ -527,12 +530,17 @@ static void bam_free_chan(struct dma_chan *chan)
 	struct bam_device *bdev = bchan->bdev;
 	u32 val;
 	unsigned long flags;
+	int ret;
+
+	ret = pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return;
 
 	vchan_free_chan_resources(to_virt_chan(chan));
 
 	if (bchan->curr_txd) {
 		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
-		return;
+		goto err;
 	}
 
 	spin_lock_irqsave(&bchan->vc.lock, flags);
@@ -550,6 +558,10 @@ static void bam_free_chan(struct dma_chan *chan)
 
 	/* disable irq */
 	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
+
+err:
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
 }
 
 /**
@@ -696,11 +708,18 @@ static int bam_pause(struct dma_chan *chan)
 	struct bam_chan *bchan = to_bam_chan(chan);
 	struct bam_device *bdev = bchan->bdev;
 	unsigned long flag;
+	int ret;
+
+	ret = pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return ret;
 
 	spin_lock_irqsave(&bchan->vc.lock, flag);
 	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
 	bchan->paused = 1;
 	spin_unlock_irqrestore(&bchan->vc.lock, flag);
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
 
 	return 0;
 }
@@ -715,11 +734,18 @@ static int bam_resume(struct dma_chan *chan)
 	struct bam_chan *bchan = to_bam_chan(chan);
 	struct bam_device *bdev = bchan->bdev;
 	unsigned long flag;
+	int ret;
+
+	ret = pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return ret;
 
 	spin_lock_irqsave(&bchan->vc.lock, flag);
 	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
 	bchan->paused = 0;
 	spin_unlock_irqrestore(&bchan->vc.lock, flag);
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
 
 	return 0;
 }
@@ -795,6 +821,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
 {
 	struct bam_device *bdev = data;
 	u32 clr_mask = 0, srcs = 0;
+	int ret;
 
 	srcs |= process_channel_irqs(bdev);
 
@@ -802,6 +829,10 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
 	if (srcs & P_IRQ)
 		tasklet_schedule(&bdev->task);
 
+	ret = pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return ret;
+
 	if (srcs & BAM_IRQ) {
 		clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
 
@@ -814,6 +845,9 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
 		writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
 	}
 
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
+
 	return IRQ_HANDLED;
 }
 
@@ -893,6 +927,7 @@ static void bam_start_dma(struct bam_chan *bchan)
 	struct bam_desc_hw *desc;
 	struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
 					sizeof(struct bam_desc_hw));
+	int ret;
 
 	lockdep_assert_held(&bchan->vc.lock);
 
@@ -904,6 +939,10 @@ static void bam_start_dma(struct bam_chan *bchan)
 	async_desc = container_of(vd, struct bam_async_desc, vd);
 	bchan->curr_txd = async_desc;
 
+	ret = pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return;
+
 	/* on first use, initialize the channel hardware */
 	if (!bchan->initialized)
 		bam_chan_init_hw(bchan, async_desc->dir);
@@ -946,6 +985,9 @@ static void bam_start_dma(struct bam_chan *bchan)
 	wmb();
 	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
 			bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
+
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
 }
 
 /**
@@ -970,6 +1012,7 @@ static void dma_tasklet(unsigned long data)
 		bam_start_dma(bchan);
 		spin_unlock_irqrestore(&bchan->vc.lock, flags);
 	}
+
 }
 
 /**
@@ -1213,6 +1256,13 @@ static int bam_dma_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_unregister_dma;
 
+	pm_runtime_irq_safe(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
 	return 0;
 
 err_unregister_dma:
@@ -1233,6 +1283,8 @@ static int bam_dma_remove(struct platform_device *pdev)
 	struct bam_device *bdev = platform_get_drvdata(pdev);
 	u32 i;
 
+	pm_runtime_force_suspend(&pdev->dev);
+
 	of_dma_controller_free(pdev->dev.of_node);
 	dma_async_device_unregister(&bdev->common);
 
@@ -1260,11 +1312,66 @@ static int bam_dma_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static int __maybe_unused bam_dma_runtime_suspend(struct device *dev)
+{
+	struct bam_device *bdev = dev_get_drvdata(dev);
+
+	clk_disable(bdev->bamclk);
+
+	return 0;
+}
+
+static int __maybe_unused bam_dma_runtime_resume(struct device *dev)
+{
+	struct bam_device *bdev = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_enable(bdev->bamclk);
+	if (ret < 0) {
+		dev_err(dev, "clk_enable failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int __maybe_unused bam_dma_suspend(struct device *dev)
+{
+	struct bam_device *bdev = dev_get_drvdata(dev);
+
+	pm_runtime_force_suspend(dev);
+
+	clk_unprepare(bdev->bamclk);
+
+	return 0;
+}
+
+static int __maybe_unused bam_dma_resume(struct device *dev)
+{
+	struct bam_device *bdev = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare(bdev->bamclk);
+	if (ret)
+		return ret;
+
+	pm_runtime_force_resume(dev);
+
+	return 0;
+}
+
+static const struct dev_pm_ops bam_dma_pm_ops = {
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume)
+	SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume,
+				NULL)
+};
+
 static struct platform_driver bam_dma_driver = {
 	.probe = bam_dma_probe,
 	.remove = bam_dma_remove,
 	.driver = {
 		.name = "bam-dma-engine",
+		.pm = &bam_dma_pm_ops,
 		.of_match_table = bam_of_match,
 	},
 };
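
The recurring pattern in the bam_dma hunks above is a runtime-PM bracket around each block of register access: take a reference with pm_runtime_get_sync() before touching the hardware, then mark the device busy and drop the reference with the autosuspend variant, so the clock (gated in the runtime-suspend callback) is only stopped after BAM_DMA_AUTOSUSPEND_DELAY ms of idleness. A minimal sketch of that bracket follows; the helper name my_touch_hw and its arguments are placeholders for illustration, not names from the patch.

/* Hypothetical illustration of the get/put-autosuspend bracket used above. */
#include <linux/io.h>
#include <linux/pm_runtime.h>

static int my_touch_hw(struct device *dev, void __iomem *reg)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* resume device; runtime-resume callback re-enables the clock */
	if (ret < 0)
		return ret;			/* patch style: bail out if resume failed */

	writel_relaxed(1, reg);			/* safe here: device is powered and clocked */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop the reference; suspend after the delay */

	return 0;
}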