author     Guennadi Liakhovetski <g.liakhovetski@gmx.de>    2011-08-18 10:55:27 -0400
committer  Vinod Koul <vinod.koul@linux.intel.com>          2011-09-28 00:37:40 -0400
commit     7a1cd9ad87979744e1510782b25c38feb9602739 (patch)
tree       93dd2e114d474096fd654707d61a9e0f96a52a6b /drivers/dma
parent     b4dae6e1adaedc9c343b5f00332312d649600bdc (diff)
dma: shdma: transfer based runtime PM
Currently the shdma dmaengine driver uses runtime PM to save power when no channel on the specific controller is requested by a user. This patch switches the driver to count individual DMA transfers. That way the controller can be powered down between transfers, even if some of its channels are in use.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
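In outline, the patch replaces the per-client runtime-PM reference taken in alloc_chan_resources() with a per-queue reference: the first descriptor submitted to an empty ld_queue powers the controller up, and the reference is dropped again once the queue drains. Below is a minimal sketch of that pattern (illustrative only, not code from this patch; the xfer_chan type and both helper names are invented):

#include <linux/device.h>
#include <linux/list.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

struct xfer_chan {
        struct device *dev;
        spinlock_t lock;
        struct list_head queue;         /* outstanding transfers */
};

/* Queue one descriptor; take a PM reference only on empty -> busy. */
static void xfer_submit(struct xfer_chan *c, struct list_head *desc)
{
        bool power_up;

        spin_lock_irq(&c->lock);
        power_up = list_empty(&c->queue);       /* first transfer? */
        list_add_tail(desc, &c->queue);
        spin_unlock_irq(&c->lock);

        if (power_up)
                pm_runtime_get(c->dev);         /* one reference per active queue */
}

/* Retire one descriptor; drop the reference once the queue is empty. */
static void xfer_complete(struct xfer_chan *c, struct list_head *desc)
{
        bool power_down;

        spin_lock_irq(&c->lock);
        list_del(desc);
        power_down = list_empty(&c->queue);     /* last transfer done? */
        spin_unlock_irq(&c->lock);

        if (power_down)
                pm_runtime_put(c->dev);         /* balances xfer_submit() */
}

On top of this, the real driver must also cope with pm_runtime_get() completing asynchronously: issue_pending() marks the channel DMAE_PM_PENDING while power-up is still in flight, and tx_submit() reconfigures the channel and kicks off the queued transfer once the device is known to be up (see the DMAE_PM_* state machine added to shdma.h below).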
Diffstat (limited to 'drivers/dma')
-rw-r--r--    drivers/dma/shdma.c    94
-rw-r--r--    drivers/dma/shdma.h    7
2 files changed, 75 insertions, 26 deletions
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index e7bb7479b187..81809c2b46ab 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -259,15 +259,23 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
         return 0;
 }
 
+static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
+
 static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 {
         struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
         struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
+        struct sh_dmae_slave *param = tx->chan->private;
         dma_async_tx_callback callback = tx->callback;
         dma_cookie_t cookie;
-        unsigned long flags;
+        bool power_up;
 
-        spin_lock_irqsave(&sh_chan->desc_lock, flags);
+        spin_lock_irq(&sh_chan->desc_lock);
+
+        if (list_empty(&sh_chan->ld_queue))
+                power_up = true;
+        else
+                power_up = false;
 
         cookie = sh_chan->common.cookie;
         cookie++;
@@ -303,7 +311,38 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
                 tx->cookie, &last->async_tx, sh_chan->id,
                 desc->hw.sar, desc->hw.tcr, desc->hw.dar);
 
-        spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
+        if (power_up) {
+                sh_chan->pm_state = DMAE_PM_BUSY;
+
+                pm_runtime_get(sh_chan->dev);
+
+                spin_unlock_irq(&sh_chan->desc_lock);
+
+                pm_runtime_barrier(sh_chan->dev);
+
+                spin_lock_irq(&sh_chan->desc_lock);
+
+                /* Have we been reset, while waiting? */
+                if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
+                        dev_dbg(sh_chan->dev, "Bring up channel %d\n",
+                                sh_chan->id);
+                        if (param) {
+                                const struct sh_dmae_slave_config *cfg =
+                                        param->config;
+
+                                dmae_set_dmars(sh_chan, cfg->mid_rid);
+                                dmae_set_chcr(sh_chan, cfg->chcr);
+                        } else {
+                                dmae_init(sh_chan);
+                        }
+
+                        if (sh_chan->pm_state == DMAE_PM_PENDING)
+                                sh_chan_xfer_ld_queue(sh_chan);
+                        sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+                }
+        }
+
+        spin_unlock_irq(&sh_chan->desc_lock);
 
         return cookie;
 }
@@ -347,8 +386,6 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
         struct sh_dmae_slave *param = chan->private;
         int ret;
 
-        pm_runtime_get_sync(sh_chan->dev);
-
         /*
          * This relies on the guarantee from dmaengine that alloc_chan_resources
          * never runs concurrently with itself or free_chan_resources.
@@ -368,11 +405,6 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
                 }
 
                 param->config = cfg;
-
-                dmae_set_dmars(sh_chan, cfg->mid_rid);
-                dmae_set_chcr(sh_chan, cfg->chcr);
-        } else {
-                dmae_init(sh_chan);
         }
 
         while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
@@ -401,7 +433,6 @@ edescalloc:
 etestused:
 efindslave:
         chan->private = NULL;
-        pm_runtime_put(sh_chan->dev);
         return ret;
 }
 
@@ -413,7 +444,6 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
         struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
         struct sh_desc *desc, *_desc;
         LIST_HEAD(list);
-        int descs = sh_chan->descs_allocated;
 
         /* Protect against ISR */
         spin_lock_irq(&sh_chan->desc_lock);
@@ -440,9 +470,6 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 
         spin_unlock_irq(&sh_chan->desc_lock);
 
-        if (descs > 0)
-                pm_runtime_put(sh_chan->dev);
-
         list_for_each_entry_safe(desc, _desc, &list, node)
                 kfree(desc);
 }
@@ -676,7 +703,6 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                                           struct sh_desc, node);
                 desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
                         sh_chan->xmit_shift;
-
         }
         spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
 
@@ -761,7 +787,13 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
                      async_tx_test_ack(&desc->async_tx)) || all) {
                         /* Remove from ld_queue list */
                         desc->mark = DESC_IDLE;
+
                         list_move(&desc->node, &sh_chan->ld_free);
+
+                        if (list_empty(&sh_chan->ld_queue)) {
+                                dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
+                                pm_runtime_put(sh_chan->dev);
+                        }
                 }
         }
 
@@ -791,16 +823,14 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
                 ;
 }
 
+/* Called under spin_lock_irq(&sh_chan->desc_lock) */
 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 {
         struct sh_desc *desc;
 
-        spin_lock_irq(&sh_chan->desc_lock);
         /* DMA work check */
-        if (dmae_is_busy(sh_chan)) {
-                spin_unlock_irq(&sh_chan->desc_lock);
+        if (dmae_is_busy(sh_chan))
                 return;
-        }
 
         /* Find the first not transferred descriptor */
         list_for_each_entry(desc, &sh_chan->ld_queue, node)
@@ -813,14 +843,18 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
                         dmae_start(sh_chan);
                         break;
                 }
-
-        spin_unlock_irq(&sh_chan->desc_lock);
 }
 
 static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
 {
         struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
-        sh_chan_xfer_ld_queue(sh_chan);
+
+        spin_lock_irq(&sh_chan->desc_lock);
+        if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
+                sh_chan_xfer_ld_queue(sh_chan);
+        else
+                sh_chan->pm_state = DMAE_PM_PENDING;
+        spin_unlock_irq(&sh_chan->desc_lock);
 }
 
 static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
@@ -913,6 +947,12 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
 
                 list_splice_init(&sh_chan->ld_queue, &dl);
 
+                if (!list_empty(&dl)) {
+                        dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
+                        pm_runtime_put(sh_chan->dev);
+                }
+                sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+
                 spin_unlock(&sh_chan->desc_lock);
 
                 /* Complete all */
@@ -966,10 +1006,10 @@ static void dmae_do_tasklet(unsigned long data)
                         break;
                 }
         }
-        spin_unlock_irq(&sh_chan->desc_lock);
-
         /* Next desc */
         sh_chan_xfer_ld_queue(sh_chan);
+        spin_unlock_irq(&sh_chan->desc_lock);
+
         sh_dmae_chan_ld_cleanup(sh_chan, false);
 }
 
@@ -1037,7 +1077,9 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
                 return -ENOMEM;
         }
 
-        /* copy struct dma_device */
+        new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+
+        /* reference struct dma_device */
         new_sh_chan->common.device = &shdev->common;
 
         new_sh_chan->dev = shdev->common.dev;
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index dc56576f9fdb..2b55a276dc5b 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -23,6 +23,12 @@
 
 struct device;
 
+enum dmae_pm_state {
+        DMAE_PM_ESTABLISHED,
+        DMAE_PM_BUSY,
+        DMAE_PM_PENDING,
+};
+
 struct sh_dmae_chan {
         dma_cookie_t completed_cookie;  /* The maximum cookie completed */
         spinlock_t desc_lock;           /* Descriptor operation lock */
@@ -38,6 +44,7 @@ struct sh_dmae_chan {
         u32 __iomem *base;
         char dev_id[16];                /* unique name per DMAC of channel */
         int pm_error;
+        enum dmae_pm_state pm_state;
 };
 
 struct sh_dmae_device {