author    Linus Torvalds <torvalds@linux-foundation.org>	2012-01-17 21:40:24 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>	2012-01-17 21:40:24 -0500
commit    57f2685c16fa8e0cb86e4bc7c8ac33bfed943819
tree      96a42fe632687c8486c250c4805bf1d4c9c34d19	/drivers/dma/shdma.c
parent    488a9d018256dc9f29e041c0360445b6d25eea9a
parent    e08b881a69d638175bfa99b5af4d72b731633ea7
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (53 commits)
  ARM: mach-shmobile: specify CHCLR registers on SH7372
  dma: shdma: fix runtime PM: clear channel buffers on reset
  dma/imx-sdma: save irq flags when use spin_lock in sdma_tx_submit
  dmaengine/ste_dma40: clear LNK on channel startup
  dmaengine: intel_mid_dma: remove legacy pm interface
  ASoC: mxs: correct 'direction' of device_prep_dma_cyclic
  dmaengine: intel_mid_dma: error path fix
  dmaengine: intel_mid_dma: locking and freeing fixes
  mtd: gpmi-nand: move to dma_transfer_direction
  mtd: fix compile error for gpmi-nand
  mmc: mxs-mmc: fix the dma_transfer_direction migration
  dmaengine: add DMA_TRANS_NONE to dma_transfer_direction
  dma: mxs-dma: Don't use CLKGATE bits in CTRL0 to disable DMA channels
  dma: mxs-dma: make mxs_dma_prep_slave_sg() multi user safe
  dma: mxs-dma: Always leave mxs_dma_init() with the clock disabled.
  dma: mxs-dma: fix a typo in comment
  DMA: PL330: Remove pm_runtime_xxx calls from pl330 probe/remove
  video i.MX IPU: Fix display connections
  i.MX IPU DMA: Fix wrong burstsize settings
  dmaengine/ste_dma40: allow fixed physical channel
  ...

Fix up conflicts in drivers/dma/{Kconfig,mxs-dma.c,pl330.c}. The conflicts looked pretty trivial, but I'll ask people to verify them.
Diffstat (limited to 'drivers/dma/shdma.c')
-rw-r--r--	drivers/dma/shdma.c	72
1 file changed, 43 insertions, 29 deletions
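Several of the merged patches move drivers from enum dma_data_direction to the new enum dma_transfer_direction, and that migration is also most of the shdma.c churn below. For reference, a sketch of the transfer-direction values used in this diff (the authoritative definition lives in include/linux/dmaengine.h):

/* Sketch of the dmaengine transfer directions referenced in this diff;
 * see include/linux/dmaengine.h for the authoritative definition. */
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,		/* memcpy: both addresses increment */
	DMA_MEM_TO_DEV,		/* slave TX: source increments, device FIFO fixed */
	DMA_DEV_TO_MEM,		/* slave RX: destination increments, device FIFO fixed */
	DMA_DEV_TO_DEV,		/* device-to-device */
	DMA_TRANS_NONE,		/* added by one of the merged patches */
};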
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 81809c2b46ab..54043cd831c8 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -23,7 +23,6 @@
 #include <linux/interrupt.h>
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
-#include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/sh_dma.h>
@@ -57,6 +56,15 @@ static LIST_HEAD(sh_dmae_devices);
 static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
 
 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
+static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
+
+static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+	__raw_writel(data, shdev->chan_reg +
+		     shdev->pdata->channel[sh_dc->id].chclr_offset);
+}
 
 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
 {
@@ -129,6 +137,15 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
 
 	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
 
+	if (shdev->pdata->chclr_present) {
+		int i;
+		for (i = 0; i < shdev->pdata->channel_num; i++) {
+			struct sh_dmae_chan *sh_chan = shdev->chan[i];
+			if (sh_chan)
+				chclr_write(sh_chan, 0);
+		}
+	}
+
 	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
 
 	dmaor = dmaor_read(shdev);
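The reset path added above walks shdev->pdata->channel[] and writes each channel's CHCLR register when the platform declares one. A hypothetical platform-data fragment that would enable this path could look as follows; the my_* names and the offset values are illustrative only (the real SH7372 values come from the companion arch/arm/mach-shmobile patch), while the field names (channel, channel_num, chclr_present, chclr_offset) are the ones this diff relies on:

/* Hypothetical platform data enabling the CHCLR clear-on-reset path;
 * offsets below are made up for illustration. */
static const struct sh_dmae_channel my_dmae_channels[] = {
	{ .chclr_offset = 0x220 /* illustrative */, },
	{ .chclr_offset = 0x230 /* illustrative */, },
};

static struct sh_dmae_pdata my_dmae_pdata = {
	.channel	= my_dmae_channels,
	.channel_num	= ARRAY_SIZE(my_dmae_channels),
	.chclr_present	= 1,
};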
@@ -139,6 +156,10 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
 		dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
 		return -EIO;
 	}
+	if (shdev->pdata->dmaor_init & ~dmaor)
+		dev_warn(shdev->common.dev,
+			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
+			 dmaor, shdev->pdata->dmaor_init);
 	return 0;
 }
 
@@ -259,8 +280,6 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 	return 0;
 }
 
-static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
-
 static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
@@ -340,6 +359,8 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 			sh_chan_xfer_ld_queue(sh_chan);
 			sh_chan->pm_state = DMAE_PM_ESTABLISHED;
 		}
+	} else {
+		sh_chan->pm_state = DMAE_PM_PENDING;
 	}
 
 	spin_unlock_irq(&sh_chan->desc_lock);
@@ -479,19 +500,19 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
  * @sh_chan:	DMA channel
  * @flags:	DMA transfer flags
  * @dest:	destination DMA address, incremented when direction equals
- *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
+ *		DMA_DEV_TO_MEM
  * @src:	source DMA address, incremented when direction equals
- *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
+ *		DMA_MEM_TO_DEV
  * @len:	DMA transfer length
  * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
  * @direction:	needed for slave DMA to decide which address to keep constant,
- *		equals DMA_BIDIRECTIONAL for MEMCPY
+ *		equals DMA_MEM_TO_MEM for MEMCPY
  * Returns 0 or an error
  * Locks: called with desc_lock held
  */
 static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
 	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
-	struct sh_desc **first, enum dma_data_direction direction)
+	struct sh_desc **first, enum dma_transfer_direction direction)
 {
 	struct sh_desc *new;
 	size_t copy_size;
@@ -531,9 +552,9 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
 	new->direction = direction;
 
 	*len -= copy_size;
-	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
+	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
 		*src += copy_size;
-	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
+	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
 		*dest += copy_size;
 
 	return new;
@@ -546,12 +567,12 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
  * converted to scatter-gather to guarantee consistent locking and a correct
  * list manipulation. For slave DMA direction carries the usual meaning, and,
  * logically, the SG list is RAM and the addr variable contains slave address,
- * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
+ * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
  * and the SG list contains only one element and points at the source buffer.
  */
 static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
 	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
-	enum dma_data_direction direction, unsigned long flags)
+	enum dma_transfer_direction direction, unsigned long flags)
 {
 	struct scatterlist *sg;
 	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
@@ -592,7 +613,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
 		dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
 			i, sg, len, (unsigned long long)sg_addr);
 
-		if (direction == DMA_FROM_DEVICE)
+		if (direction == DMA_DEV_TO_MEM)
 			new = sh_dmae_add_desc(sh_chan, flags,
 					&sg_addr, addr, &len, &first,
 					direction);
@@ -646,13 +667,13 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	sg_dma_address(&sg) = dma_src;
 	sg_dma_len(&sg) = len;
 
-	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
+	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
 			       flags);
 }
 
 static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
 	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
-	enum dma_data_direction direction, unsigned long flags)
+	enum dma_transfer_direction direction, unsigned long flags)
 {
 	struct sh_dmae_slave *param;
 	struct sh_dmae_chan *sh_chan;
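The prep_slave_sg callback above is what a peripheral driver reaches through the generic dmaengine client API. A minimal, hypothetical client sketch (my_start_rx, rx_buf and rx_len are illustrative names, not from this driver; it assumes <linux/dmaengine.h>, <linux/dma-mapping.h> and <linux/scatterlist.h>) using the new direction enum might look like:

/* Hypothetical client-side sketch: map a buffer and ask an already-requested
 * channel to move data from a device FIFO into memory (DMA_DEV_TO_MEM).
 * Note that the DMA-mapping API still takes enum dma_data_direction. */
static int my_start_rx(struct dma_chan *chan, void *rx_buf, size_t rx_len)
{
	struct scatterlist sg;
	struct dma_async_tx_descriptor *desc;

	sg_init_one(&sg, rx_buf, rx_len);
	if (dma_map_sg(chan->device->dev, &sg, 1, DMA_FROM_DEVICE) != 1)
		return -EIO;

	desc = chan->device->device_prep_slave_sg(chan, &sg, 1,
						  DMA_DEV_TO_MEM,
						  DMA_PREP_INTERRUPT);
	if (!desc)
		return -EIO;

	desc->tx_submit(desc);		/* for shdma this lands in sh_dmae_tx_submit() */
	dma_async_issue_pending(chan);
	return 0;
}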
@@ -996,7 +1017,7 @@ static void dmae_do_tasklet(unsigned long data)
 	spin_lock_irq(&sh_chan->desc_lock);
 	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
 		if (desc->mark == DESC_SUBMITTED &&
-		    ((desc->direction == DMA_FROM_DEVICE &&
+		    ((desc->direction == DMA_DEV_TO_MEM &&
 		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
 		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
 			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
@@ -1225,6 +1246,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, shdev);
 
+	shdev->common.dev = &pdev->dev;
+
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_get_sync(&pdev->dev);
 
@@ -1254,7 +1277,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
 	shdev->common.device_control = sh_dmae_control;
 
-	shdev->common.dev = &pdev->dev;
 	/* Default transfer size of 32 bytes requires 32-byte alignment */
 	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
 
@@ -1435,22 +1457,17 @@ static int sh_dmae_runtime_resume(struct device *dev)
 #ifdef CONFIG_PM
 static int sh_dmae_suspend(struct device *dev)
 {
-	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
-	int i;
-
-	for (i = 0; i < shdev->pdata->channel_num; i++) {
-		struct sh_dmae_chan *sh_chan = shdev->chan[i];
-		if (sh_chan->descs_allocated)
-			sh_chan->pm_error = pm_runtime_put_sync(dev);
-	}
-
 	return 0;
 }
 
 static int sh_dmae_resume(struct device *dev)
 {
 	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
-	int i;
+	int i, ret;
+
+	ret = sh_dmae_rst(shdev);
+	if (ret < 0)
+		dev_err(dev, "Failed to reset!\n");
 
 	for (i = 0; i < shdev->pdata->channel_num; i++) {
 		struct sh_dmae_chan *sh_chan = shdev->chan[i];
@@ -1459,9 +1476,6 @@ static int sh_dmae_resume(struct device *dev)
 		if (!sh_chan->descs_allocated)
 			continue;
 
-		if (!sh_chan->pm_error)
-			pm_runtime_get_sync(dev);
-
 		if (param) {
 			const struct sh_dmae_slave_config *cfg = param->config;
 			dmae_set_dmars(sh_chan, cfg->mid_rid);
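With the change above, a system resume always resets the controller via sh_dmae_rst(), which now also clears each channel through CHCLR when the platform data declares chclr_present. For context, the suspend/resume pair and the runtime PM callbacks are normally exposed through a dev_pm_ops table referenced from the platform driver's .driver.pm field; a sketch of that wiring (not copied from the file, and sh_dmae_runtime_suspend is assumed as the counterpart of the sh_dmae_runtime_resume shown in the hunk header above) would be:

/* Sketch only: how the PM callbacks above are typically wired up. */
static const struct dev_pm_ops sh_dmae_pm = {
	.suspend		= sh_dmae_suspend,
	.resume			= sh_dmae_resume,
	.runtime_suspend	= sh_dmae_runtime_suspend,	/* assumed counterpart */
	.runtime_resume		= sh_dmae_runtime_resume,
};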