author    Linus Torvalds <torvalds@linux-foundation.org>   2012-07-24 20:12:54 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>   2012-07-24 20:12:54 -0400
commit    c511dc1fb6bee58363eb203d53393784f2589d02
tree      20d91e01aec30c462965fba27cfea0c80744d7ed /drivers/dma
parent    9161c3b796a2841a9a7be3d9c9dd121269ce90e8
parent    634332502366554849fe37e88d05ec0a13e550c8
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine update from Vinod Koul:
"This time we have a new dmaengine driver from the tegra folks. Also
we have Guennadi's cleanup of sh drivers which includes a library for
sh drivers. And the usual odd fixes in a bunch of drivers and some nice
cleanup of dw_dmac from Andy."
Fix up conflicts in drivers/mmc/host/sh_mmcif.c
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (46 commits)
dmaengine: Cleanup logging messages
mmc: sh_mmcif: switch to the new DMA channel allocation and configuration
dma: sh: provide a migration path for slave drivers to stop using .private
dma: sh: use an integer slave ID to improve API compatibility
dmaengine: shdma: prepare to stop using struct dma_chan::private
sh: remove unused DMA device pointer from SIU platform data
ASoC: siu: don't use DMA device for channel filtering
dmaengine: shdma: (cosmetic) simplify a static function
dmaengine: at_hdmac: add a few const qualifiers
dw_dmac: use 'u32' for LLI structure members, not dma_addr_t
dw_dmac: mark dwc_dump_lli inline
dma: mxs-dma: Export missing symbols from mxs-dma.c
dma: shdma: convert to the shdma base library
ASoC: fsi: prepare for conversion to the shdma base library
usb: renesas_usbhs: prepare for conversion to the shdma base library
ASoC: siu: prepare for conversion to the shdma base library
serial: sh-sci: prepare for conversion to the shdma base library
mmc: sh_mobile_sdhi: prepare for conversion to the shdma base library
mmc: sh_mmcif: remove unneeded struct sh_mmcif_dma, prepare to shdma conversion
dma: shdma: prepare for conversion to the shdma base library
...
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig                                            26
-rw-r--r--  drivers/dma/Makefile                                            4
-rw-r--r--  drivers/dma/at_hdmac.c                                         11
-rw-r--r--  drivers/dma/coh901318.c                                        72
-rw-r--r--  drivers/dma/dmaengine.c                                        20
-rw-r--r--  drivers/dma/dw_dmac.c                                         182
-rw-r--r--  drivers/dma/dw_dmac_regs.h                                      8
-rw-r--r--  drivers/dma/mmp_tdma.c                                        610
-rw-r--r--  drivers/dma/mxs-dma.c                                           3
-rw-r--r--  drivers/dma/sh/Makefile                                         2
-rw-r--r--  drivers/dma/sh/shdma-base.c                                   934
-rw-r--r--  drivers/dma/sh/shdma.c                                        943
-rw-r--r--  drivers/dma/sh/shdma.h (renamed from drivers/dma/shdma.h)      46
-rw-r--r--  drivers/dma/shdma.c                                          1524
-rw-r--r--  drivers/dma/tegra20-apb-dma.c                                1415
15 files changed, 4083 insertions, 1717 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index aadeb5be9db..d45cf1bcbde 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -148,6 +148,20 @@ config TXX9_DMAC | |||
148 | Support the TXx9 SoC internal DMA controller. This can be | 148 | Support the TXx9 SoC internal DMA controller. This can be |
149 | integrated in chips such as the Toshiba TX4927/38/39. | 149 | integrated in chips such as the Toshiba TX4927/38/39. |
150 | 150 | ||
151 | config TEGRA20_APB_DMA | ||
152 | bool "NVIDIA Tegra20 APB DMA support" | ||
153 | depends on ARCH_TEGRA | ||
154 | select DMA_ENGINE | ||
155 | help | ||
156 | Support for the NVIDIA Tegra20 APB DMA controller driver. The | ||
157 | DMA controller is having multiple DMA channel which can be | ||
158 | configured for different peripherals like audio, UART, SPI, | ||
159 | I2C etc which is in APB bus. | ||
160 | This DMA controller transfers data from memory to peripheral fifo | ||
161 | or vice versa. It does not support memory to memory data transfer. | ||
162 | |||
163 | |||
164 | |||
151 | config SH_DMAE | 165 | config SH_DMAE |
152 | tristate "Renesas SuperH DMAC support" | 166 | tristate "Renesas SuperH DMAC support" |
153 | depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) | 167 | depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) |
@@ -237,7 +251,7 @@ config IMX_DMA | |||
237 | 251 | ||
238 | config MXS_DMA | 252 | config MXS_DMA |
239 | bool "MXS DMA support" | 253 | bool "MXS DMA support" |
240 | depends on SOC_IMX23 || SOC_IMX28 | 254 | depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q |
241 | select STMP_DEVICE | 255 | select STMP_DEVICE |
242 | select DMA_ENGINE | 256 | select DMA_ENGINE |
243 | help | 257 | help |
@@ -260,6 +274,16 @@ config DMA_SA11X0 | |||
260 | SA-1110 SoCs. This DMA engine can only be used with on-chip | 274 | SA-1110 SoCs. This DMA engine can only be used with on-chip |
261 | devices. | 275 | devices. |
262 | 276 | ||
277 | config MMP_TDMA | ||
278 | bool "MMP Two-Channel DMA support" | ||
279 | depends on ARCH_MMP | ||
280 | select DMA_ENGINE | ||
281 | help | ||
282 | Support the MMP Two-Channel DMA engine. | ||
283 | This engine used for MMP Audio DMA and pxa910 SQU. | ||
284 | |||
285 | Say Y here if you enabled MMP ADMA, otherwise say N. | ||
286 | |||
263 | config DMA_ENGINE | 287 | config DMA_ENGINE |
264 | bool | 288 | bool |
265 | 289 | ||
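Both new Kconfig entries (TEGRA20_APB_DMA and MMP_TDMA) add slave-DMA controllers that clients drive through the generic dmaengine API rather than a controller-specific interface. As a rough illustration only, not part of this patch, a peripheral driver on one of these SoCs would typically acquire and program a channel along these lines; the filter function, FIFO address, bus width and burst values below are placeholders:

```c
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical client-side setup of a memory-to-peripheral channel.
 * "filter"/"filter_arg" pick a channel of the wanted controller.
 */
static struct dma_chan *example_setup_tx_chan(dma_addr_t fifo_addr,
					      dma_filter_fn filter,
					      void *filter_arg)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,	/* peripheral FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	};
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, filter, filter_arg);
	if (!chan)
		return NULL;

	if (dmaengine_slave_config(chan, &cfg)) {
		dma_release_channel(chan);
		return NULL;
	}
	return chan;
}
```

Descriptors would then be obtained with dmaengine_prep_slave_sg() (or a cyclic prep for audio), submitted with dmaengine_submit() and started with dma_async_issue_pending().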
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 86b795baba9..640356add0a 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o | |||
14 | obj-$(CONFIG_AT_HDMAC) += at_hdmac.o | 14 | obj-$(CONFIG_AT_HDMAC) += at_hdmac.o |
15 | obj-$(CONFIG_MX3_IPU) += ipu/ | 15 | obj-$(CONFIG_MX3_IPU) += ipu/ |
16 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o | 16 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o |
17 | obj-$(CONFIG_SH_DMAE) += shdma.o | 17 | obj-$(CONFIG_SH_DMAE) += sh/ |
18 | obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o | 18 | obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o |
19 | obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ | 19 | obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ |
20 | obj-$(CONFIG_IMX_SDMA) += imx-sdma.o | 20 | obj-$(CONFIG_IMX_SDMA) += imx-sdma.o |
@@ -23,8 +23,10 @@ obj-$(CONFIG_MXS_DMA) += mxs-dma.o | |||
23 | obj-$(CONFIG_TIMB_DMA) += timb_dma.o | 23 | obj-$(CONFIG_TIMB_DMA) += timb_dma.o |
24 | obj-$(CONFIG_SIRF_DMA) += sirf-dma.o | 24 | obj-$(CONFIG_SIRF_DMA) += sirf-dma.o |
25 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o | 25 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o |
26 | obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o | ||
26 | obj-$(CONFIG_PL330_DMA) += pl330.o | 27 | obj-$(CONFIG_PL330_DMA) += pl330.o |
27 | obj-$(CONFIG_PCH_DMA) += pch_dma.o | 28 | obj-$(CONFIG_PCH_DMA) += pch_dma.o |
28 | obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o | 29 | obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o |
29 | obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o | 30 | obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o |
30 | obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o | 31 | obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o |
32 | obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o | ||
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7292aa87b2d..3934fcc4e00 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -9,10 +9,9 @@ | |||
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * | 11 | * |
12 | * This supports the Atmel AHB DMA Controller, | 12 | * This supports the Atmel AHB DMA Controller found in several Atmel SoCs. |
13 | * | 13 | * The only Atmel DMA Controller that is not covered by this driver is the one |
14 | * The driver has currently been tested with the Atmel AT91SAM9RL | 14 | * found on AT91SAM9263. |
15 | * and AT91SAM9G45 series. | ||
16 | */ | 15 | */ |
17 | 16 | ||
18 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
@@ -1217,7 +1216,7 @@ static const struct platform_device_id atdma_devtypes[] = { | |||
1217 | } | 1216 | } |
1218 | }; | 1217 | }; |
1219 | 1218 | ||
1220 | static inline struct at_dma_platform_data * __init at_dma_get_driver_data( | 1219 | static inline const struct at_dma_platform_data * __init at_dma_get_driver_data( |
1221 | struct platform_device *pdev) | 1220 | struct platform_device *pdev) |
1222 | { | 1221 | { |
1223 | if (pdev->dev.of_node) { | 1222 | if (pdev->dev.of_node) { |
@@ -1255,7 +1254,7 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
1255 | int irq; | 1254 | int irq; |
1256 | int err; | 1255 | int err; |
1257 | int i; | 1256 | int i; |
1258 | struct at_dma_platform_data *plat_dat; | 1257 | const struct at_dma_platform_data *plat_dat; |
1259 | 1258 | ||
1260 | /* setup platform data for each SoC */ | 1259 | /* setup platform data for each SoC */ |
1261 | dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask); | 1260 | dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask); |
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index e67b4e06a91..aa384e53b7a 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1438,34 +1438,32 @@ static int __init coh901318_probe(struct platform_device *pdev) | |||
1438 | 1438 | ||
1439 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1439 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1440 | if (!io) | 1440 | if (!io) |
1441 | goto err_get_resource; | 1441 | return -ENODEV; |
1442 | 1442 | ||
1443 | /* Map DMA controller registers to virtual memory */ | 1443 | /* Map DMA controller registers to virtual memory */ |
1444 | if (request_mem_region(io->start, | 1444 | if (devm_request_mem_region(&pdev->dev, |
1445 | resource_size(io), | 1445 | io->start, |
1446 | pdev->dev.driver->name) == NULL) { | 1446 | resource_size(io), |
1447 | err = -EBUSY; | 1447 | pdev->dev.driver->name) == NULL) |
1448 | goto err_request_mem; | 1448 | return -ENOMEM; |
1449 | } | ||
1450 | 1449 | ||
1451 | pdata = pdev->dev.platform_data; | 1450 | pdata = pdev->dev.platform_data; |
1452 | if (!pdata) | 1451 | if (!pdata) |
1453 | goto err_no_platformdata; | 1452 | return -ENODEV; |
1454 | 1453 | ||
1455 | base = kmalloc(ALIGN(sizeof(struct coh901318_base), 4) + | 1454 | base = devm_kzalloc(&pdev->dev, |
1456 | pdata->max_channels * | 1455 | ALIGN(sizeof(struct coh901318_base), 4) + |
1457 | sizeof(struct coh901318_chan), | 1456 | pdata->max_channels * |
1458 | GFP_KERNEL); | 1457 | sizeof(struct coh901318_chan), |
1458 | GFP_KERNEL); | ||
1459 | if (!base) | 1459 | if (!base) |
1460 | goto err_alloc_coh_dma_channels; | 1460 | return -ENOMEM; |
1461 | 1461 | ||
1462 | base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4); | 1462 | base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4); |
1463 | 1463 | ||
1464 | base->virtbase = ioremap(io->start, resource_size(io)); | 1464 | base->virtbase = devm_ioremap(&pdev->dev, io->start, resource_size(io)); |
1465 | if (!base->virtbase) { | 1465 | if (!base->virtbase) |
1466 | err = -ENOMEM; | 1466 | return -ENOMEM; |
1467 | goto err_no_ioremap; | ||
1468 | } | ||
1469 | 1467 | ||
1470 | base->dev = &pdev->dev; | 1468 | base->dev = &pdev->dev; |
1471 | base->platform = pdata; | 1469 | base->platform = pdata; |
@@ -1474,25 +1472,20 @@ static int __init coh901318_probe(struct platform_device *pdev) | |||
1474 | 1472 | ||
1475 | COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base); | 1473 | COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base); |
1476 | 1474 | ||
1477 | platform_set_drvdata(pdev, base); | ||
1478 | |||
1479 | irq = platform_get_irq(pdev, 0); | 1475 | irq = platform_get_irq(pdev, 0); |
1480 | if (irq < 0) | 1476 | if (irq < 0) |
1481 | goto err_no_irq; | 1477 | return irq; |
1482 | 1478 | ||
1483 | err = request_irq(irq, dma_irq_handler, IRQF_DISABLED, | 1479 | err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED, |
1484 | "coh901318", base); | 1480 | "coh901318", base); |
1485 | if (err) { | 1481 | if (err) |
1486 | dev_crit(&pdev->dev, | 1482 | return err; |
1487 | "Cannot allocate IRQ for DMA controller!\n"); | ||
1488 | goto err_request_irq; | ||
1489 | } | ||
1490 | 1483 | ||
1491 | err = coh901318_pool_create(&base->pool, &pdev->dev, | 1484 | err = coh901318_pool_create(&base->pool, &pdev->dev, |
1492 | sizeof(struct coh901318_lli), | 1485 | sizeof(struct coh901318_lli), |
1493 | 32); | 1486 | 32); |
1494 | if (err) | 1487 | if (err) |
1495 | goto err_pool_create; | 1488 | return err; |
1496 | 1489 | ||
1497 | /* init channels for device transfers */ | 1490 | /* init channels for device transfers */ |
1498 | coh901318_base_init(&base->dma_slave, base->platform->chans_slave, | 1491 | coh901318_base_init(&base->dma_slave, base->platform->chans_slave, |
@@ -1538,6 +1531,7 @@ static int __init coh901318_probe(struct platform_device *pdev) | |||
1538 | if (err) | 1531 | if (err) |
1539 | goto err_register_memcpy; | 1532 | goto err_register_memcpy; |
1540 | 1533 | ||
1534 | platform_set_drvdata(pdev, base); | ||
1541 | dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", | 1535 | dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", |
1542 | (u32) base->virtbase); | 1536 | (u32) base->virtbase); |
1543 | 1537 | ||
@@ -1547,19 +1541,6 @@ static int __init coh901318_probe(struct platform_device *pdev) | |||
1547 | dma_async_device_unregister(&base->dma_slave); | 1541 | dma_async_device_unregister(&base->dma_slave); |
1548 | err_register_slave: | 1542 | err_register_slave: |
1549 | coh901318_pool_destroy(&base->pool); | 1543 | coh901318_pool_destroy(&base->pool); |
1550 | err_pool_create: | ||
1551 | free_irq(platform_get_irq(pdev, 0), base); | ||
1552 | err_request_irq: | ||
1553 | err_no_irq: | ||
1554 | iounmap(base->virtbase); | ||
1555 | err_no_ioremap: | ||
1556 | kfree(base); | ||
1557 | err_alloc_coh_dma_channels: | ||
1558 | err_no_platformdata: | ||
1559 | release_mem_region(pdev->resource->start, | ||
1560 | resource_size(pdev->resource)); | ||
1561 | err_request_mem: | ||
1562 | err_get_resource: | ||
1563 | return err; | 1544 | return err; |
1564 | } | 1545 | } |
1565 | 1546 | ||
@@ -1570,11 +1551,6 @@ static int __exit coh901318_remove(struct platform_device *pdev) | |||
1570 | dma_async_device_unregister(&base->dma_memcpy); | 1551 | dma_async_device_unregister(&base->dma_memcpy); |
1571 | dma_async_device_unregister(&base->dma_slave); | 1552 | dma_async_device_unregister(&base->dma_slave); |
1572 | coh901318_pool_destroy(&base->pool); | 1553 | coh901318_pool_destroy(&base->pool); |
1573 | free_irq(platform_get_irq(pdev, 0), base); | ||
1574 | iounmap(base->virtbase); | ||
1575 | kfree(base); | ||
1576 | release_mem_region(pdev->resource->start, | ||
1577 | resource_size(pdev->resource)); | ||
1578 | return 0; | 1554 | return 0; |
1579 | } | 1555 | } |
1580 | 1556 | ||
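The coh901318 changes above are a straight conversion to managed (devm_*) resources: the memory region, ioremap and IRQ are now released automatically by the driver core, which is why the long error-unwind label chain and most of the teardown in coh901318_remove() disappear. A condensed sketch of the same pattern, using hypothetical foo_* names, might look like this:

```c
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>

/* Placeholder handler so the sketch is self-contained. */
static irqreturn_t foo_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct resource *io;
	void __iomem *virtbase;
	int irq, err;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -ENODEV;

	if (!devm_request_mem_region(&pdev->dev, io->start,
				     resource_size(io),
				     pdev->dev.driver->name))
		return -EBUSY;

	virtbase = devm_ioremap(&pdev->dev, io->start, resource_size(io));
	if (!virtbase)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
			       "foo", virtbase);
	if (err)
		return err;

	/*
	 * Everything above is undone automatically if a later probe step
	 * fails or when the device is unbound, so no unwind labels and no
	 * matching cleanup in the remove path are needed.
	 */
	return 0;
}
```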
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 2397f6f451b..3491654cdf7 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -45,6 +45,8 @@ | |||
45 | * See Documentation/dmaengine.txt for more details | 45 | * See Documentation/dmaengine.txt for more details |
46 | */ | 46 | */ |
47 | 47 | ||
48 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
49 | |||
48 | #include <linux/dma-mapping.h> | 50 | #include <linux/dma-mapping.h> |
49 | #include <linux/init.h> | 51 | #include <linux/init.h> |
50 | #include <linux/module.h> | 52 | #include <linux/module.h> |
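Defining pr_fmt() before the first include makes every pr_err()/pr_debug() in this file carry a module-name prefix automatically, which is why the hand-written "dmaengine: " prefixes are dropped from the messages in the hunks below. A minimal illustration (not taken from the patch):

```c
/* Must come before the headers that pull in <linux/printk.h>. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static void example(void)
{
	/* Prints something like "dmaengine: example: timeout!" */
	pr_err("%s: timeout!\n", __func__);
}
```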
@@ -261,7 +263,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) | |||
261 | do { | 263 | do { |
262 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); | 264 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); |
263 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { | 265 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { |
264 | printk(KERN_ERR "dma_sync_wait_timeout!\n"); | 266 | pr_err("%s: timeout!\n", __func__); |
265 | return DMA_ERROR; | 267 | return DMA_ERROR; |
266 | } | 268 | } |
267 | } while (status == DMA_IN_PROGRESS); | 269 | } while (status == DMA_IN_PROGRESS); |
@@ -312,7 +314,7 @@ static int __init dma_channel_table_init(void) | |||
312 | } | 314 | } |
313 | 315 | ||
314 | if (err) { | 316 | if (err) { |
315 | pr_err("dmaengine: initialization failure\n"); | 317 | pr_err("initialization failure\n"); |
316 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | 318 | for_each_dma_cap_mask(cap, dma_cap_mask_all) |
317 | if (channel_table[cap]) | 319 | if (channel_table[cap]) |
318 | free_percpu(channel_table[cap]); | 320 | free_percpu(channel_table[cap]); |
@@ -520,12 +522,12 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v | |||
520 | err = dma_chan_get(chan); | 522 | err = dma_chan_get(chan); |
521 | 523 | ||
522 | if (err == -ENODEV) { | 524 | if (err == -ENODEV) { |
523 | pr_debug("%s: %s module removed\n", __func__, | 525 | pr_debug("%s: %s module removed\n", |
524 | dma_chan_name(chan)); | 526 | __func__, dma_chan_name(chan)); |
525 | list_del_rcu(&device->global_node); | 527 | list_del_rcu(&device->global_node); |
526 | } else if (err) | 528 | } else if (err) |
527 | pr_debug("%s: failed to get %s: (%d)\n", | 529 | pr_debug("%s: failed to get %s: (%d)\n", |
528 | __func__, dma_chan_name(chan), err); | 530 | __func__, dma_chan_name(chan), err); |
529 | else | 531 | else |
530 | break; | 532 | break; |
531 | if (--device->privatecnt == 0) | 533 | if (--device->privatecnt == 0) |
@@ -535,7 +537,9 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v | |||
535 | } | 537 | } |
536 | mutex_unlock(&dma_list_mutex); | 538 | mutex_unlock(&dma_list_mutex); |
537 | 539 | ||
538 | pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail", | 540 | pr_debug("%s: %s (%s)\n", |
541 | __func__, | ||
542 | chan ? "success" : "fail", | ||
539 | chan ? dma_chan_name(chan) : NULL); | 543 | chan ? dma_chan_name(chan) : NULL); |
540 | 544 | ||
541 | return chan; | 545 | return chan; |
@@ -579,7 +583,7 @@ void dmaengine_get(void) | |||
579 | break; | 583 | break; |
580 | } else if (err) | 584 | } else if (err) |
581 | pr_err("%s: failed to get %s: (%d)\n", | 585 | pr_err("%s: failed to get %s: (%d)\n", |
582 | __func__, dma_chan_name(chan), err); | 586 | __func__, dma_chan_name(chan), err); |
583 | } | 587 | } |
584 | } | 588 | } |
585 | 589 | ||
@@ -1015,7 +1019,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | |||
1015 | while (tx->cookie == -EBUSY) { | 1019 | while (tx->cookie == -EBUSY) { |
1016 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { | 1020 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { |
1017 | pr_err("%s timeout waiting for descriptor submission\n", | 1021 | pr_err("%s timeout waiting for descriptor submission\n", |
1018 | __func__); | 1022 | __func__); |
1019 | return DMA_ERROR; | 1023 | return DMA_ERROR; |
1020 | } | 1024 | } |
1021 | cpu_relax(); | 1025 | cpu_relax(); |
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 72129615757..d3c5a5a88f1 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -105,13 +105,13 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) | |||
105 | 105 | ||
106 | spin_lock_irqsave(&dwc->lock, flags); | 106 | spin_lock_irqsave(&dwc->lock, flags); |
107 | list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { | 107 | list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { |
108 | i++; | ||
108 | if (async_tx_test_ack(&desc->txd)) { | 109 | if (async_tx_test_ack(&desc->txd)) { |
109 | list_del(&desc->desc_node); | 110 | list_del(&desc->desc_node); |
110 | ret = desc; | 111 | ret = desc; |
111 | break; | 112 | break; |
112 | } | 113 | } |
113 | dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); | 114 | dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); |
114 | i++; | ||
115 | } | 115 | } |
116 | spin_unlock_irqrestore(&dwc->lock, flags); | 116 | spin_unlock_irqrestore(&dwc->lock, flags); |
117 | 117 | ||
@@ -191,6 +191,42 @@ static void dwc_initialize(struct dw_dma_chan *dwc) | |||
191 | 191 | ||
192 | /*----------------------------------------------------------------------*/ | 192 | /*----------------------------------------------------------------------*/ |
193 | 193 | ||
194 | static inline unsigned int dwc_fast_fls(unsigned long long v) | ||
195 | { | ||
196 | /* | ||
197 | * We can be a lot more clever here, but this should take care | ||
198 | * of the most common optimization. | ||
199 | */ | ||
200 | if (!(v & 7)) | ||
201 | return 3; | ||
202 | else if (!(v & 3)) | ||
203 | return 2; | ||
204 | else if (!(v & 1)) | ||
205 | return 1; | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static void dwc_dump_chan_regs(struct dw_dma_chan *dwc) | ||
210 | { | ||
211 | dev_err(chan2dev(&dwc->chan), | ||
212 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | ||
213 | channel_readl(dwc, SAR), | ||
214 | channel_readl(dwc, DAR), | ||
215 | channel_readl(dwc, LLP), | ||
216 | channel_readl(dwc, CTL_HI), | ||
217 | channel_readl(dwc, CTL_LO)); | ||
218 | } | ||
219 | |||
220 | |||
221 | static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) | ||
222 | { | ||
223 | channel_clear_bit(dw, CH_EN, dwc->mask); | ||
224 | while (dma_readl(dw, CH_EN) & dwc->mask) | ||
225 | cpu_relax(); | ||
226 | } | ||
227 | |||
228 | /*----------------------------------------------------------------------*/ | ||
229 | |||
194 | /* Called with dwc->lock held and bh disabled */ | 230 | /* Called with dwc->lock held and bh disabled */ |
195 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | 231 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) |
196 | { | 232 | { |
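dwc_fast_fls() folds the previously open-coded width selection into one helper: it returns the largest power-of-two alignment common to its argument, as an exponent capped at 8 bytes, and the prep routines further down feed that exponent straight into DWC_CTLL_SRC_WIDTH()/DWC_CTLL_DST_WIDTH(), i.e. 2^width bytes per bus beat. A few illustrative calls with made-up values:

```c
/* Illustration only; the addresses and lengths are arbitrary examples. */
dwc_fast_fls(0x1000 | 0x2000 | 64);	/* everything 8-byte aligned -> 3 */
dwc_fast_fls(0x1000 | 0x2000 | 62);	/* length only 2-byte aligned -> 1 */
dwc_fast_fls(0x1001);			/* odd address or length      -> 0 */
```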
@@ -200,13 +236,7 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | |||
200 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 236 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
201 | dev_err(chan2dev(&dwc->chan), | 237 | dev_err(chan2dev(&dwc->chan), |
202 | "BUG: Attempted to start non-idle channel\n"); | 238 | "BUG: Attempted to start non-idle channel\n"); |
203 | dev_err(chan2dev(&dwc->chan), | 239 | dwc_dump_chan_regs(dwc); |
204 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | ||
205 | channel_readl(dwc, SAR), | ||
206 | channel_readl(dwc, DAR), | ||
207 | channel_readl(dwc, LLP), | ||
208 | channel_readl(dwc, CTL_HI), | ||
209 | channel_readl(dwc, CTL_LO)); | ||
210 | 240 | ||
211 | /* The tasklet will hopefully advance the queue... */ | 241 | /* The tasklet will hopefully advance the queue... */ |
212 | return; | 242 | return; |
@@ -290,9 +320,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
290 | "BUG: XFER bit set, but channel not idle!\n"); | 320 | "BUG: XFER bit set, but channel not idle!\n"); |
291 | 321 | ||
292 | /* Try to continue after resetting the channel... */ | 322 | /* Try to continue after resetting the channel... */ |
293 | channel_clear_bit(dw, CH_EN, dwc->mask); | 323 | dwc_chan_disable(dw, dwc); |
294 | while (dma_readl(dw, CH_EN) & dwc->mask) | ||
295 | cpu_relax(); | ||
296 | } | 324 | } |
297 | 325 | ||
298 | /* | 326 | /* |
@@ -337,7 +365,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
337 | return; | 365 | return; |
338 | } | 366 | } |
339 | 367 | ||
340 | dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); | 368 | dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__, |
369 | (unsigned long long)llp); | ||
341 | 370 | ||
342 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { | 371 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { |
343 | /* check first descriptors addr */ | 372 | /* check first descriptors addr */ |
@@ -373,9 +402,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
373 | "BUG: All descriptors done, but channel not idle!\n"); | 402 | "BUG: All descriptors done, but channel not idle!\n"); |
374 | 403 | ||
375 | /* Try to continue after resetting the channel... */ | 404 | /* Try to continue after resetting the channel... */ |
376 | channel_clear_bit(dw, CH_EN, dwc->mask); | 405 | dwc_chan_disable(dw, dwc); |
377 | while (dma_readl(dw, CH_EN) & dwc->mask) | ||
378 | cpu_relax(); | ||
379 | 406 | ||
380 | if (!list_empty(&dwc->queue)) { | 407 | if (!list_empty(&dwc->queue)) { |
381 | list_move(dwc->queue.next, &dwc->active_list); | 408 | list_move(dwc->queue.next, &dwc->active_list); |
@@ -384,12 +411,11 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
384 | spin_unlock_irqrestore(&dwc->lock, flags); | 411 | spin_unlock_irqrestore(&dwc->lock, flags); |
385 | } | 412 | } |
386 | 413 | ||
387 | static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) | 414 | static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) |
388 | { | 415 | { |
389 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), | 416 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
390 | " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", | 417 | " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", |
391 | lli->sar, lli->dar, lli->llp, | 418 | lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); |
392 | lli->ctlhi, lli->ctllo); | ||
393 | } | 419 | } |
394 | 420 | ||
395 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | 421 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) |
@@ -487,17 +513,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | |||
487 | 513 | ||
488 | spin_lock_irqsave(&dwc->lock, flags); | 514 | spin_lock_irqsave(&dwc->lock, flags); |
489 | 515 | ||
490 | dev_err(chan2dev(&dwc->chan), | 516 | dwc_dump_chan_regs(dwc); |
491 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | ||
492 | channel_readl(dwc, SAR), | ||
493 | channel_readl(dwc, DAR), | ||
494 | channel_readl(dwc, LLP), | ||
495 | channel_readl(dwc, CTL_HI), | ||
496 | channel_readl(dwc, CTL_LO)); | ||
497 | 517 | ||
498 | channel_clear_bit(dw, CH_EN, dwc->mask); | 518 | dwc_chan_disable(dw, dwc); |
499 | while (dma_readl(dw, CH_EN) & dwc->mask) | ||
500 | cpu_relax(); | ||
501 | 519 | ||
502 | /* make sure DMA does not restart by loading a new list */ | 520 | /* make sure DMA does not restart by loading a new list */ |
503 | channel_writel(dwc, LLP, 0); | 521 | channel_writel(dwc, LLP, 0); |
@@ -527,7 +545,7 @@ static void dw_dma_tasklet(unsigned long data) | |||
527 | status_xfer = dma_readl(dw, RAW.XFER); | 545 | status_xfer = dma_readl(dw, RAW.XFER); |
528 | status_err = dma_readl(dw, RAW.ERROR); | 546 | status_err = dma_readl(dw, RAW.ERROR); |
529 | 547 | ||
530 | dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err); | 548 | dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err); |
531 | 549 | ||
532 | for (i = 0; i < dw->dma.chancnt; i++) { | 550 | for (i = 0; i < dw->dma.chancnt; i++) { |
533 | dwc = &dw->chan[i]; | 551 | dwc = &dw->chan[i]; |
@@ -551,7 +569,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) | |||
551 | struct dw_dma *dw = dev_id; | 569 | struct dw_dma *dw = dev_id; |
552 | u32 status; | 570 | u32 status; |
553 | 571 | ||
554 | dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n", | 572 | dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, |
555 | dma_readl(dw, STATUS_INT)); | 573 | dma_readl(dw, STATUS_INT)); |
556 | 574 | ||
557 | /* | 575 | /* |
@@ -597,12 +615,12 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
597 | * for DMA. But this is hard to do in a race-free manner. | 615 | * for DMA. But this is hard to do in a race-free manner. |
598 | */ | 616 | */ |
599 | if (list_empty(&dwc->active_list)) { | 617 | if (list_empty(&dwc->active_list)) { |
600 | dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", | 618 | dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__, |
601 | desc->txd.cookie); | 619 | desc->txd.cookie); |
602 | list_add_tail(&desc->desc_node, &dwc->active_list); | 620 | list_add_tail(&desc->desc_node, &dwc->active_list); |
603 | dwc_dostart(dwc, dwc_first_active(dwc)); | 621 | dwc_dostart(dwc, dwc_first_active(dwc)); |
604 | } else { | 622 | } else { |
605 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", | 623 | dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, |
606 | desc->txd.cookie); | 624 | desc->txd.cookie); |
607 | 625 | ||
608 | list_add_tail(&desc->desc_node, &dwc->queue); | 626 | list_add_tail(&desc->desc_node, &dwc->queue); |
@@ -627,26 +645,17 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
627 | unsigned int dst_width; | 645 | unsigned int dst_width; |
628 | u32 ctllo; | 646 | u32 ctllo; |
629 | 647 | ||
630 | dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n", | 648 | dev_vdbg(chan2dev(chan), |
631 | dest, src, len, flags); | 649 | "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__, |
650 | (unsigned long long)dest, (unsigned long long)src, | ||
651 | len, flags); | ||
632 | 652 | ||
633 | if (unlikely(!len)) { | 653 | if (unlikely(!len)) { |
634 | dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); | 654 | dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); |
635 | return NULL; | 655 | return NULL; |
636 | } | 656 | } |
637 | 657 | ||
638 | /* | 658 | src_width = dst_width = dwc_fast_fls(src | dest | len); |
639 | * We can be a lot more clever here, but this should take care | ||
640 | * of the most common optimization. | ||
641 | */ | ||
642 | if (!((src | dest | len) & 7)) | ||
643 | src_width = dst_width = 3; | ||
644 | else if (!((src | dest | len) & 3)) | ||
645 | src_width = dst_width = 2; | ||
646 | else if (!((src | dest | len) & 1)) | ||
647 | src_width = dst_width = 1; | ||
648 | else | ||
649 | src_width = dst_width = 0; | ||
650 | 659 | ||
651 | ctllo = DWC_DEFAULT_CTLLO(chan) | 660 | ctllo = DWC_DEFAULT_CTLLO(chan) |
652 | | DWC_CTLL_DST_WIDTH(dst_width) | 661 | | DWC_CTLL_DST_WIDTH(dst_width) |
@@ -720,7 +729,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
720 | struct scatterlist *sg; | 729 | struct scatterlist *sg; |
721 | size_t total_len = 0; | 730 | size_t total_len = 0; |
722 | 731 | ||
723 | dev_vdbg(chan2dev(chan), "prep_dma_slave\n"); | 732 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
724 | 733 | ||
725 | if (unlikely(!dws || !sg_len)) | 734 | if (unlikely(!dws || !sg_len)) |
726 | return NULL; | 735 | return NULL; |
@@ -746,14 +755,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
746 | mem = sg_dma_address(sg); | 755 | mem = sg_dma_address(sg); |
747 | len = sg_dma_len(sg); | 756 | len = sg_dma_len(sg); |
748 | 757 | ||
749 | if (!((mem | len) & 7)) | 758 | mem_width = dwc_fast_fls(mem | len); |
750 | mem_width = 3; | ||
751 | else if (!((mem | len) & 3)) | ||
752 | mem_width = 2; | ||
753 | else if (!((mem | len) & 1)) | ||
754 | mem_width = 1; | ||
755 | else | ||
756 | mem_width = 0; | ||
757 | 759 | ||
758 | slave_sg_todev_fill_desc: | 760 | slave_sg_todev_fill_desc: |
759 | desc = dwc_desc_get(dwc); | 761 | desc = dwc_desc_get(dwc); |
@@ -813,14 +815,7 @@ slave_sg_todev_fill_desc: | |||
813 | mem = sg_dma_address(sg); | 815 | mem = sg_dma_address(sg); |
814 | len = sg_dma_len(sg); | 816 | len = sg_dma_len(sg); |
815 | 817 | ||
816 | if (!((mem | len) & 7)) | 818 | mem_width = dwc_fast_fls(mem | len); |
817 | mem_width = 3; | ||
818 | else if (!((mem | len) & 3)) | ||
819 | mem_width = 2; | ||
820 | else if (!((mem | len) & 1)) | ||
821 | mem_width = 1; | ||
822 | else | ||
823 | mem_width = 0; | ||
824 | 819 | ||
825 | slave_sg_fromdev_fill_desc: | 820 | slave_sg_fromdev_fill_desc: |
826 | desc = dwc_desc_get(dwc); | 821 | desc = dwc_desc_get(dwc); |
@@ -950,9 +945,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
950 | } else if (cmd == DMA_TERMINATE_ALL) { | 945 | } else if (cmd == DMA_TERMINATE_ALL) { |
951 | spin_lock_irqsave(&dwc->lock, flags); | 946 | spin_lock_irqsave(&dwc->lock, flags); |
952 | 947 | ||
953 | channel_clear_bit(dw, CH_EN, dwc->mask); | 948 | dwc_chan_disable(dw, dwc); |
954 | while (dma_readl(dw, CH_EN) & dwc->mask) | ||
955 | cpu_relax(); | ||
956 | 949 | ||
957 | dwc->paused = false; | 950 | dwc->paused = false; |
958 | 951 | ||
@@ -1014,7 +1007,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
1014 | int i; | 1007 | int i; |
1015 | unsigned long flags; | 1008 | unsigned long flags; |
1016 | 1009 | ||
1017 | dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); | 1010 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
1018 | 1011 | ||
1019 | /* ASSERT: channel is idle */ | 1012 | /* ASSERT: channel is idle */ |
1020 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 1013 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
@@ -1057,8 +1050,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
1057 | 1050 | ||
1058 | spin_unlock_irqrestore(&dwc->lock, flags); | 1051 | spin_unlock_irqrestore(&dwc->lock, flags); |
1059 | 1052 | ||
1060 | dev_dbg(chan2dev(chan), | 1053 | dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); |
1061 | "alloc_chan_resources allocated %d descriptors\n", i); | ||
1062 | 1054 | ||
1063 | return i; | 1055 | return i; |
1064 | } | 1056 | } |
@@ -1071,7 +1063,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1071 | unsigned long flags; | 1063 | unsigned long flags; |
1072 | LIST_HEAD(list); | 1064 | LIST_HEAD(list); |
1073 | 1065 | ||
1074 | dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", | 1066 | dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, |
1075 | dwc->descs_allocated); | 1067 | dwc->descs_allocated); |
1076 | 1068 | ||
1077 | /* ASSERT: channel is idle */ | 1069 | /* ASSERT: channel is idle */ |
@@ -1097,7 +1089,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1097 | kfree(desc); | 1089 | kfree(desc); |
1098 | } | 1090 | } |
1099 | 1091 | ||
1100 | dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); | 1092 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); |
1101 | } | 1093 | } |
1102 | 1094 | ||
1103 | /* --------------------- Cyclic DMA API extensions -------------------- */ | 1095 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
@@ -1126,13 +1118,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan) | |||
1126 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 1118 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
1127 | dev_err(chan2dev(&dwc->chan), | 1119 | dev_err(chan2dev(&dwc->chan), |
1128 | "BUG: Attempted to start non-idle channel\n"); | 1120 | "BUG: Attempted to start non-idle channel\n"); |
1129 | dev_err(chan2dev(&dwc->chan), | 1121 | dwc_dump_chan_regs(dwc); |
1130 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | ||
1131 | channel_readl(dwc, SAR), | ||
1132 | channel_readl(dwc, DAR), | ||
1133 | channel_readl(dwc, LLP), | ||
1134 | channel_readl(dwc, CTL_HI), | ||
1135 | channel_readl(dwc, CTL_LO)); | ||
1136 | spin_unlock_irqrestore(&dwc->lock, flags); | 1122 | spin_unlock_irqrestore(&dwc->lock, flags); |
1137 | return -EBUSY; | 1123 | return -EBUSY; |
1138 | } | 1124 | } |
@@ -1167,9 +1153,7 @@ void dw_dma_cyclic_stop(struct dma_chan *chan) | |||
1167 | 1153 | ||
1168 | spin_lock_irqsave(&dwc->lock, flags); | 1154 | spin_lock_irqsave(&dwc->lock, flags); |
1169 | 1155 | ||
1170 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1156 | dwc_chan_disable(dw, dwc); |
1171 | while (dma_readl(dw, CH_EN) & dwc->mask) | ||
1172 | cpu_relax(); | ||
1173 | 1157 | ||
1174 | spin_unlock_irqrestore(&dwc->lock, flags); | 1158 | spin_unlock_irqrestore(&dwc->lock, flags); |
1175 | } | 1159 | } |
@@ -1308,9 +1292,9 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1308 | dma_sync_single_for_device(chan2parent(chan), last->txd.phys, | 1292 | dma_sync_single_for_device(chan2parent(chan), last->txd.phys, |
1309 | sizeof(last->lli), DMA_TO_DEVICE); | 1293 | sizeof(last->lli), DMA_TO_DEVICE); |
1310 | 1294 | ||
1311 | dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu " | 1295 | dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " |
1312 | "period %zu periods %d\n", buf_addr, buf_len, | 1296 | "period %zu periods %d\n", (unsigned long long)buf_addr, |
1313 | period_len, periods); | 1297 | buf_len, period_len, periods); |
1314 | 1298 | ||
1315 | cdesc->periods = periods; | 1299 | cdesc->periods = periods; |
1316 | dwc->cdesc = cdesc; | 1300 | dwc->cdesc = cdesc; |
@@ -1340,16 +1324,14 @@ void dw_dma_cyclic_free(struct dma_chan *chan) | |||
1340 | int i; | 1324 | int i; |
1341 | unsigned long flags; | 1325 | unsigned long flags; |
1342 | 1326 | ||
1343 | dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); | 1327 | dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); |
1344 | 1328 | ||
1345 | if (!cdesc) | 1329 | if (!cdesc) |
1346 | return; | 1330 | return; |
1347 | 1331 | ||
1348 | spin_lock_irqsave(&dwc->lock, flags); | 1332 | spin_lock_irqsave(&dwc->lock, flags); |
1349 | 1333 | ||
1350 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1334 | dwc_chan_disable(dw, dwc); |
1351 | while (dma_readl(dw, CH_EN) & dwc->mask) | ||
1352 | cpu_relax(); | ||
1353 | 1335 | ||
1354 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 1336 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1355 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 1337 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
@@ -1386,7 +1368,7 @@ static void dw_dma_off(struct dw_dma *dw) | |||
1386 | dw->chan[i].initialized = false; | 1368 | dw->chan[i].initialized = false; |
1387 | } | 1369 | } |
1388 | 1370 | ||
1389 | static int __init dw_probe(struct platform_device *pdev) | 1371 | static int __devinit dw_probe(struct platform_device *pdev) |
1390 | { | 1372 | { |
1391 | struct dw_dma_platform_data *pdata; | 1373 | struct dw_dma_platform_data *pdata; |
1392 | struct resource *io; | 1374 | struct resource *io; |
@@ -1432,9 +1414,15 @@ static int __init dw_probe(struct platform_device *pdev) | |||
1432 | } | 1414 | } |
1433 | clk_prepare_enable(dw->clk); | 1415 | clk_prepare_enable(dw->clk); |
1434 | 1416 | ||
1417 | /* Calculate all channel mask before DMA setup */ | ||
1418 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; | ||
1419 | |||
1435 | /* force dma off, just in case */ | 1420 | /* force dma off, just in case */ |
1436 | dw_dma_off(dw); | 1421 | dw_dma_off(dw); |
1437 | 1422 | ||
1423 | /* disable BLOCK interrupts as well */ | ||
1424 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | ||
1425 | |||
1438 | err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); | 1426 | err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); |
1439 | if (err) | 1427 | if (err) |
1440 | goto err_irq; | 1428 | goto err_irq; |
@@ -1443,8 +1431,6 @@ static int __init dw_probe(struct platform_device *pdev) | |||
1443 | 1431 | ||
1444 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); | 1432 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); |
1445 | 1433 | ||
1446 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; | ||
1447 | |||
1448 | INIT_LIST_HEAD(&dw->dma.channels); | 1434 | INIT_LIST_HEAD(&dw->dma.channels); |
1449 | for (i = 0; i < pdata->nr_channels; i++) { | 1435 | for (i = 0; i < pdata->nr_channels; i++) { |
1450 | struct dw_dma_chan *dwc = &dw->chan[i]; | 1436 | struct dw_dma_chan *dwc = &dw->chan[i]; |
@@ -1474,17 +1460,13 @@ static int __init dw_probe(struct platform_device *pdev) | |||
1474 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1460 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1475 | } | 1461 | } |
1476 | 1462 | ||
1477 | /* Clear/disable all interrupts on all channels. */ | 1463 | /* Clear all interrupts on all channels. */ |
1478 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); | 1464 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); |
1465 | dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); | ||
1479 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); | 1466 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); |
1480 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); | 1467 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); |
1481 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); | 1468 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); |
1482 | 1469 | ||
1483 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | ||
1484 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); | ||
1485 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | ||
1486 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | ||
1487 | |||
1488 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); | 1470 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); |
1489 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); | 1471 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); |
1490 | if (pdata->is_private) | 1472 | if (pdata->is_private) |
@@ -1523,7 +1505,7 @@ err_kfree: | |||
1523 | return err; | 1505 | return err; |
1524 | } | 1506 | } |
1525 | 1507 | ||
1526 | static int __exit dw_remove(struct platform_device *pdev) | 1508 | static int __devexit dw_remove(struct platform_device *pdev) |
1527 | { | 1509 | { |
1528 | struct dw_dma *dw = platform_get_drvdata(pdev); | 1510 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1529 | struct dw_dma_chan *dwc, *_dwc; | 1511 | struct dw_dma_chan *dwc, *_dwc; |
@@ -1602,7 +1584,7 @@ MODULE_DEVICE_TABLE(of, dw_dma_id_table); | |||
1602 | #endif | 1584 | #endif |
1603 | 1585 | ||
1604 | static struct platform_driver dw_driver = { | 1586 | static struct platform_driver dw_driver = { |
1605 | .remove = __exit_p(dw_remove), | 1587 | .remove = __devexit_p(dw_remove), |
1606 | .shutdown = dw_shutdown, | 1588 | .shutdown = dw_shutdown, |
1607 | .driver = { | 1589 | .driver = { |
1608 | .name = "dw_dmac", | 1590 | .name = "dw_dmac", |
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index f298f69ecbf..50830bee087 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -82,7 +82,7 @@ struct dw_dma_regs { | |||
82 | DW_REG(ID); | 82 | DW_REG(ID); |
83 | DW_REG(TEST); | 83 | DW_REG(TEST); |
84 | 84 | ||
85 | /* optional encoded params, 0x3c8..0x3 */ | 85 | /* optional encoded params, 0x3c8..0x3f7 */ |
86 | }; | 86 | }; |
87 | 87 | ||
88 | /* Bitfields in CTL_LO */ | 88 | /* Bitfields in CTL_LO */ |
@@ -219,9 +219,9 @@ static inline struct dw_dma *to_dw_dma(struct dma_device *ddev) | |||
219 | /* LLI == Linked List Item; a.k.a. DMA block descriptor */ | 219 | /* LLI == Linked List Item; a.k.a. DMA block descriptor */ |
220 | struct dw_lli { | 220 | struct dw_lli { |
221 | /* values that are not changed by hardware */ | 221 | /* values that are not changed by hardware */ |
222 | dma_addr_t sar; | 222 | u32 sar; |
223 | dma_addr_t dar; | 223 | u32 dar; |
224 | dma_addr_t llp; /* chain to next lli */ | 224 | u32 llp; /* chain to next lli */ |
225 | u32 ctllo; | 225 | u32 ctllo; |
226 | /* values that may get written back: */ | 226 | /* values that may get written back: */ |
227 | u32 ctlhi; | 227 | u32 ctlhi; |
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
new file mode 100644
index 00000000000..8a15cf2163d
--- /dev/null
+++ b/drivers/dma/mmp_tdma.c
@@ -0,0 +1,610 @@ | |||
1 | /* | ||
2 | * Driver For Marvell Two-channel DMA Engine | ||
3 | * | ||
4 | * Copyright: Marvell International Ltd. | ||
5 | * | ||
6 | * The code contained herein is licensed under the GNU General Public | ||
7 | * License. You may obtain a copy of the GNU General Public License | ||
8 | * Version 2 or later at the following locations: | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/types.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/dma-mapping.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/dmaengine.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | #include <linux/device.h> | ||
21 | #include <mach/regs-icu.h> | ||
22 | #include <mach/sram.h> | ||
23 | |||
24 | #include "dmaengine.h" | ||
25 | |||
26 | /* | ||
27 | * Two-Channel DMA registers | ||
28 | */ | ||
29 | #define TDBCR 0x00 /* Byte Count */ | ||
30 | #define TDSAR 0x10 /* Src Addr */ | ||
31 | #define TDDAR 0x20 /* Dst Addr */ | ||
32 | #define TDNDPR 0x30 /* Next Desc */ | ||
33 | #define TDCR 0x40 /* Control */ | ||
34 | #define TDCP 0x60 /* Priority*/ | ||
35 | #define TDCDPR 0x70 /* Current Desc */ | ||
36 | #define TDIMR 0x80 /* Int Mask */ | ||
37 | #define TDISR 0xa0 /* Int Status */ | ||
38 | |||
39 | /* Two-Channel DMA Control Register */ | ||
40 | #define TDCR_SSZ_8_BITS (0x0 << 22) /* Sample Size */ | ||
41 | #define TDCR_SSZ_12_BITS (0x1 << 22) | ||
42 | #define TDCR_SSZ_16_BITS (0x2 << 22) | ||
43 | #define TDCR_SSZ_20_BITS (0x3 << 22) | ||
44 | #define TDCR_SSZ_24_BITS (0x4 << 22) | ||
45 | #define TDCR_SSZ_32_BITS (0x5 << 22) | ||
46 | #define TDCR_SSZ_SHIFT (0x1 << 22) | ||
47 | #define TDCR_SSZ_MASK (0x7 << 22) | ||
48 | #define TDCR_SSPMOD (0x1 << 21) /* SSP MOD */ | ||
49 | #define TDCR_ABR (0x1 << 20) /* Channel Abort */ | ||
50 | #define TDCR_CDE (0x1 << 17) /* Close Desc Enable */ | ||
51 | #define TDCR_PACKMOD (0x1 << 16) /* Pack Mode (ADMA Only) */ | ||
52 | #define TDCR_CHANACT (0x1 << 14) /* Channel Active */ | ||
53 | #define TDCR_FETCHND (0x1 << 13) /* Fetch Next Desc */ | ||
54 | #define TDCR_CHANEN (0x1 << 12) /* Channel Enable */ | ||
55 | #define TDCR_INTMODE (0x1 << 10) /* Interrupt Mode */ | ||
56 | #define TDCR_CHAINMOD (0x1 << 9) /* Chain Mode */ | ||
57 | #define TDCR_BURSTSZ_MSK (0x7 << 6) /* Burst Size */ | ||
58 | #define TDCR_BURSTSZ_4B (0x0 << 6) | ||
59 | #define TDCR_BURSTSZ_8B (0x1 << 6) | ||
60 | #define TDCR_BURSTSZ_16B (0x3 << 6) | ||
61 | #define TDCR_BURSTSZ_32B (0x6 << 6) | ||
62 | #define TDCR_BURSTSZ_64B (0x7 << 6) | ||
63 | #define TDCR_BURSTSZ_SQU_32B (0x7 << 6) | ||
64 | #define TDCR_BURSTSZ_128B (0x5 << 6) | ||
65 | #define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */ | ||
66 | #define TDCR_DSTDIR_ADDR_HOLD (0x2 << 4) /* Dst Addr Hold */ | ||
67 | #define TDCR_DSTDIR_ADDR_INC (0x0 << 4) /* Dst Addr Increment */ | ||
68 | #define TDCR_SRCDIR_MSK (0x3 << 2) /* Src Direction */ | ||
69 | #define TDCR_SRCDIR_ADDR_HOLD (0x2 << 2) /* Src Addr Hold */ | ||
70 | #define TDCR_SRCDIR_ADDR_INC (0x0 << 2) /* Src Addr Increment */ | ||
71 | #define TDCR_DSTDESCCONT (0x1 << 1) | ||
72 | #define TDCR_SRCDESTCONT (0x1 << 0) | ||
73 | |||
74 | /* Two-Channel DMA Int Mask Register */ | ||
75 | #define TDIMR_COMP (0x1 << 0) | ||
76 | |||
77 | /* Two-Channel DMA Int Status Register */ | ||
78 | #define TDISR_COMP (0x1 << 0) | ||
79 | |||
80 | /* | ||
81 | * Two-Channel DMA Descriptor Struct | ||
82 | * NOTE: desc's buf must be aligned to 16 bytes. | ||
83 | */ | ||
84 | struct mmp_tdma_desc { | ||
85 | u32 byte_cnt; | ||
86 | u32 src_addr; | ||
87 | u32 dst_addr; | ||
88 | u32 nxt_desc; | ||
89 | }; | ||
90 | |||
91 | enum mmp_tdma_type { | ||
92 | MMP_AUD_TDMA = 0, | ||
93 | PXA910_SQU, | ||
94 | }; | ||
95 | |||
96 | #define TDMA_ALIGNMENT 3 | ||
97 | #define TDMA_MAX_XFER_BYTES SZ_64K | ||
98 | |||
99 | struct mmp_tdma_chan { | ||
100 | struct device *dev; | ||
101 | struct dma_chan chan; | ||
102 | struct dma_async_tx_descriptor desc; | ||
103 | struct tasklet_struct tasklet; | ||
104 | |||
105 | struct mmp_tdma_desc *desc_arr; | ||
106 | phys_addr_t desc_arr_phys; | ||
107 | int desc_num; | ||
108 | enum dma_transfer_direction dir; | ||
109 | dma_addr_t dev_addr; | ||
110 | u32 burst_sz; | ||
111 | enum dma_slave_buswidth buswidth; | ||
112 | enum dma_status status; | ||
113 | |||
114 | int idx; | ||
115 | enum mmp_tdma_type type; | ||
116 | int irq; | ||
117 | unsigned long reg_base; | ||
118 | |||
119 | size_t buf_len; | ||
120 | size_t period_len; | ||
121 | size_t pos; | ||
122 | }; | ||
123 | |||
124 | #define TDMA_CHANNEL_NUM 2 | ||
125 | struct mmp_tdma_device { | ||
126 | struct device *dev; | ||
127 | void __iomem *base; | ||
128 | struct dma_device device; | ||
129 | struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM]; | ||
130 | int irq; | ||
131 | }; | ||
132 | |||
133 | #define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan) | ||
134 | |||
135 | static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys) | ||
136 | { | ||
137 | writel(phys, tdmac->reg_base + TDNDPR); | ||
138 | writel(readl(tdmac->reg_base + TDCR) | TDCR_FETCHND, | ||
139 | tdmac->reg_base + TDCR); | ||
140 | } | ||
141 | |||
142 | static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac) | ||
143 | { | ||
144 | /* enable irq */ | ||
145 | writel(TDIMR_COMP, tdmac->reg_base + TDIMR); | ||
146 | /* enable dma chan */ | ||
147 | writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN, | ||
148 | tdmac->reg_base + TDCR); | ||
149 | tdmac->status = DMA_IN_PROGRESS; | ||
150 | } | ||
151 | |||
152 | static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) | ||
153 | { | ||
154 | writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, | ||
155 | tdmac->reg_base + TDCR); | ||
156 | tdmac->status = DMA_SUCCESS; | ||
157 | } | ||
158 | |||
159 | static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) | ||
160 | { | ||
161 | writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN, | ||
162 | tdmac->reg_base + TDCR); | ||
163 | tdmac->status = DMA_IN_PROGRESS; | ||
164 | } | ||
165 | |||
166 | static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac) | ||
167 | { | ||
168 | writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, | ||
169 | tdmac->reg_base + TDCR); | ||
170 | tdmac->status = DMA_PAUSED; | ||
171 | } | ||
172 | |||
173 | static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) | ||
174 | { | ||
175 | unsigned int tdcr; | ||
176 | |||
177 | mmp_tdma_disable_chan(tdmac); | ||
178 | |||
179 | if (tdmac->dir == DMA_MEM_TO_DEV) | ||
180 | tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC; | ||
181 | else if (tdmac->dir == DMA_DEV_TO_MEM) | ||
182 | tdcr = TDCR_SRCDIR_ADDR_HOLD | TDCR_DSTDIR_ADDR_INC; | ||
183 | |||
184 | if (tdmac->type == MMP_AUD_TDMA) { | ||
185 | tdcr |= TDCR_PACKMOD; | ||
186 | |||
187 | switch (tdmac->burst_sz) { | ||
188 | case 4: | ||
189 | tdcr |= TDCR_BURSTSZ_4B; | ||
190 | break; | ||
191 | case 8: | ||
192 | tdcr |= TDCR_BURSTSZ_8B; | ||
193 | break; | ||
194 | case 16: | ||
195 | tdcr |= TDCR_BURSTSZ_16B; | ||
196 | break; | ||
197 | case 32: | ||
198 | tdcr |= TDCR_BURSTSZ_32B; | ||
199 | break; | ||
200 | case 64: | ||
201 | tdcr |= TDCR_BURSTSZ_64B; | ||
202 | break; | ||
203 | case 128: | ||
204 | tdcr |= TDCR_BURSTSZ_128B; | ||
205 | break; | ||
206 | default: | ||
207 | dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n"); | ||
208 | return -EINVAL; | ||
209 | } | ||
210 | |||
211 | switch (tdmac->buswidth) { | ||
212 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
213 | tdcr |= TDCR_SSZ_8_BITS; | ||
214 | break; | ||
215 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
216 | tdcr |= TDCR_SSZ_16_BITS; | ||
217 | break; | ||
218 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
219 | tdcr |= TDCR_SSZ_32_BITS; | ||
220 | break; | ||
221 | default: | ||
222 | dev_err(tdmac->dev, "mmp_tdma: unknown bus size.\n"); | ||
223 | return -EINVAL; | ||
224 | } | ||
225 | } else if (tdmac->type == PXA910_SQU) { | ||
226 | tdcr |= TDCR_BURSTSZ_SQU_32B; | ||
227 | tdcr |= TDCR_SSPMOD; | ||
228 | } | ||
229 | |||
230 | writel(tdcr, tdmac->reg_base + TDCR); | ||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac) | ||
235 | { | ||
236 | u32 reg = readl(tdmac->reg_base + TDISR); | ||
237 | |||
238 | if (reg & TDISR_COMP) { | ||
239 | /* clear irq */ | ||
240 | reg &= ~TDISR_COMP; | ||
241 | writel(reg, tdmac->reg_base + TDISR); | ||
242 | |||
243 | return 0; | ||
244 | } | ||
245 | return -EAGAIN; | ||
246 | } | ||
247 | |||
248 | static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id) | ||
249 | { | ||
250 | struct mmp_tdma_chan *tdmac = dev_id; | ||
251 | |||
252 | if (mmp_tdma_clear_chan_irq(tdmac) == 0) { | ||
253 | tdmac->pos = (tdmac->pos + tdmac->period_len) % tdmac->buf_len; | ||
254 | tasklet_schedule(&tdmac->tasklet); | ||
255 | return IRQ_HANDLED; | ||
256 | } else | ||
257 | return IRQ_NONE; | ||
258 | } | ||
259 | |||
260 | static irqreturn_t mmp_tdma_int_handler(int irq, void *dev_id) | ||
261 | { | ||
262 | struct mmp_tdma_device *tdev = dev_id; | ||
263 | int i, ret; | ||
264 | int irq_num = 0; | ||
265 | |||
266 | for (i = 0; i < TDMA_CHANNEL_NUM; i++) { | ||
267 | struct mmp_tdma_chan *tdmac = tdev->tdmac[i]; | ||
268 | |||
269 | ret = mmp_tdma_chan_handler(irq, tdmac); | ||
270 | if (ret == IRQ_HANDLED) | ||
271 | irq_num++; | ||
272 | } | ||
273 | |||
274 | if (irq_num) | ||
275 | return IRQ_HANDLED; | ||
276 | else | ||
277 | return IRQ_NONE; | ||
278 | } | ||
279 | |||
280 | static void dma_do_tasklet(unsigned long data) | ||
281 | { | ||
282 | struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data; | ||
283 | |||
284 | if (tdmac->desc.callback) | ||
285 | tdmac->desc.callback(tdmac->desc.callback_param); | ||
286 | |||
287 | } | ||
288 | |||
289 | static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac) | ||
290 | { | ||
291 | struct gen_pool *gpool; | ||
292 | int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); | ||
293 | |||
294 | gpool = sram_get_gpool("asram"); | ||
295 | if (tdmac->desc_arr) | ||
296 | gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, | ||
297 | size); | ||
298 | tdmac->desc_arr = NULL; | ||
299 | |||
300 | return; | ||
301 | } | ||
302 | |||
303 | static dma_cookie_t mmp_tdma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
304 | { | ||
305 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(tx->chan); | ||
306 | |||
307 | mmp_tdma_chan_set_desc(tdmac, tdmac->desc_arr_phys); | ||
308 | |||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan) | ||
313 | { | ||
314 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | ||
315 | int ret; | ||
316 | |||
317 | dma_async_tx_descriptor_init(&tdmac->desc, chan); | ||
318 | tdmac->desc.tx_submit = mmp_tdma_tx_submit; | ||
319 | |||
320 | if (tdmac->irq) { | ||
321 | ret = devm_request_irq(tdmac->dev, tdmac->irq, | ||
322 | mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac); | ||
323 | if (ret) | ||
324 | return ret; | ||
325 | } | ||
326 | return 1; | ||
327 | } | ||
328 | |||
329 | static void mmp_tdma_free_chan_resources(struct dma_chan *chan) | ||
330 | { | ||
331 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | ||
332 | |||
333 | if (tdmac->irq) | ||
334 | devm_free_irq(tdmac->dev, tdmac->irq, tdmac); | ||
335 | mmp_tdma_free_descriptor(tdmac); | ||
336 | return; | ||
337 | } | ||
338 | |||
339 | struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac) | ||
340 | { | ||
341 | struct gen_pool *gpool; | ||
342 | int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); | ||
343 | |||
344 | gpool = sram_get_gpool("asram"); | ||
345 | if (!gpool) | ||
346 | return NULL; | ||
347 | |||
348 | tdmac->desc_arr = (void *)gen_pool_alloc(gpool, size); | ||
349 | if (!tdmac->desc_arr) | ||
350 | return NULL; | ||
351 | |||
352 | tdmac->desc_arr_phys = gen_pool_virt_to_phys(gpool, | ||
353 | (unsigned long)tdmac->desc_arr); | ||
354 | |||
355 | return tdmac->desc_arr; | ||
356 | } | ||
357 | |||
358 | static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( | ||
359 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | ||
360 | size_t period_len, enum dma_transfer_direction direction, | ||
361 | void *context) | ||
362 | { | ||
363 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | ||
364 | struct mmp_tdma_desc *desc; | ||
365 | int num_periods = buf_len / period_len; | ||
366 | int i = 0, buf = 0; | ||
367 | |||
368 | if (tdmac->status != DMA_SUCCESS) | ||
369 | return NULL; | ||
370 | |||
371 | if (period_len > TDMA_MAX_XFER_BYTES) { | ||
372 | dev_err(tdmac->dev, | ||
373 | "maximum period size exceeded: %d > %d\n", | ||
374 | period_len, TDMA_MAX_XFER_BYTES); | ||
375 | goto err_out; | ||
376 | } | ||
377 | |||
378 | tdmac->status = DMA_IN_PROGRESS; | ||
379 | tdmac->desc_num = num_periods; | ||
380 | desc = mmp_tdma_alloc_descriptor(tdmac); | ||
381 | if (!desc) | ||
382 | goto err_out; | ||
383 | |||
384 | while (buf < buf_len) { | ||
385 | desc = &tdmac->desc_arr[i]; | ||
386 | |||
387 | if (i + 1 == num_periods) | ||
388 | desc->nxt_desc = tdmac->desc_arr_phys; | ||
389 | else | ||
390 | desc->nxt_desc = tdmac->desc_arr_phys + | ||
391 | sizeof(*desc) * (i + 1); | ||
392 | |||
393 | if (direction == DMA_MEM_TO_DEV) { | ||
394 | desc->src_addr = dma_addr; | ||
395 | desc->dst_addr = tdmac->dev_addr; | ||
396 | } else { | ||
397 | desc->src_addr = tdmac->dev_addr; | ||
398 | desc->dst_addr = dma_addr; | ||
399 | } | ||
400 | desc->byte_cnt = period_len; | ||
401 | dma_addr += period_len; | ||
402 | buf += period_len; | ||
403 | i++; | ||
404 | } | ||
405 | |||
406 | tdmac->buf_len = buf_len; | ||
407 | tdmac->period_len = period_len; | ||
408 | tdmac->pos = 0; | ||
409 | |||
410 | return &tdmac->desc; | ||
411 | |||
412 | err_out: | ||
413 | tdmac->status = DMA_ERROR; | ||
414 | return NULL; | ||
415 | } | ||
416 | |||
417 | static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
418 | unsigned long arg) | ||
419 | { | ||
420 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | ||
421 | struct dma_slave_config *dmaengine_cfg = (void *)arg; | ||
422 | int ret = 0; | ||
423 | |||
424 | switch (cmd) { | ||
425 | case DMA_TERMINATE_ALL: | ||
426 | mmp_tdma_disable_chan(tdmac); | ||
427 | break; | ||
428 | case DMA_PAUSE: | ||
429 | mmp_tdma_pause_chan(tdmac); | ||
430 | break; | ||
431 | case DMA_RESUME: | ||
432 | mmp_tdma_resume_chan(tdmac); | ||
433 | break; | ||
434 | case DMA_SLAVE_CONFIG: | ||
435 | if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { | ||
436 | tdmac->dev_addr = dmaengine_cfg->src_addr; | ||
437 | tdmac->burst_sz = dmaengine_cfg->src_maxburst; | ||
438 | tdmac->buswidth = dmaengine_cfg->src_addr_width; | ||
439 | } else { | ||
440 | tdmac->dev_addr = dmaengine_cfg->dst_addr; | ||
441 | tdmac->burst_sz = dmaengine_cfg->dst_maxburst; | ||
442 | tdmac->buswidth = dmaengine_cfg->dst_addr_width; | ||
443 | } | ||
444 | tdmac->dir = dmaengine_cfg->direction; | ||
445 | return mmp_tdma_config_chan(tdmac); | ||
446 | default: | ||
447 | ret = -ENOSYS; | ||
448 | } | ||
449 | |||
450 | return ret; | ||
451 | } | ||
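For orientation, a client driver (an audio PCM driver, say) reaches mmp_tdma_control() and mmp_tdma_prep_dma_cyclic() only through the generic dmaengine wrappers. A minimal, hypothetical sketch follows: start_cyclic_rx(), the FIFO address and the burst/width values are illustrative placeholders, the channel is assumed to have been obtained earlier with dma_request_channel(), and the dmaengine_prep_dma_cyclic() signature is the one current at the time of this merge.

	#include <linux/dmaengine.h>

	/* Hypothetical helper: 'chan' was obtained earlier via dma_request_channel() */
	static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
				   size_t buf_len, size_t period_len,
				   dma_addr_t fifo)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_DEV_TO_MEM,
			.src_addr	= fifo,	/* placeholder device FIFO address */
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst	= 4,
		};
		struct dma_async_tx_descriptor *desc;
		int ret;

		/* Ends up in mmp_tdma_control(..., DMA_SLAVE_CONFIG, ...) above */
		ret = dmaengine_slave_config(chan, &cfg);
		if (ret)
			return ret;

		/* Builds the circular descriptor ring via mmp_tdma_prep_dma_cyclic() */
		desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
						 DMA_DEV_TO_MEM);
		if (!desc)
			return -ENOMEM;

		dmaengine_submit(desc);		/* mmp_tdma_tx_submit() latches the ring head */
		dma_async_issue_pending(chan);	/* mmp_tdma_issue_pending() starts the channel */
		return 0;
	}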
452 | |||
453 | static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan, | ||
454 | dma_cookie_t cookie, struct dma_tx_state *txstate) | ||
455 | { | ||
456 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | ||
457 | |||
458 | dma_set_residue(txstate, tdmac->buf_len - tdmac->pos); | ||
459 | |||
460 | return tdmac->status; | ||
461 | } | ||
462 | |||
463 | static void mmp_tdma_issue_pending(struct dma_chan *chan) | ||
464 | { | ||
465 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | ||
466 | |||
467 | mmp_tdma_enable_chan(tdmac); | ||
468 | } | ||
469 | |||
470 | static int __devexit mmp_tdma_remove(struct platform_device *pdev) | ||
471 | { | ||
472 | struct mmp_tdma_device *tdev = platform_get_drvdata(pdev); | ||
473 | |||
474 | dma_async_device_unregister(&tdev->device); | ||
475 | return 0; | ||
476 | } | ||
477 | |||
478 | static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev, | ||
479 | int idx, int irq, int type) | ||
480 | { | ||
481 | struct mmp_tdma_chan *tdmac; | ||
482 | |||
483 | if (idx >= TDMA_CHANNEL_NUM) { | ||
484 | dev_err(tdev->dev, "too many channels for device!\n"); | ||
485 | return -EINVAL; | ||
486 | } | ||
487 | |||
488 | /* alloc channel */ | ||
489 | tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL); | ||
490 | if (!tdmac) { | ||
491 | dev_err(tdev->dev, "no free memory for DMA channels!\n"); | ||
492 | return -ENOMEM; | ||
493 | } | ||
494 | if (irq) | ||
495 | tdmac->irq = irq + idx; | ||
496 | tdmac->dev = tdev->dev; | ||
497 | tdmac->chan.device = &tdev->device; | ||
498 | tdmac->idx = idx; | ||
499 | tdmac->type = type; | ||
500 | tdmac->reg_base = (unsigned long)tdev->base + idx * 4; | ||
501 | tdmac->status = DMA_SUCCESS; | ||
502 | tdev->tdmac[tdmac->idx] = tdmac; | ||
503 | tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac); | ||
504 | |||
505 | /* add the channel to tdma_chan list */ | ||
506 | list_add_tail(&tdmac->chan.device_node, | ||
507 | &tdev->device.channels); | ||
508 | |||
509 | return 0; | ||
510 | } | ||
511 | |||
512 | static int __devinit mmp_tdma_probe(struct platform_device *pdev) | ||
513 | { | ||
514 | const struct platform_device_id *id = platform_get_device_id(pdev); | ||
515 | enum mmp_tdma_type type = id->driver_data; | ||
516 | struct mmp_tdma_device *tdev; | ||
517 | struct resource *iores; | ||
518 | int i, ret; | ||
519 | int irq = 0; | ||
520 | int chan_num = TDMA_CHANNEL_NUM; | ||
521 | |||
522 | /* always have a couple of channels */ | ||
523 | tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL); | ||
524 | if (!tdev) | ||
525 | return -ENOMEM; | ||
526 | |||
527 | tdev->dev = &pdev->dev; | ||
528 | iores = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
529 | if (!iores) | ||
530 | return -EINVAL; | ||
531 | |||
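	/*
	 * A resource spanning exactly chan_num IRQs provides one dedicated
	 * line per channel (requested per channel in
	 * mmp_tdma_alloc_chan_resources()); any other size is treated as a
	 * single combined interrupt for the whole device, handled by
	 * mmp_tdma_int_handler().
	 */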
532 | if (resource_size(iores) != chan_num) | ||
533 | tdev->irq = iores->start; | ||
534 | else | ||
535 | irq = iores->start; | ||
536 | |||
537 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
538 | if (!iores) | ||
539 | return -EINVAL; | ||
540 | |||
541 | tdev->base = devm_request_and_ioremap(&pdev->dev, iores); | ||
542 | if (!tdev->base) | ||
543 | return -EADDRNOTAVAIL; | ||
544 | |||
545 | if (tdev->irq) { | ||
546 | ret = devm_request_irq(&pdev->dev, tdev->irq, | ||
547 | mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev); | ||
548 | if (ret) | ||
549 | return ret; | ||
550 | } | ||
551 | |||
552 | dma_cap_set(DMA_SLAVE, tdev->device.cap_mask); | ||
553 | dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask); | ||
554 | |||
555 | INIT_LIST_HEAD(&tdev->device.channels); | ||
556 | |||
557 | /* initialize channel parameters */ | ||
558 | for (i = 0; i < chan_num; i++) { | ||
559 | ret = mmp_tdma_chan_init(tdev, i, irq, type); | ||
560 | if (ret) | ||
561 | return ret; | ||
562 | } | ||
563 | |||
564 | tdev->device.dev = &pdev->dev; | ||
565 | tdev->device.device_alloc_chan_resources = | ||
566 | mmp_tdma_alloc_chan_resources; | ||
567 | tdev->device.device_free_chan_resources = | ||
568 | mmp_tdma_free_chan_resources; | ||
569 | tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic; | ||
570 | tdev->device.device_tx_status = mmp_tdma_tx_status; | ||
571 | tdev->device.device_issue_pending = mmp_tdma_issue_pending; | ||
572 | tdev->device.device_control = mmp_tdma_control; | ||
573 | tdev->device.copy_align = TDMA_ALIGNMENT; | ||
574 | |||
575 | dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); | ||
576 | platform_set_drvdata(pdev, tdev); | ||
577 | |||
578 | ret = dma_async_device_register(&tdev->device); | ||
579 | if (ret) { | ||
580 | dev_err(tdev->device.dev, "unable to register\n"); | ||
581 | return ret; | ||
582 | } | ||
583 | |||
584 | dev_info(tdev->device.dev, "initialized\n"); | ||
585 | return 0; | ||
586 | } | ||
587 | |||
588 | static const struct platform_device_id mmp_tdma_id_table[] = { | ||
589 | { "mmp-adma", MMP_AUD_TDMA }, | ||
590 | { "pxa910-squ", PXA910_SQU }, | ||
591 | { }, | ||
592 | }; | ||
593 | |||
594 | static struct platform_driver mmp_tdma_driver = { | ||
595 | .driver = { | ||
596 | .name = "mmp-tdma", | ||
597 | .owner = THIS_MODULE, | ||
598 | }, | ||
599 | .id_table = mmp_tdma_id_table, | ||
600 | .probe = mmp_tdma_probe, | ||
601 | .remove = __devexit_p(mmp_tdma_remove), | ||
602 | }; | ||
603 | |||
604 | module_platform_driver(mmp_tdma_driver); | ||
605 | |||
606 | MODULE_LICENSE("GPL"); | ||
607 | MODULE_DESCRIPTION("MMP Two-Channel DMA Driver"); | ||
608 | MODULE_ALIAS("platform:mmp-tdma"); | ||
609 | MODULE_AUTHOR("Leo Yan <leoy@marvell.com>"); | ||
610 | MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>"); | ||
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index c96ab15319f..7f41b25805f 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <linux/of_device.h> | 29 | #include <linux/of_device.h> |
30 | 30 | ||
31 | #include <asm/irq.h> | 31 | #include <asm/irq.h> |
32 | #include <mach/mxs.h> | ||
33 | 32 | ||
34 | #include "dmaengine.h" | 33 | #include "dmaengine.h" |
35 | 34 | ||
@@ -201,6 +200,7 @@ int mxs_dma_is_apbh(struct dma_chan *chan) | |||
201 | 200 | ||
202 | return dma_is_apbh(mxs_dma); | 201 | return dma_is_apbh(mxs_dma); |
203 | } | 202 | } |
203 | EXPORT_SYMBOL_GPL(mxs_dma_is_apbh); | ||
204 | 204 | ||
205 | int mxs_dma_is_apbx(struct dma_chan *chan) | 205 | int mxs_dma_is_apbx(struct dma_chan *chan) |
206 | { | 206 | { |
@@ -209,6 +209,7 @@ int mxs_dma_is_apbx(struct dma_chan *chan) | |||
209 | 209 | ||
210 | return !dma_is_apbh(mxs_dma); | 210 | return !dma_is_apbh(mxs_dma); |
211 | } | 211 | } |
212 | EXPORT_SYMBOL_GPL(mxs_dma_is_apbx); | ||
212 | 213 | ||
213 | static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) | 214 | static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) |
214 | { | 215 | { |
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile new file mode 100644 index 00000000000..54ae9572b0a --- /dev/null +++ b/drivers/dma/sh/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | obj-$(CONFIG_SH_DMAE) += shdma-base.o | ||
2 | obj-$(CONFIG_SH_DMAE) += shdma.o | ||
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c new file mode 100644 index 00000000000..27f5c781fd7 --- /dev/null +++ b/drivers/dma/sh/shdma-base.c | |||
@@ -0,0 +1,934 @@ | |||
1 | /* | ||
2 | * Dmaengine driver base library for DMA controllers, found on SH-based SoCs | ||
3 | * | ||
4 | * extracted from shdma.c | ||
5 | * | ||
6 | * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> | ||
7 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | ||
8 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. | ||
9 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
10 | * | ||
11 | * This is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of version 2 of the GNU General Public License as | ||
13 | * published by the Free Software Foundation. | ||
14 | */ | ||
15 | |||
16 | #include <linux/delay.h> | ||
17 | #include <linux/shdma-base.h> | ||
18 | #include <linux/dmaengine.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/pm_runtime.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | |||
26 | #include "../dmaengine.h" | ||
27 | |||
28 | /* DMA descriptor control */ | ||
29 | enum shdma_desc_status { | ||
30 | DESC_IDLE, | ||
31 | DESC_PREPARED, | ||
32 | DESC_SUBMITTED, | ||
33 | DESC_COMPLETED, /* completed, have to call callback */ | ||
34 | DESC_WAITING, /* callback called, waiting for ack / re-submit */ | ||
35 | }; | ||
36 | |||
37 | #define NR_DESCS_PER_CHANNEL 32 | ||
38 | |||
39 | #define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan) | ||
40 | #define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev) | ||
41 | |||
42 | /* | ||
43 | * For slave DMA we assume that there is a finite number of DMA slaves in the | ||
44 | * system, and that each such slave can only use a finite number of channels. | ||
45 | * We use slave channel IDs to make sure that no such slave channel ID is | ||
46 | * allocated more than once. | ||
47 | */ | ||
48 | static unsigned int slave_num = 256; | ||
49 | module_param(slave_num, uint, 0444); | ||
50 | |||
51 | /* A bitmask with slave_num bits */ | ||
52 | static unsigned long *shdma_slave_used; | ||
53 | |||
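The bookkeeping described above is a simple claim/release bitmap. A standalone userspace sketch of the same idea (illustrative only; the kernel code uses the atomic test_and_set_bit()/clear_bit() helpers on this bitmask):

	#include <limits.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define SLAVE_NUM	256
	#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)

	static unsigned long slave_used[SLAVE_NUM / BITS_PER_WORD];

	/* Claim a slave ID; fails if it is already taken (non-atomic sketch). */
	static bool claim_slave(int id)
	{
		unsigned long mask = 1UL << (id % BITS_PER_WORD);
		unsigned long *word = &slave_used[id / BITS_PER_WORD];

		if (*word & mask)
			return false;
		*word |= mask;
		return true;
	}

	static void release_slave(int id)
	{
		slave_used[id / BITS_PER_WORD] &= ~(1UL << (id % BITS_PER_WORD));
	}

	int main(void)
	{
		printf("first claim:   %d\n", claim_slave(5));	/* 1: now in use */
		printf("second claim:  %d\n", claim_slave(5));	/* 0: already taken */
		release_slave(5);
		printf("after release: %d\n", claim_slave(5));	/* 1 again */
		return 0;
	}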
54 | /* Called under spin_lock_irq(&schan->chan_lock) */ | ||
55 | static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan) | ||
56 | { | ||
57 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | ||
58 | const struct shdma_ops *ops = sdev->ops; | ||
59 | struct shdma_desc *sdesc; | ||
60 | |||
61 | /* DMA work check */ | ||
62 | if (ops->channel_busy(schan)) | ||
63 | return; | ||
64 | |||
65 | /* Find the first not transferred descriptor */ | ||
66 | list_for_each_entry(sdesc, &schan->ld_queue, node) | ||
67 | if (sdesc->mark == DESC_SUBMITTED) { | ||
68 | ops->start_xfer(schan, sdesc); | ||
69 | break; | ||
70 | } | ||
71 | } | ||
72 | |||
73 | static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
74 | { | ||
75 | struct shdma_desc *chunk, *c, *desc = | ||
76 | container_of(tx, struct shdma_desc, async_tx), | ||
77 | *last = desc; | ||
78 | struct shdma_chan *schan = to_shdma_chan(tx->chan); | ||
79 | dma_async_tx_callback callback = tx->callback; | ||
80 | dma_cookie_t cookie; | ||
81 | bool power_up; | ||
82 | |||
83 | spin_lock_irq(&schan->chan_lock); | ||
84 | |||
85 | power_up = list_empty(&schan->ld_queue); | ||
86 | |||
87 | cookie = dma_cookie_assign(tx); | ||
88 | |||
89 | /* Mark all chunks of this descriptor as submitted, move to the queue */ | ||
90 | list_for_each_entry_safe(chunk, c, desc->node.prev, node) { | ||
91 | /* | ||
92 | * All chunks are on the global ld_free, so, we have to find | ||
93 | * the end of the chain ourselves | ||
94 | */ | ||
95 | if (chunk != desc && (chunk->mark == DESC_IDLE || | ||
96 | chunk->async_tx.cookie > 0 || | ||
97 | chunk->async_tx.cookie == -EBUSY || | ||
98 | &chunk->node == &schan->ld_free)) | ||
99 | break; | ||
100 | chunk->mark = DESC_SUBMITTED; | ||
101 | /* Callback goes to the last chunk */ | ||
102 | chunk->async_tx.callback = NULL; | ||
103 | chunk->cookie = cookie; | ||
104 | list_move_tail(&chunk->node, &schan->ld_queue); | ||
105 | last = chunk; | ||
106 | |||
107 | dev_dbg(schan->dev, "submit #%d@%p on %d\n", | ||
108 | tx->cookie, &last->async_tx, schan->id); | ||
109 | } | ||
110 | |||
111 | last->async_tx.callback = callback; | ||
112 | last->async_tx.callback_param = tx->callback_param; | ||
113 | |||
114 | if (power_up) { | ||
115 | int ret; | ||
116 | schan->pm_state = SHDMA_PM_BUSY; | ||
117 | |||
118 | ret = pm_runtime_get(schan->dev); | ||
119 | |||
120 | spin_unlock_irq(&schan->chan_lock); | ||
121 | if (ret < 0) | ||
122 | dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret); | ||
123 | |||
124 | pm_runtime_barrier(schan->dev); | ||
125 | |||
126 | spin_lock_irq(&schan->chan_lock); | ||
127 | |||
128 | /* Have we been reset, while waiting? */ | ||
129 | if (schan->pm_state != SHDMA_PM_ESTABLISHED) { | ||
130 | struct shdma_dev *sdev = | ||
131 | to_shdma_dev(schan->dma_chan.device); | ||
132 | const struct shdma_ops *ops = sdev->ops; | ||
133 | dev_dbg(schan->dev, "Bring up channel %d\n", | ||
134 | schan->id); | ||
135 | /* | ||
136 | * TODO: .xfer_setup() might fail on some platforms. | ||
137 | * Make it int then, on error remove chunks from the | ||
138 | * queue again | ||
139 | */ | ||
140 | ops->setup_xfer(schan, schan->slave_id); | ||
141 | |||
142 | if (schan->pm_state == SHDMA_PM_PENDING) | ||
143 | shdma_chan_xfer_ld_queue(schan); | ||
144 | schan->pm_state = SHDMA_PM_ESTABLISHED; | ||
145 | } | ||
146 | } else { | ||
147 | /* | ||
148 | * Tell .device_issue_pending() not to run the queue, interrupts | ||
149 | * will do it anyway | ||
150 | */ | ||
151 | schan->pm_state = SHDMA_PM_PENDING; | ||
152 | } | ||
153 | |||
154 | spin_unlock_irq(&schan->chan_lock); | ||
155 | |||
156 | return cookie; | ||
157 | } | ||
158 | |||
159 | /* Called with desc_lock held */ | ||
160 | static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan) | ||
161 | { | ||
162 | struct shdma_desc *sdesc; | ||
163 | |||
164 | list_for_each_entry(sdesc, &schan->ld_free, node) | ||
165 | if (sdesc->mark != DESC_PREPARED) { | ||
166 | BUG_ON(sdesc->mark != DESC_IDLE); | ||
167 | list_del(&sdesc->node); | ||
168 | return sdesc; | ||
169 | } | ||
170 | |||
171 | return NULL; | ||
172 | } | ||
173 | |||
174 | static int shdma_setup_slave(struct shdma_chan *schan, int slave_id) | ||
175 | { | ||
176 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | ||
177 | const struct shdma_ops *ops = sdev->ops; | ||
178 | int ret; | ||
179 | |||
180 | if (slave_id < 0 || slave_id >= slave_num) | ||
181 | return -EINVAL; | ||
182 | |||
183 | if (test_and_set_bit(slave_id, shdma_slave_used)) | ||
184 | return -EBUSY; | ||
185 | |||
186 | ret = ops->set_slave(schan, slave_id, false); | ||
187 | if (ret < 0) { | ||
188 | clear_bit(slave_id, shdma_slave_used); | ||
189 | return ret; | ||
190 | } | ||
191 | |||
192 | schan->slave_id = slave_id; | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | /* | ||
198 | * This is the standard shdma filter function to be used as a replacement for the | ||
199 | * "old" method, using the .private pointer. If for some reason you allocate a | ||
200 | * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter | ||
201 | * parameter. If this filter is used, the slave driver, after calling | ||
202 | * dma_request_channel(), will also have to call dmaengine_slave_config() with | ||
203 | * .slave_id, .direction, and either .src_addr or .dst_addr set. | ||
204 | * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE | ||
205 | * capability! If this becomes a requirement, hardware glue drivers using these | ||
206 | * services would have to provide their own filters, which would first check | ||
207 | * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do | ||
208 | * this, and only then, in case of a match, call this common filter. | ||
209 | */ | ||
210 | bool shdma_chan_filter(struct dma_chan *chan, void *arg) | ||
211 | { | ||
212 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
213 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | ||
214 | const struct shdma_ops *ops = sdev->ops; | ||
215 | int slave_id = (int)arg; | ||
216 | int ret; | ||
217 | |||
218 | if (slave_id < 0) | ||
219 | /* No slave requested - arbitrary channel */ | ||
220 | return true; | ||
221 | |||
222 | if (slave_id >= slave_num) | ||
223 | return false; | ||
224 | |||
225 | ret = ops->set_slave(schan, slave_id, true); | ||
226 | if (ret < 0) | ||
227 | return false; | ||
228 | |||
229 | return true; | ||
230 | } | ||
231 | EXPORT_SYMBOL(shdma_chan_filter); | ||
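A hypothetical slave driver using this filter might look roughly like the sketch below; request_tx_channel() and fifo_addr are placeholders, and the dma_slave_config fields match what shdma_control() consumes further down in this file.

	#include <linux/dmaengine.h>
	#include <linux/shdma-base.h>

	static struct dma_chan *request_tx_channel(int slave_id, dma_addr_t fifo_addr)
	{
		struct dma_slave_config cfg = {
			.slave_id	= slave_id,
			.direction	= DMA_MEM_TO_DEV,
			.dst_addr	= fifo_addr,	/* placeholder device FIFO address */
		};
		dma_cap_mask_t mask;
		struct dma_chan *chan;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* shdma_chan_filter() accepts any channel when slave_id is negative */
		chan = dma_request_channel(mask, shdma_chan_filter,
					   (void *)(unsigned long)slave_id);
		if (!chan)
			return NULL;

		/* Routed to shdma_control(DMA_SLAVE_CONFIG) -> shdma_setup_slave() */
		if (dmaengine_slave_config(chan, &cfg) < 0) {
			dma_release_channel(chan);
			return NULL;
		}

		return chan;
	}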
232 | |||
233 | static int shdma_alloc_chan_resources(struct dma_chan *chan) | ||
234 | { | ||
235 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
236 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | ||
237 | const struct shdma_ops *ops = sdev->ops; | ||
238 | struct shdma_desc *desc; | ||
239 | struct shdma_slave *slave = chan->private; | ||
240 | int ret, i; | ||
241 | |||
242 | /* | ||
243 | * This relies on the guarantee from dmaengine that alloc_chan_resources | ||
244 | * never runs concurrently with itself or free_chan_resources. | ||
245 | */ | ||
246 | if (slave) { | ||
247 | /* Legacy mode: .private is set in filter */ | ||
248 | ret = shdma_setup_slave(schan, slave->slave_id); | ||
249 | if (ret < 0) | ||
250 | goto esetslave; | ||
251 | } else { | ||
252 | schan->slave_id = -EINVAL; | ||
253 | } | ||
254 | |||
255 | schan->desc = kcalloc(NR_DESCS_PER_CHANNEL, | ||
256 | sdev->desc_size, GFP_KERNEL); | ||
257 | if (!schan->desc) { | ||
258 | ret = -ENOMEM; | ||
259 | goto edescalloc; | ||
260 | } | ||
261 | schan->desc_num = NR_DESCS_PER_CHANNEL; | ||
262 | |||
263 | for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) { | ||
264 | desc = ops->embedded_desc(schan->desc, i); | ||
265 | dma_async_tx_descriptor_init(&desc->async_tx, | ||
266 | &schan->dma_chan); | ||
267 | desc->async_tx.tx_submit = shdma_tx_submit; | ||
268 | desc->mark = DESC_IDLE; | ||
269 | |||
270 | list_add(&desc->node, &schan->ld_free); | ||
271 | } | ||
272 | |||
273 | return NR_DESCS_PER_CHANNEL; | ||
274 | |||
275 | edescalloc: | ||
276 | if (slave) | ||
277 | esetslave: | ||
278 | clear_bit(slave->slave_id, shdma_slave_used); | ||
279 | chan->private = NULL; | ||
280 | return ret; | ||
281 | } | ||
282 | |||
283 | static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) | ||
284 | { | ||
285 | struct shdma_desc *desc, *_desc; | ||
286 | /* Is the "exposed" head of a chain acked? */ | ||
287 | bool head_acked = false; | ||
288 | dma_cookie_t cookie = 0; | ||
289 | dma_async_tx_callback callback = NULL; | ||
290 | void *param = NULL; | ||
291 | unsigned long flags; | ||
292 | |||
293 | spin_lock_irqsave(&schan->chan_lock, flags); | ||
294 | list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) { | ||
295 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | ||
296 | |||
297 | BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie); | ||
298 | BUG_ON(desc->mark != DESC_SUBMITTED && | ||
299 | desc->mark != DESC_COMPLETED && | ||
300 | desc->mark != DESC_WAITING); | ||
301 | |||
302 | /* | ||
303 | * queue is ordered, and we use this loop to (1) clean up all | ||
304 | * completed descriptors, and to (2) update descriptor flags of | ||
305 | * any chunks in a (partially) completed chain | ||
306 | */ | ||
307 | if (!all && desc->mark == DESC_SUBMITTED && | ||
308 | desc->cookie != cookie) | ||
309 | break; | ||
310 | |||
311 | if (tx->cookie > 0) | ||
312 | cookie = tx->cookie; | ||
313 | |||
314 | if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { | ||
315 | if (schan->dma_chan.completed_cookie != desc->cookie - 1) | ||
316 | dev_dbg(schan->dev, | ||
317 | "Completing cookie %d, expected %d\n", | ||
318 | desc->cookie, | ||
319 | schan->dma_chan.completed_cookie + 1); | ||
320 | schan->dma_chan.completed_cookie = desc->cookie; | ||
321 | } | ||
322 | |||
323 | /* Call callback on the last chunk */ | ||
324 | if (desc->mark == DESC_COMPLETED && tx->callback) { | ||
325 | desc->mark = DESC_WAITING; | ||
326 | callback = tx->callback; | ||
327 | param = tx->callback_param; | ||
328 | dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n", | ||
329 | tx->cookie, tx, schan->id); | ||
330 | BUG_ON(desc->chunks != 1); | ||
331 | break; | ||
332 | } | ||
333 | |||
334 | if (tx->cookie > 0 || tx->cookie == -EBUSY) { | ||
335 | if (desc->mark == DESC_COMPLETED) { | ||
336 | BUG_ON(tx->cookie < 0); | ||
337 | desc->mark = DESC_WAITING; | ||
338 | } | ||
339 | head_acked = async_tx_test_ack(tx); | ||
340 | } else { | ||
341 | switch (desc->mark) { | ||
342 | case DESC_COMPLETED: | ||
343 | desc->mark = DESC_WAITING; | ||
344 | /* Fall through */ | ||
345 | case DESC_WAITING: | ||
346 | if (head_acked) | ||
347 | async_tx_ack(&desc->async_tx); | ||
348 | } | ||
349 | } | ||
350 | |||
351 | dev_dbg(schan->dev, "descriptor %p #%d completed.\n", | ||
352 | tx, tx->cookie); | ||
353 | |||
354 | if (((desc->mark == DESC_COMPLETED || | ||
355 | desc->mark == DESC_WAITING) && | ||
356 | async_tx_test_ack(&desc->async_tx)) || all) { | ||
357 | /* Remove from ld_queue list */ | ||
358 | desc->mark = DESC_IDLE; | ||
359 | |||
360 | list_move(&desc->node, &schan->ld_free); | ||
361 | |||
362 | if (list_empty(&schan->ld_queue)) { | ||
363 | dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); | ||
364 | pm_runtime_put(schan->dev); | ||
365 | schan->pm_state = SHDMA_PM_ESTABLISHED; | ||
366 | } | ||
367 | } | ||
368 | } | ||
369 | |||
370 | if (all && !callback) | ||
371 | /* | ||
372 | * Terminating and the loop completed normally: forgive | ||
373 | * uncompleted cookies | ||
374 | */ | ||
375 | schan->dma_chan.completed_cookie = schan->dma_chan.cookie; | ||
376 | |||
377 | spin_unlock_irqrestore(&schan->chan_lock, flags); | ||
378 | |||
379 | if (callback) | ||
380 | callback(param); | ||
381 | |||
382 | return callback; | ||
383 | } | ||
384 | |||
385 | /* | ||
386 | * shdma_chan_ld_cleanup - Clean up link descriptors | ||
387 | * | ||
388 | * Clean up the ld_queue of a DMA channel. | ||
389 | */ | ||
390 | static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all) | ||
391 | { | ||
392 | while (__ld_cleanup(schan, all)) | ||
393 | ; | ||
394 | } | ||
395 | |||
396 | /* | ||
397 | * shdma_free_chan_resources - Free all resources of the channel. | ||
398 | */ | ||
399 | static void shdma_free_chan_resources(struct dma_chan *chan) | ||
400 | { | ||
401 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
402 | struct shdma_dev *sdev = to_shdma_dev(chan->device); | ||
403 | const struct shdma_ops *ops = sdev->ops; | ||
404 | LIST_HEAD(list); | ||
405 | |||
406 | /* Protect against ISR */ | ||
407 | spin_lock_irq(&schan->chan_lock); | ||
408 | ops->halt_channel(schan); | ||
409 | spin_unlock_irq(&schan->chan_lock); | ||
410 | |||
411 | /* Now no new interrupts will occur */ | ||
412 | |||
413 | /* Prepared and not submitted descriptors can still be on the queue */ | ||
414 | if (!list_empty(&schan->ld_queue)) | ||
415 | shdma_chan_ld_cleanup(schan, true); | ||
416 | |||
417 | if (schan->slave_id >= 0) { | ||
418 | /* The caller is holding dma_list_mutex */ | ||
419 | clear_bit(schan->slave_id, shdma_slave_used); | ||
420 | chan->private = NULL; | ||
421 | } | ||
422 | |||
423 | spin_lock_irq(&schan->chan_lock); | ||
424 | |||
425 | list_splice_init(&schan->ld_free, &list); | ||
426 | schan->desc_num = 0; | ||
427 | |||
428 | spin_unlock_irq(&schan->chan_lock); | ||
429 | |||
430 | kfree(schan->desc); | ||
431 | } | ||
432 | |||
433 | /** | ||
434 | * shdma_add_desc - get, set up and return one transfer descriptor | ||
435 | * @schan: DMA channel | ||
436 | * @flags: DMA transfer flags | ||
437 | * @dst: destination DMA address, incremented when direction equals | ||
438 | * DMA_DEV_TO_MEM or DMA_MEM_TO_MEM | ||
439 | * @src: source DMA address, incremented when direction equals | ||
440 | * DMA_MEM_TO_DEV or DMA_MEM_TO_MEM | ||
441 | * @len: DMA transfer length | ||
442 | * @first: if NULL, set to the current descriptor and cookie set to -EBUSY | ||
443 | * @direction: needed for slave DMA to decide which address to keep constant, | ||
444 | * equals DMA_MEM_TO_MEM for MEMCPY | ||
445 | * Returns 0 or an error | ||
446 | * Locks: called with desc_lock held | ||
447 | */ | ||
448 | static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan, | ||
449 | unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len, | ||
450 | struct shdma_desc **first, enum dma_transfer_direction direction) | ||
451 | { | ||
452 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | ||
453 | const struct shdma_ops *ops = sdev->ops; | ||
454 | struct shdma_desc *new; | ||
455 | size_t copy_size = *len; | ||
456 | |||
457 | if (!copy_size) | ||
458 | return NULL; | ||
459 | |||
460 | /* Allocate the link descriptor from the free list */ | ||
461 | new = shdma_get_desc(schan); | ||
462 | if (!new) { | ||
463 | dev_err(schan->dev, "No free link descriptor available\n"); | ||
464 | return NULL; | ||
465 | } | ||
466 | |||
467 | ops->desc_setup(schan, new, *src, *dst, ©_size); | ||
468 | |||
469 | if (!*first) { | ||
470 | /* First desc */ | ||
471 | new->async_tx.cookie = -EBUSY; | ||
472 | *first = new; | ||
473 | } else { | ||
474 | /* Other desc - invisible to the user */ | ||
475 | new->async_tx.cookie = -EINVAL; | ||
476 | } | ||
477 | |||
478 | dev_dbg(schan->dev, | ||
479 | "chaining (%u/%u)@%x -> %x with %p, cookie %d\n", | ||
480 | copy_size, *len, *src, *dst, &new->async_tx, | ||
481 | new->async_tx.cookie); | ||
482 | |||
483 | new->mark = DESC_PREPARED; | ||
484 | new->async_tx.flags = flags; | ||
485 | new->direction = direction; | ||
486 | |||
487 | *len -= copy_size; | ||
488 | if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) | ||
489 | *src += copy_size; | ||
490 | if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM) | ||
491 | *dst += copy_size; | ||
492 | |||
493 | return new; | ||
494 | } | ||
495 | |||
496 | /* | ||
497 | * shdma_prep_sg - prepare transfer descriptors from an SG list | ||
498 | * | ||
499 | * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also | ||
501 | * converted to scatter-gather to guarantee consistent locking and correct | ||
502 | * list manipulation. For slave DMA, direction carries its usual meaning and, | ||
503 | * logically, the SG list is RAM and the addr variable contains the slave | ||
504 | * address, e.g., the FIFO I/O register. For MEMCPY, direction equals DMA_MEM_TO_MEM | ||
504 | * and the SG list contains only one element and points at the source buffer. | ||
505 | */ | ||
506 | static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan, | ||
507 | struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, | ||
508 | enum dma_transfer_direction direction, unsigned long flags) | ||
509 | { | ||
510 | struct scatterlist *sg; | ||
511 | struct shdma_desc *first = NULL, *new = NULL /* compiler... */; | ||
512 | LIST_HEAD(tx_list); | ||
513 | int chunks = 0; | ||
514 | unsigned long irq_flags; | ||
515 | int i; | ||
516 | |||
517 | for_each_sg(sgl, sg, sg_len, i) | ||
518 | chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len); | ||
519 | |||
520 | /* Have to lock the whole loop to protect against concurrent release */ | ||
521 | spin_lock_irqsave(&schan->chan_lock, irq_flags); | ||
522 | |||
523 | /* | ||
524 | * Chaining: | ||
525 | * the first descriptor is what the user deals with in all API calls; its | ||
526 | * cookie is initially set to -EBUSY and changed to a positive number at | ||
527 | * tx-submit | ||
528 | * if more than one chunk is needed, further chunks have cookie = -EINVAL | ||
529 | * the last chunk, if not equal to the first, has cookie = -ENOSPC | ||
530 | * all chunks are linked onto the tx_list head with their .node heads | ||
531 | * only during this function, then they are immediately spliced | ||
532 | * back onto the free list in the form of a chain | ||
533 | */ | ||
534 | for_each_sg(sgl, sg, sg_len, i) { | ||
535 | dma_addr_t sg_addr = sg_dma_address(sg); | ||
536 | size_t len = sg_dma_len(sg); | ||
537 | |||
538 | if (!len) | ||
539 | goto err_get_desc; | ||
540 | |||
541 | do { | ||
542 | dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %llx\n", | ||
543 | i, sg, len, (unsigned long long)sg_addr); | ||
544 | |||
545 | if (direction == DMA_DEV_TO_MEM) | ||
546 | new = shdma_add_desc(schan, flags, | ||
547 | &sg_addr, addr, &len, &first, | ||
548 | direction); | ||
549 | else | ||
550 | new = shdma_add_desc(schan, flags, | ||
551 | addr, &sg_addr, &len, &first, | ||
552 | direction); | ||
553 | if (!new) | ||
554 | goto err_get_desc; | ||
555 | |||
556 | new->chunks = chunks--; | ||
557 | list_add_tail(&new->node, &tx_list); | ||
558 | } while (len); | ||
559 | } | ||
560 | |||
561 | if (new != first) | ||
562 | new->async_tx.cookie = -ENOSPC; | ||
563 | |||
564 | /* Put them back on the free list, so, they don't get lost */ | ||
565 | list_splice_tail(&tx_list, &schan->ld_free); | ||
566 | |||
567 | spin_unlock_irqrestore(&schan->chan_lock, irq_flags); | ||
568 | |||
569 | return &first->async_tx; | ||
570 | |||
571 | err_get_desc: | ||
572 | list_for_each_entry(new, &tx_list, node) | ||
573 | new->mark = DESC_IDLE; | ||
574 | list_splice(&tx_list, &schan->ld_free); | ||
575 | |||
576 | spin_unlock_irqrestore(&schan->chan_lock, irq_flags); | ||
577 | |||
578 | return NULL; | ||
579 | } | ||
580 | |||
581 | static struct dma_async_tx_descriptor *shdma_prep_memcpy( | ||
582 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | ||
583 | size_t len, unsigned long flags) | ||
584 | { | ||
585 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
586 | struct scatterlist sg; | ||
587 | |||
588 | if (!chan || !len) | ||
589 | return NULL; | ||
590 | |||
591 | BUG_ON(!schan->desc_num); | ||
592 | |||
593 | sg_init_table(&sg, 1); | ||
594 | sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len, | ||
595 | offset_in_page(dma_src)); | ||
596 | sg_dma_address(&sg) = dma_src; | ||
597 | sg_dma_len(&sg) = len; | ||
598 | |||
599 | return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags); | ||
600 | } | ||
601 | |||
602 | static struct dma_async_tx_descriptor *shdma_prep_slave_sg( | ||
603 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, | ||
604 | enum dma_transfer_direction direction, unsigned long flags, void *context) | ||
605 | { | ||
606 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
607 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | ||
608 | const struct shdma_ops *ops = sdev->ops; | ||
609 | int slave_id = schan->slave_id; | ||
610 | dma_addr_t slave_addr; | ||
611 | |||
612 | if (!chan) | ||
613 | return NULL; | ||
614 | |||
615 | BUG_ON(!schan->desc_num); | ||
616 | |||
617 | /* Someone calling slave DMA on a generic channel? */ | ||
618 | if (slave_id < 0 || !sg_len) { | ||
619 | dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n", | ||
620 | __func__, sg_len, slave_id); | ||
621 | return NULL; | ||
622 | } | ||
623 | |||
624 | slave_addr = ops->slave_addr(schan); | ||
625 | |||
626 | return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, | ||
627 | direction, flags); | ||
628 | } | ||
629 | |||
630 | static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
631 | unsigned long arg) | ||
632 | { | ||
633 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
634 | struct shdma_dev *sdev = to_shdma_dev(chan->device); | ||
635 | const struct shdma_ops *ops = sdev->ops; | ||
636 | struct dma_slave_config *config; | ||
637 | unsigned long flags; | ||
638 | int ret; | ||
639 | |||
640 | if (!chan) | ||
641 | return -EINVAL; | ||
642 | |||
643 | switch (cmd) { | ||
644 | case DMA_TERMINATE_ALL: | ||
645 | spin_lock_irqsave(&schan->chan_lock, flags); | ||
646 | ops->halt_channel(schan); | ||
647 | spin_unlock_irqrestore(&schan->chan_lock, flags); | ||
648 | |||
649 | shdma_chan_ld_cleanup(schan, true); | ||
650 | break; | ||
651 | case DMA_SLAVE_CONFIG: | ||
652 | /* | ||
653 | * So far only .slave_id is used, but the slave drivers are | ||
654 | * encouraged to also set a transfer direction and an address. | ||
655 | */ | ||
656 | if (!arg) | ||
657 | return -EINVAL; | ||
658 | /* | ||
659 | * We could lock this, but you shouldn't be configuring the | ||
660 | * channel, while using it... | ||
661 | */ | ||
662 | config = (struct dma_slave_config *)arg; | ||
663 | ret = shdma_setup_slave(schan, config->slave_id); | ||
664 | if (ret < 0) | ||
665 | return ret; | ||
666 | break; | ||
667 | default: | ||
668 | return -ENXIO; | ||
669 | } | ||
670 | |||
671 | return 0; | ||
672 | } | ||
673 | |||
674 | static void shdma_issue_pending(struct dma_chan *chan) | ||
675 | { | ||
676 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
677 | |||
678 | spin_lock_irq(&schan->chan_lock); | ||
679 | if (schan->pm_state == SHDMA_PM_ESTABLISHED) | ||
680 | shdma_chan_xfer_ld_queue(schan); | ||
681 | else | ||
682 | schan->pm_state = SHDMA_PM_PENDING; | ||
683 | spin_unlock_irq(&schan->chan_lock); | ||
684 | } | ||
685 | |||
686 | static enum dma_status shdma_tx_status(struct dma_chan *chan, | ||
687 | dma_cookie_t cookie, | ||
688 | struct dma_tx_state *txstate) | ||
689 | { | ||
690 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
691 | enum dma_status status; | ||
692 | unsigned long flags; | ||
693 | |||
694 | shdma_chan_ld_cleanup(schan, false); | ||
695 | |||
696 | spin_lock_irqsave(&schan->chan_lock, flags); | ||
697 | |||
698 | status = dma_cookie_status(chan, cookie, txstate); | ||
699 | |||
700 | /* | ||
701 | * If we don't find the cookie on the queue, it has been aborted and we have | ||
702 | * to report an error | ||
703 | */ | ||
704 | if (status != DMA_SUCCESS) { | ||
705 | struct shdma_desc *sdesc; | ||
706 | status = DMA_ERROR; | ||
707 | list_for_each_entry(sdesc, &schan->ld_queue, node) | ||
708 | if (sdesc->cookie == cookie) { | ||
709 | status = DMA_IN_PROGRESS; | ||
710 | break; | ||
711 | } | ||
712 | } | ||
713 | |||
714 | spin_unlock_irqrestore(&schan->chan_lock, flags); | ||
715 | |||
716 | return status; | ||
717 | } | ||
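Clients typically reach shdma_tx_status() through the generic completion helpers; a small hedged sketch (the cookie would come from an earlier dmaengine_submit()):

	#include <linux/dmaengine.h>

	/* Returns true once the descriptor identified by 'cookie' has completed. */
	static bool tx_done(struct dma_chan *chan, dma_cookie_t cookie)
	{
		dma_cookie_t last, used;
		enum dma_status st = dma_async_is_tx_complete(chan, cookie, &last, &used);

		/* DMA_ERROR means the cookie is no longer on ld_queue: it was aborted */
		return st == DMA_SUCCESS;
	}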
718 | |||
719 | /* Called from error IRQ or NMI */ | ||
720 | bool shdma_reset(struct shdma_dev *sdev) | ||
721 | { | ||
722 | const struct shdma_ops *ops = sdev->ops; | ||
723 | struct shdma_chan *schan; | ||
724 | unsigned int handled = 0; | ||
725 | int i; | ||
726 | |||
727 | /* Reset all channels */ | ||
728 | shdma_for_each_chan(schan, sdev, i) { | ||
729 | struct shdma_desc *sdesc; | ||
730 | LIST_HEAD(dl); | ||
731 | |||
732 | if (!schan) | ||
733 | continue; | ||
734 | |||
735 | spin_lock(&schan->chan_lock); | ||
736 | |||
737 | /* Stop the channel */ | ||
738 | ops->halt_channel(schan); | ||
739 | |||
740 | list_splice_init(&schan->ld_queue, &dl); | ||
741 | |||
742 | if (!list_empty(&dl)) { | ||
743 | dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); | ||
744 | pm_runtime_put(schan->dev); | ||
745 | } | ||
746 | schan->pm_state = SHDMA_PM_ESTABLISHED; | ||
747 | |||
748 | spin_unlock(&schan->chan_lock); | ||
749 | |||
750 | /* Complete all */ | ||
751 | list_for_each_entry(sdesc, &dl, node) { | ||
752 | struct dma_async_tx_descriptor *tx = &sdesc->async_tx; | ||
753 | sdesc->mark = DESC_IDLE; | ||
754 | if (tx->callback) | ||
755 | tx->callback(tx->callback_param); | ||
756 | } | ||
757 | |||
758 | spin_lock(&schan->chan_lock); | ||
759 | list_splice(&dl, &schan->ld_free); | ||
760 | spin_unlock(&schan->chan_lock); | ||
761 | |||
762 | handled++; | ||
763 | } | ||
764 | |||
765 | return !!handled; | ||
766 | } | ||
767 | EXPORT_SYMBOL(shdma_reset); | ||
768 | |||
769 | static irqreturn_t chan_irq(int irq, void *dev) | ||
770 | { | ||
771 | struct shdma_chan *schan = dev; | ||
772 | const struct shdma_ops *ops = | ||
773 | to_shdma_dev(schan->dma_chan.device)->ops; | ||
774 | irqreturn_t ret; | ||
775 | |||
776 | spin_lock(&schan->chan_lock); | ||
777 | |||
778 | ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE; | ||
779 | |||
780 | spin_unlock(&schan->chan_lock); | ||
781 | |||
782 | return ret; | ||
783 | } | ||
784 | |||
785 | static irqreturn_t chan_irqt(int irq, void *dev) | ||
786 | { | ||
787 | struct shdma_chan *schan = dev; | ||
788 | const struct shdma_ops *ops = | ||
789 | to_shdma_dev(schan->dma_chan.device)->ops; | ||
790 | struct shdma_desc *sdesc; | ||
791 | |||
792 | spin_lock_irq(&schan->chan_lock); | ||
793 | list_for_each_entry(sdesc, &schan->ld_queue, node) { | ||
794 | if (sdesc->mark == DESC_SUBMITTED && | ||
795 | ops->desc_completed(schan, sdesc)) { | ||
796 | dev_dbg(schan->dev, "done #%d@%p\n", | ||
797 | sdesc->async_tx.cookie, &sdesc->async_tx); | ||
798 | sdesc->mark = DESC_COMPLETED; | ||
799 | break; | ||
800 | } | ||
801 | } | ||
802 | /* Next desc */ | ||
803 | shdma_chan_xfer_ld_queue(schan); | ||
804 | spin_unlock_irq(&schan->chan_lock); | ||
805 | |||
806 | shdma_chan_ld_cleanup(schan, false); | ||
807 | |||
808 | return IRQ_HANDLED; | ||
809 | } | ||
810 | |||
811 | int shdma_request_irq(struct shdma_chan *schan, int irq, | ||
812 | unsigned long flags, const char *name) | ||
813 | { | ||
814 | int ret = request_threaded_irq(irq, chan_irq, chan_irqt, | ||
815 | flags, name, schan); | ||
816 | |||
817 | schan->irq = ret < 0 ? ret : irq; | ||
818 | |||
819 | return ret; | ||
820 | } | ||
821 | EXPORT_SYMBOL(shdma_request_irq); | ||
822 | |||
823 | void shdma_free_irq(struct shdma_chan *schan) | ||
824 | { | ||
825 | if (schan->irq >= 0) | ||
826 | free_irq(schan->irq, schan); | ||
827 | } | ||
828 | EXPORT_SYMBOL(shdma_free_irq); | ||
829 | |||
830 | void shdma_chan_probe(struct shdma_dev *sdev, | ||
831 | struct shdma_chan *schan, int id) | ||
832 | { | ||
833 | schan->pm_state = SHDMA_PM_ESTABLISHED; | ||
834 | |||
835 | /* reference struct dma_device */ | ||
836 | schan->dma_chan.device = &sdev->dma_dev; | ||
837 | dma_cookie_init(&schan->dma_chan); | ||
838 | |||
839 | schan->dev = sdev->dma_dev.dev; | ||
840 | schan->id = id; | ||
841 | |||
842 | if (!schan->max_xfer_len) | ||
843 | schan->max_xfer_len = PAGE_SIZE; | ||
844 | |||
845 | spin_lock_init(&schan->chan_lock); | ||
846 | |||
847 | /* Init descriptor management lists */ | ||
848 | INIT_LIST_HEAD(&schan->ld_queue); | ||
849 | INIT_LIST_HEAD(&schan->ld_free); | ||
850 | |||
851 | /* Add the channel to DMA device channel list */ | ||
852 | list_add_tail(&schan->dma_chan.device_node, | ||
853 | &sdev->dma_dev.channels); | ||
854 | sdev->schan[sdev->dma_dev.chancnt++] = schan; | ||
855 | } | ||
856 | EXPORT_SYMBOL(shdma_chan_probe); | ||
857 | |||
858 | void shdma_chan_remove(struct shdma_chan *schan) | ||
859 | { | ||
860 | list_del(&schan->dma_chan.device_node); | ||
861 | } | ||
862 | EXPORT_SYMBOL(shdma_chan_remove); | ||
863 | |||
864 | int shdma_init(struct device *dev, struct shdma_dev *sdev, | ||
865 | int chan_num) | ||
866 | { | ||
867 | struct dma_device *dma_dev = &sdev->dma_dev; | ||
868 | |||
869 | /* | ||
870 | * Require all callbacks for now; they can trivially be made optional | ||
871 | * later as required | ||
872 | */ | ||
873 | if (!sdev->ops || | ||
874 | !sdev->desc_size || | ||
875 | !sdev->ops->embedded_desc || | ||
876 | !sdev->ops->start_xfer || | ||
877 | !sdev->ops->setup_xfer || | ||
878 | !sdev->ops->set_slave || | ||
879 | !sdev->ops->desc_setup || | ||
880 | !sdev->ops->slave_addr || | ||
881 | !sdev->ops->channel_busy || | ||
882 | !sdev->ops->halt_channel || | ||
883 | !sdev->ops->desc_completed) | ||
884 | return -EINVAL; | ||
885 | |||
886 | sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL); | ||
887 | if (!sdev->schan) | ||
888 | return -ENOMEM; | ||
889 | |||
890 | INIT_LIST_HEAD(&dma_dev->channels); | ||
891 | |||
892 | /* Common and MEMCPY operations */ | ||
893 | dma_dev->device_alloc_chan_resources | ||
894 | = shdma_alloc_chan_resources; | ||
895 | dma_dev->device_free_chan_resources = shdma_free_chan_resources; | ||
896 | dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy; | ||
897 | dma_dev->device_tx_status = shdma_tx_status; | ||
898 | dma_dev->device_issue_pending = shdma_issue_pending; | ||
899 | |||
900 | /* Compulsory for DMA_SLAVE fields */ | ||
901 | dma_dev->device_prep_slave_sg = shdma_prep_slave_sg; | ||
902 | dma_dev->device_control = shdma_control; | ||
903 | |||
904 | dma_dev->dev = dev; | ||
905 | |||
906 | return 0; | ||
907 | } | ||
908 | EXPORT_SYMBOL(shdma_init); | ||
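For reference, a glue driver built on this library wires things up roughly as sketched below (the real example is the sh_dmae driver later in this file set; every my_* identifier and MY_NR_CHANNELS are hypothetical placeholders):

	#include <linux/dmaengine.h>
	#include <linux/platform_device.h>
	#include <linux/shdma-base.h>

	/* All callbacks checked for in shdma_init() must be provided. */
	static const struct shdma_ops my_shdma_ops = {
		.embedded_desc	= my_embedded_desc,
		.start_xfer	= my_start_xfer,
		.setup_xfer	= my_setup_xfer,
		.set_slave	= my_set_slave,
		.desc_setup	= my_desc_setup,
		.slave_addr	= my_slave_addr,
		.channel_busy	= my_channel_busy,
		.halt_channel	= my_halt_channel,
		.desc_completed	= my_desc_completed,
		.chan_irq	= my_chan_irq,
	};

	static int my_dmac_probe(struct platform_device *pdev)
	{
		struct shdma_dev *sdev;		/* usually embedded in a driver-private struct */
		int ret;

		sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL);
		if (!sdev)
			return -ENOMEM;

		sdev->ops = &my_shdma_ops;
		sdev->desc_size = sizeof(struct my_desc);	/* wraps struct shdma_desc */
		dma_cap_set(DMA_SLAVE, sdev->dma_dev.cap_mask);

		ret = shdma_init(&pdev->dev, sdev, MY_NR_CHANNELS);
		if (ret < 0)
			return ret;

		/* per-channel setup: shdma_chan_probe() + shdma_request_irq() per channel */

		return dma_async_device_register(&sdev->dma_dev);
	}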
909 | |||
910 | void shdma_cleanup(struct shdma_dev *sdev) | ||
911 | { | ||
912 | kfree(sdev->schan); | ||
913 | } | ||
914 | EXPORT_SYMBOL(shdma_cleanup); | ||
915 | |||
916 | static int __init shdma_enter(void) | ||
917 | { | ||
918 | shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) * | ||
919 | sizeof(long), GFP_KERNEL); | ||
920 | if (!shdma_slave_used) | ||
921 | return -ENOMEM; | ||
922 | return 0; | ||
923 | } | ||
924 | module_init(shdma_enter); | ||
925 | |||
926 | static void __exit shdma_exit(void) | ||
927 | { | ||
928 | kfree(shdma_slave_used); | ||
929 | } | ||
930 | module_exit(shdma_exit); | ||
931 | |||
932 | MODULE_LICENSE("GPL v2"); | ||
933 | MODULE_DESCRIPTION("SH-DMA driver base library"); | ||
934 | MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); | ||
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c new file mode 100644 index 00000000000..027c9be9765 --- /dev/null +++ b/drivers/dma/sh/shdma.c | |||
@@ -0,0 +1,943 @@ | |||
1 | /* | ||
2 | * Renesas SuperH DMA Engine support | ||
3 | * | ||
4 | * base is drivers/dma/fsldma.c | ||
5 | * | ||
6 | * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> | ||
7 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | ||
8 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. | ||
9 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
10 | * | ||
11 | * This is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * - The SuperH DMAC does not have a hardware DMA chain mode. | ||
17 | * - The maximum DMA size is 16MB. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include <linux/init.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/dmaengine.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/platform_device.h> | ||
28 | #include <linux/pm_runtime.h> | ||
29 | #include <linux/sh_dma.h> | ||
30 | #include <linux/notifier.h> | ||
31 | #include <linux/kdebug.h> | ||
32 | #include <linux/spinlock.h> | ||
33 | #include <linux/rculist.h> | ||
34 | |||
35 | #include "../dmaengine.h" | ||
36 | #include "shdma.h" | ||
37 | |||
38 | #define SH_DMAE_DRV_NAME "sh-dma-engine" | ||
39 | |||
40 | /* Default MEMCPY transfer size = 2^2 = 4 bytes */ | ||
41 | #define LOG2_DEFAULT_XFER_SIZE 2 | ||
42 | #define SH_DMA_SLAVE_NUMBER 256 | ||
43 | #define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1) | ||
44 | |||
45 | /* | ||
46 | * Used for write-side mutual exclusion for the global device list, | ||
47 | * read-side synchronization by way of RCU, and per-controller data. | ||
48 | */ | ||
49 | static DEFINE_SPINLOCK(sh_dmae_lock); | ||
50 | static LIST_HEAD(sh_dmae_devices); | ||
51 | |||
52 | static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data) | ||
53 | { | ||
54 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); | ||
55 | |||
56 | __raw_writel(data, shdev->chan_reg + | ||
57 | shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset); | ||
58 | } | ||
59 | |||
60 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) | ||
61 | { | ||
62 | __raw_writel(data, sh_dc->base + reg / sizeof(u32)); | ||
63 | } | ||
64 | |||
65 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) | ||
66 | { | ||
67 | return __raw_readl(sh_dc->base + reg / sizeof(u32)); | ||
68 | } | ||
69 | |||
70 | static u16 dmaor_read(struct sh_dmae_device *shdev) | ||
71 | { | ||
72 | u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); | ||
73 | |||
74 | if (shdev->pdata->dmaor_is_32bit) | ||
75 | return __raw_readl(addr); | ||
76 | else | ||
77 | return __raw_readw(addr); | ||
78 | } | ||
79 | |||
80 | static void dmaor_write(struct sh_dmae_device *shdev, u16 data) | ||
81 | { | ||
82 | u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); | ||
83 | |||
84 | if (shdev->pdata->dmaor_is_32bit) | ||
85 | __raw_writel(data, addr); | ||
86 | else | ||
87 | __raw_writew(data, addr); | ||
88 | } | ||
89 | |||
90 | static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data) | ||
91 | { | ||
92 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); | ||
93 | |||
94 | __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32)); | ||
95 | } | ||
96 | |||
97 | static u32 chcr_read(struct sh_dmae_chan *sh_dc) | ||
98 | { | ||
99 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); | ||
100 | |||
101 | return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32)); | ||
102 | } | ||
103 | |||
104 | /* | ||
105 | * Reset DMA controller | ||
106 | * | ||
107 | * SH7780 has two DMAOR registers | ||
108 | */ | ||
109 | static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) | ||
110 | { | ||
111 | unsigned short dmaor; | ||
112 | unsigned long flags; | ||
113 | |||
114 | spin_lock_irqsave(&sh_dmae_lock, flags); | ||
115 | |||
116 | dmaor = dmaor_read(shdev); | ||
117 | dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); | ||
118 | |||
119 | spin_unlock_irqrestore(&sh_dmae_lock, flags); | ||
120 | } | ||
121 | |||
122 | static int sh_dmae_rst(struct sh_dmae_device *shdev) | ||
123 | { | ||
124 | unsigned short dmaor; | ||
125 | unsigned long flags; | ||
126 | |||
127 | spin_lock_irqsave(&sh_dmae_lock, flags); | ||
128 | |||
129 | dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); | ||
130 | |||
131 | if (shdev->pdata->chclr_present) { | ||
132 | int i; | ||
133 | for (i = 0; i < shdev->pdata->channel_num; i++) { | ||
134 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | ||
135 | if (sh_chan) | ||
136 | chclr_write(sh_chan, 0); | ||
137 | } | ||
138 | } | ||
139 | |||
140 | dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); | ||
141 | |||
142 | dmaor = dmaor_read(shdev); | ||
143 | |||
144 | spin_unlock_irqrestore(&sh_dmae_lock, flags); | ||
145 | |||
146 | if (dmaor & (DMAOR_AE | DMAOR_NMIF)) { | ||
147 | dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n"); | ||
148 | return -EIO; | ||
149 | } | ||
150 | if (shdev->pdata->dmaor_init & ~dmaor) | ||
151 | dev_warn(shdev->shdma_dev.dma_dev.dev, | ||
152 | "DMAOR=0x%x hasn't latched the initial value 0x%x.\n", | ||
153 | dmaor, shdev->pdata->dmaor_init); | ||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) | ||
158 | { | ||
159 | u32 chcr = chcr_read(sh_chan); | ||
160 | |||
161 | if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) | ||
162 | return true; /* working */ | ||
163 | |||
164 | return false; /* waiting */ | ||
165 | } | ||
166 | |||
167 | static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) | ||
168 | { | ||
169 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
170 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
171 | int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | | ||
172 | ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); | ||
173 | |||
174 | if (cnt >= pdata->ts_shift_num) | ||
175 | cnt = 0; | ||
176 | |||
177 | return pdata->ts_shift[cnt]; | ||
178 | } | ||
179 | |||
180 | static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) | ||
181 | { | ||
182 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
183 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
184 | int i; | ||
185 | |||
186 | for (i = 0; i < pdata->ts_shift_num; i++) | ||
187 | if (pdata->ts_shift[i] == l2size) | ||
188 | break; | ||
189 | |||
190 | if (i == pdata->ts_shift_num) | ||
191 | i = 0; | ||
192 | |||
193 | return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) | | ||
194 | ((i << pdata->ts_high_shift) & pdata->ts_high_mask); | ||
195 | } | ||
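The two helpers above just round-trip a transfer-size index through two disjoint CHCR bit fields; the decoded index then selects pdata->ts_shift[], the log2 of the transfer size. A standalone sketch with made-up field positions (the real masks and shifts come from the per-SoC sh_dmae_pdata):

	#include <stdio.h>

	/* Made-up layout: index bits 1..0 live in CHCR bits 3..2, bits 3..2 in CHCR bits 21..20 */
	#define TS_LOW_MASK	0x0000000cu
	#define TS_LOW_SHIFT	2
	#define TS_HIGH_MASK	0x00300000u
	#define TS_HIGH_SHIFT	18	/* index bit 2 -> CHCR bit 20 */

	static unsigned int ts_encode(unsigned int idx)		/* cf. log2size_to_chcr() */
	{
		return ((idx << TS_LOW_SHIFT) & TS_LOW_MASK) |
		       ((idx << TS_HIGH_SHIFT) & TS_HIGH_MASK);
	}

	static unsigned int ts_decode(unsigned int chcr)	/* cf. calc_xmit_shift() */
	{
		return ((chcr & TS_LOW_MASK) >> TS_LOW_SHIFT) |
		       ((chcr & TS_HIGH_MASK) >> TS_HIGH_SHIFT);
	}

	int main(void)
	{
		unsigned int idx;

		for (idx = 0; idx < 16; idx++)
			printf("idx %2u -> TS bits 0x%08x -> idx %2u\n",
			       idx, ts_encode(idx), ts_decode(ts_encode(idx)));
		return 0;
	}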
196 | |||
197 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) | ||
198 | { | ||
199 | sh_dmae_writel(sh_chan, hw->sar, SAR); | ||
200 | sh_dmae_writel(sh_chan, hw->dar, DAR); | ||
201 | sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR); | ||
202 | } | ||
203 | |||
204 | static void dmae_start(struct sh_dmae_chan *sh_chan) | ||
205 | { | ||
206 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
207 | u32 chcr = chcr_read(sh_chan); | ||
208 | |||
209 | if (shdev->pdata->needs_tend_set) | ||
210 | sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND); | ||
211 | |||
212 | chcr |= CHCR_DE | shdev->chcr_ie_bit; | ||
213 | chcr_write(sh_chan, chcr & ~CHCR_TE); | ||
214 | } | ||
215 | |||
216 | static void dmae_init(struct sh_dmae_chan *sh_chan) | ||
217 | { | ||
218 | /* | ||
219 | * Default configuration for dual address memory-memory transfer. | ||
220 | * 0x400 represents auto-request. | ||
221 | */ | ||
222 | u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, | ||
223 | LOG2_DEFAULT_XFER_SIZE); | ||
224 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); | ||
225 | chcr_write(sh_chan, chcr); | ||
226 | } | ||
227 | |||
228 | static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) | ||
229 | { | ||
230 | /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */ | ||
231 | if (dmae_is_busy(sh_chan)) | ||
232 | return -EBUSY; | ||
233 | |||
234 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); | ||
235 | chcr_write(sh_chan, val); | ||
236 | |||
237 | return 0; | ||
238 | } | ||
239 | |||
240 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) | ||
241 | { | ||
242 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
243 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
244 | const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id]; | ||
245 | u16 __iomem *addr = shdev->dmars; | ||
246 | unsigned int shift = chan_pdata->dmars_bit; | ||
247 | |||
248 | if (dmae_is_busy(sh_chan)) | ||
249 | return -EBUSY; | ||
250 | |||
251 | if (pdata->no_dmars) | ||
252 | return 0; | ||
253 | |||
254 | /* in the case of a missing DMARS resource, use the first memory window */ | ||
255 | if (!addr) | ||
256 | addr = (u16 __iomem *)shdev->chan_reg; | ||
257 | addr += chan_pdata->dmars / sizeof(u16); | ||
258 | |||
259 | __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), | ||
260 | addr); | ||
261 | |||
262 | return 0; | ||
263 | } | ||
264 | |||
265 | static void sh_dmae_start_xfer(struct shdma_chan *schan, | ||
266 | struct shdma_desc *sdesc) | ||
267 | { | ||
268 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, | ||
269 | shdma_chan); | ||
270 | struct sh_dmae_desc *sh_desc = container_of(sdesc, | ||
271 | struct sh_dmae_desc, shdma_desc); | ||
272 | dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n", | ||
273 | sdesc->async_tx.cookie, sh_chan->shdma_chan.id, | ||
274 | sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar); | ||
275 | /* Get the ld start address from ld_queue */ | ||
276 | dmae_set_reg(sh_chan, &sh_desc->hw); | ||
277 | dmae_start(sh_chan); | ||
278 | } | ||
279 | |||
280 | static bool sh_dmae_channel_busy(struct shdma_chan *schan) | ||
281 | { | ||
282 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, | ||
283 | shdma_chan); | ||
284 | return dmae_is_busy(sh_chan); | ||
285 | } | ||
286 | |||
287 | static void sh_dmae_setup_xfer(struct shdma_chan *schan, | ||
288 | int slave_id) | ||
289 | { | ||
290 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, | ||
291 | shdma_chan); | ||
292 | |||
293 | if (slave_id >= 0) { | ||
294 | const struct sh_dmae_slave_config *cfg = | ||
295 | sh_chan->config; | ||
296 | |||
297 | dmae_set_dmars(sh_chan, cfg->mid_rid); | ||
298 | dmae_set_chcr(sh_chan, cfg->chcr); | ||
299 | } else { | ||
300 | dmae_init(sh_chan); | ||
301 | } | ||
302 | } | ||
303 | |||
304 | static const struct sh_dmae_slave_config *dmae_find_slave( | ||
305 | struct sh_dmae_chan *sh_chan, int slave_id) | ||
306 | { | ||
307 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
308 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
309 | const struct sh_dmae_slave_config *cfg; | ||
310 | int i; | ||
311 | |||
312 | if (slave_id >= SH_DMA_SLAVE_NUMBER) | ||
313 | return NULL; | ||
314 | |||
315 | for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) | ||
316 | if (cfg->slave_id == slave_id) | ||
317 | return cfg; | ||
318 | |||
319 | return NULL; | ||
320 | } | ||
321 | |||
322 | static int sh_dmae_set_slave(struct shdma_chan *schan, | ||
323 | int slave_id, bool try) | ||
324 | { | ||
325 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, | ||
326 | shdma_chan); | ||
327 | const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id); | ||
328 | if (!cfg) | ||
329 | return -ENODEV; | ||
330 | |||
331 | if (!try) | ||
332 | sh_chan->config = cfg; | ||
333 | |||
334 | return 0; | ||
335 | } | ||
336 | |||
337 | static void dmae_halt(struct sh_dmae_chan *sh_chan) | ||
338 | { | ||
339 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
340 | u32 chcr = chcr_read(sh_chan); | ||
341 | |||
342 | chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit); | ||
343 | chcr_write(sh_chan, chcr); | ||
344 | } | ||
345 | |||
346 | static int sh_dmae_desc_setup(struct shdma_chan *schan, | ||
347 | struct shdma_desc *sdesc, | ||
348 | dma_addr_t src, dma_addr_t dst, size_t *len) | ||
349 | { | ||
350 | struct sh_dmae_desc *sh_desc = container_of(sdesc, | ||
351 | struct sh_dmae_desc, shdma_desc); | ||
352 | |||
353 | if (*len > schan->max_xfer_len) | ||
354 | *len = schan->max_xfer_len; | ||
355 | |||
356 | sh_desc->hw.sar = src; | ||
357 | sh_desc->hw.dar = dst; | ||
358 | sh_desc->hw.tcr = *len; | ||
359 | |||
360 | return 0; | ||
361 | } | ||
362 | |||
363 | static void sh_dmae_halt(struct shdma_chan *schan) | ||
364 | { | ||
365 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, | ||
366 | shdma_chan); | ||
367 | dmae_halt(sh_chan); | ||
368 | } | ||
369 | |||
370 | static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq) | ||
371 | { | ||
372 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, | ||
373 | shdma_chan); | ||
374 | |||
375 | if (!(chcr_read(sh_chan) & CHCR_TE)) | ||
376 | return false; | ||
377 | |||
378 | /* DMA stop */ | ||
379 | dmae_halt(sh_chan); | ||
380 | |||
381 | return true; | ||
382 | } | ||
383 | |||
384 | /* Called from error IRQ or NMI */ | ||
385 | static bool sh_dmae_reset(struct sh_dmae_device *shdev) | ||
386 | { | ||
387 | bool ret; | ||
388 | |||
389 | /* halt the dma controller */ | ||
390 | sh_dmae_ctl_stop(shdev); | ||
391 | |||
392 | /* We cannot detect which channel caused the error, so we have to reset them all */ | ||
393 | ret = shdma_reset(&shdev->shdma_dev); | ||
394 | |||
395 | sh_dmae_rst(shdev); | ||
396 | |||
397 | return ret; | ||
398 | } | ||
399 | |||
400 | static irqreturn_t sh_dmae_err(int irq, void *data) | ||
401 | { | ||
402 | struct sh_dmae_device *shdev = data; | ||
403 | |||
404 | if (!(dmaor_read(shdev) & DMAOR_AE)) | ||
405 | return IRQ_NONE; | ||
406 | |||
407 | sh_dmae_reset(shdev); | ||
408 | return IRQ_HANDLED; | ||
409 | } | ||
410 | |||
411 | static bool sh_dmae_desc_completed(struct shdma_chan *schan, | ||
412 | struct shdma_desc *sdesc) | ||
413 | { | ||
414 | struct sh_dmae_chan *sh_chan = container_of(schan, | ||
415 | struct sh_dmae_chan, shdma_chan); | ||
416 | struct sh_dmae_desc *sh_desc = container_of(sdesc, | ||
417 | struct sh_dmae_desc, shdma_desc); | ||
418 | u32 sar_buf = sh_dmae_readl(sh_chan, SAR); | ||
419 | u32 dar_buf = sh_dmae_readl(sh_chan, DAR); | ||
420 | |||
421 | return (sdesc->direction == DMA_DEV_TO_MEM && | ||
422 | (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) || | ||
423 | (sdesc->direction != DMA_DEV_TO_MEM && | ||
424 | (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf); | ||
425 | } | ||
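
Completion detection above is purely address-based: the channel's current SAR/DAR snapshot is compared against the descriptor's programmed start address plus its byte count, with the transfer direction deciding which register advances. A minimal userspace sketch of that comparison, using stand-in types and hypothetical register values rather than the driver's structures:

/*
 * Minimal standalone sketch (not driver code) of the completion test above.
 * Direction constants and register snapshots are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum dir { DEV_TO_MEM, MEM_TO_DEV, MEM_TO_MEM };

struct hw_desc { uint32_t sar, dar, tcr; };

static bool desc_completed(enum dir d, const struct hw_desc *hw,
			   uint32_t cur_sar, uint32_t cur_dar)
{
	/* DEV_TO_MEM advances DAR; every other direction advances SAR */
	if (d == DEV_TO_MEM)
		return hw->dar + hw->tcr == cur_dar;
	return hw->sar + hw->tcr == cur_sar;
}

int main(void)
{
	struct hw_desc hw = { .sar = 0x40000000, .dar = 0x50000000, .tcr = 0x1000 };

	/* The source pointer has advanced by the full byte count: completed */
	printf("%d\n", desc_completed(MEM_TO_MEM, &hw, 0x40001000, 0x50000800));
	return 0;
}
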
426 | |||
427 | static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev) | ||
428 | { | ||
429 | /* Fast path out if NMIF is not asserted for this controller */ | ||
430 | if ((dmaor_read(shdev) & DMAOR_NMIF) == 0) | ||
431 | return false; | ||
432 | |||
433 | return sh_dmae_reset(shdev); | ||
434 | } | ||
435 | |||
436 | static int sh_dmae_nmi_handler(struct notifier_block *self, | ||
437 | unsigned long cmd, void *data) | ||
438 | { | ||
439 | struct sh_dmae_device *shdev; | ||
440 | int ret = NOTIFY_DONE; | ||
441 | bool triggered; | ||
442 | |||
443 | /* | ||
444 | * Only concern ourselves with NMI events. | ||
445 | * | ||
446 | * Normally we would check the die chain value, but as this needs | ||
447 | * to be architecture independent, check for NMI context instead. | ||
448 | */ | ||
449 | if (!in_nmi()) | ||
450 | return NOTIFY_DONE; | ||
451 | |||
452 | rcu_read_lock(); | ||
453 | list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) { | ||
454 | /* | ||
455 | * Only stop if one of the controllers has NMIF asserted, | ||
456 | * we do not want to interfere with regular address error | ||
457 | * handling or NMI events that don't concern the DMACs. | ||
458 | */ | ||
459 | triggered = sh_dmae_nmi_notify(shdev); | ||
460 | if (triggered == true) | ||
461 | ret = NOTIFY_OK; | ||
462 | } | ||
463 | rcu_read_unlock(); | ||
464 | |||
465 | return ret; | ||
466 | } | ||
467 | |||
468 | static struct notifier_block sh_dmae_nmi_notifier __read_mostly = { | ||
469 | .notifier_call = sh_dmae_nmi_handler, | ||
470 | |||
471 | /* Run before NMI debug handler and KGDB */ | ||
472 | .priority = 1, | ||
473 | }; | ||
474 | |||
475 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, | ||
476 | int irq, unsigned long flags) | ||
477 | { | ||
478 | const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; | ||
479 | struct shdma_dev *sdev = &shdev->shdma_dev; | ||
480 | struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev); | ||
481 | struct sh_dmae_chan *sh_chan; | ||
482 | struct shdma_chan *schan; | ||
483 | int err; | ||
484 | |||
485 | sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); | ||
486 | if (!sh_chan) { | ||
487 | dev_err(sdev->dma_dev.dev, | ||
488 | "No free memory for allocating dma channels!\n"); | ||
489 | return -ENOMEM; | ||
490 | } | ||
491 | |||
492 | schan = &sh_chan->shdma_chan; | ||
493 | schan->max_xfer_len = SH_DMA_TCR_MAX + 1; | ||
494 | |||
495 | shdma_chan_probe(sdev, schan, id); | ||
496 | |||
497 | sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); | ||
498 | |||
499 | /* set up channel irq */ | ||
500 | if (pdev->id >= 0) | ||
501 | snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id), | ||
502 | "sh-dmae%d.%d", pdev->id, id); | ||
503 | else | ||
504 | snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id), | ||
505 | "sh-dma%d", id); | ||
506 | |||
507 | err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id); | ||
508 | if (err) { | ||
509 | dev_err(sdev->dma_dev.dev, | ||
510 | "DMA channel %d request_irq error %d\n", | ||
511 | id, err); | ||
512 | goto err_no_irq; | ||
513 | } | ||
514 | |||
515 | shdev->chan[id] = sh_chan; | ||
516 | return 0; | ||
517 | |||
518 | err_no_irq: | ||
519 | /* remove from dmaengine device node */ | ||
520 | shdma_chan_remove(schan); | ||
521 | kfree(sh_chan); | ||
522 | return err; | ||
523 | } | ||
524 | |||
525 | static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) | ||
526 | { | ||
527 | struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; | ||
528 | struct shdma_chan *schan; | ||
529 | int i; | ||
530 | |||
531 | shdma_for_each_chan(schan, &shdev->shdma_dev, i) { | ||
532 | struct sh_dmae_chan *sh_chan = container_of(schan, | ||
533 | struct sh_dmae_chan, shdma_chan); | ||
534 | BUG_ON(!schan); | ||
535 | |||
536 | shdma_free_irq(&sh_chan->shdma_chan); | ||
537 | |||
538 | shdma_chan_remove(schan); | ||
539 | kfree(sh_chan); | ||
540 | } | ||
541 | dma_dev->chancnt = 0; | ||
542 | } | ||
543 | |||
544 | static void sh_dmae_shutdown(struct platform_device *pdev) | ||
545 | { | ||
546 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | ||
547 | sh_dmae_ctl_stop(shdev); | ||
548 | } | ||
549 | |||
550 | static int sh_dmae_runtime_suspend(struct device *dev) | ||
551 | { | ||
552 | return 0; | ||
553 | } | ||
554 | |||
555 | static int sh_dmae_runtime_resume(struct device *dev) | ||
556 | { | ||
557 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
558 | |||
559 | return sh_dmae_rst(shdev); | ||
560 | } | ||
561 | |||
562 | #ifdef CONFIG_PM | ||
563 | static int sh_dmae_suspend(struct device *dev) | ||
564 | { | ||
565 | return 0; | ||
566 | } | ||
567 | |||
568 | static int sh_dmae_resume(struct device *dev) | ||
569 | { | ||
570 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
571 | int i, ret; | ||
572 | |||
573 | ret = sh_dmae_rst(shdev); | ||
574 | if (ret < 0) | ||
575 | dev_err(dev, "Failed to reset!\n"); | ||
576 | |||
577 | for (i = 0; i < shdev->pdata->channel_num; i++) { | ||
578 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | ||
579 | |||
580 | if (!sh_chan->shdma_chan.desc_num) | ||
581 | continue; | ||
582 | |||
583 | if (sh_chan->shdma_chan.slave_id >= 0) { | ||
584 | const struct sh_dmae_slave_config *cfg = sh_chan->config; | ||
585 | dmae_set_dmars(sh_chan, cfg->mid_rid); | ||
586 | dmae_set_chcr(sh_chan, cfg->chcr); | ||
587 | } else { | ||
588 | dmae_init(sh_chan); | ||
589 | } | ||
590 | } | ||
591 | |||
592 | return 0; | ||
593 | } | ||
594 | #else | ||
595 | #define sh_dmae_suspend NULL | ||
596 | #define sh_dmae_resume NULL | ||
597 | #endif | ||
598 | |||
599 | const struct dev_pm_ops sh_dmae_pm = { | ||
600 | .suspend = sh_dmae_suspend, | ||
601 | .resume = sh_dmae_resume, | ||
602 | .runtime_suspend = sh_dmae_runtime_suspend, | ||
603 | .runtime_resume = sh_dmae_runtime_resume, | ||
604 | }; | ||
605 | |||
606 | static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan) | ||
607 | { | ||
608 | struct sh_dmae_chan *sh_chan = container_of(schan, | ||
609 | struct sh_dmae_chan, shdma_chan); | ||
610 | |||
611 | /* | ||
612 | * Implicit BUG_ON(!sh_chan->config) | ||
613 | * This is an exclusive slave DMA operation; it may only be called after a | ||
614 | * successful slave configuration. | ||
615 | */ | ||
616 | return sh_chan->config->addr; | ||
617 | } | ||
618 | |||
619 | static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i) | ||
620 | { | ||
621 | return &((struct sh_dmae_desc *)buf)[i].shdma_desc; | ||
622 | } | ||
623 | |||
624 | static const struct shdma_ops sh_dmae_shdma_ops = { | ||
625 | .desc_completed = sh_dmae_desc_completed, | ||
626 | .halt_channel = sh_dmae_halt, | ||
627 | .channel_busy = sh_dmae_channel_busy, | ||
628 | .slave_addr = sh_dmae_slave_addr, | ||
629 | .desc_setup = sh_dmae_desc_setup, | ||
630 | .set_slave = sh_dmae_set_slave, | ||
631 | .setup_xfer = sh_dmae_setup_xfer, | ||
632 | .start_xfer = sh_dmae_start_xfer, | ||
633 | .embedded_desc = sh_dmae_embedded_desc, | ||
634 | .chan_irq = sh_dmae_chan_irq, | ||
635 | }; | ||
636 | |||
637 | static int __devinit sh_dmae_probe(struct platform_device *pdev) | ||
638 | { | ||
639 | struct sh_dmae_pdata *pdata = pdev->dev.platform_data; | ||
640 | unsigned long irqflags = IRQF_DISABLED, | ||
641 | chan_flag[SH_DMAE_MAX_CHANNELS] = {}; | ||
642 | int errirq, chan_irq[SH_DMAE_MAX_CHANNELS]; | ||
643 | int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; | ||
644 | struct sh_dmae_device *shdev; | ||
645 | struct dma_device *dma_dev; | ||
646 | struct resource *chan, *dmars, *errirq_res, *chanirq_res; | ||
647 | |||
648 | /* get platform data */ | ||
649 | if (!pdata || !pdata->channel_num) | ||
650 | return -ENODEV; | ||
651 | |||
652 | chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
653 | /* DMARS area is optional */ | ||
654 | dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
655 | /* | ||
656 | * IRQ resources: | ||
657 | * 1. there must always be at least one IRQ IO-resource. On SH4 it is | ||
658 | * the error IRQ, in which case it is the only IRQ in this resource: | ||
659 | * start == end. If it is the only IRQ resource, all channels also | ||
660 | * use the same IRQ. | ||
661 | * 2. DMA channel IRQ resources can be specified one per resource or in | ||
662 | * ranges (start != end) | ||
663 | * 3. iff all events (channels and, optionally, error) on this | ||
664 | * controller use the same IRQ, only one IRQ resource can be | ||
665 | * specified, otherwise there must be one IRQ per channel, even if | ||
666 | * some of them are equal | ||
667 | * 4. if all IRQs on this controller are equal or if some specific IRQs | ||
668 | * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be | ||
669 | * requested with the IRQF_SHARED flag | ||
670 | */ | ||
671 | errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
672 | if (!chan || !errirq_res) | ||
673 | return -ENODEV; | ||
674 | |||
675 | if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) { | ||
676 | dev_err(&pdev->dev, "DMAC register region already claimed\n"); | ||
677 | return -EBUSY; | ||
678 | } | ||
679 | |||
680 | if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) { | ||
681 | dev_err(&pdev->dev, "DMAC DMARS region already claimed\n"); | ||
682 | err = -EBUSY; | ||
683 | goto ermrdmars; | ||
684 | } | ||
685 | |||
686 | err = -ENOMEM; | ||
687 | shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); | ||
688 | if (!shdev) { | ||
689 | dev_err(&pdev->dev, "Not enough memory\n"); | ||
690 | goto ealloc; | ||
691 | } | ||
692 | |||
693 | dma_dev = &shdev->shdma_dev.dma_dev; | ||
694 | |||
695 | shdev->chan_reg = ioremap(chan->start, resource_size(chan)); | ||
696 | if (!shdev->chan_reg) | ||
697 | goto emapchan; | ||
698 | if (dmars) { | ||
699 | shdev->dmars = ioremap(dmars->start, resource_size(dmars)); | ||
700 | if (!shdev->dmars) | ||
701 | goto emapdmars; | ||
702 | } | ||
703 | |||
704 | if (!pdata->slave_only) | ||
705 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | ||
706 | if (pdata->slave && pdata->slave_num) | ||
707 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); | ||
708 | |||
709 | /* Default transfer size of 32 bytes requires 32-byte alignment */ | ||
710 | dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE; | ||
711 | |||
712 | shdev->shdma_dev.ops = &sh_dmae_shdma_ops; | ||
713 | shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc); | ||
714 | err = shdma_init(&pdev->dev, &shdev->shdma_dev, | ||
715 | pdata->channel_num); | ||
716 | if (err < 0) | ||
717 | goto eshdma; | ||
718 | |||
719 | /* platform data */ | ||
720 | shdev->pdata = pdev->dev.platform_data; | ||
721 | |||
722 | if (pdata->chcr_offset) | ||
723 | shdev->chcr_offset = pdata->chcr_offset; | ||
724 | else | ||
725 | shdev->chcr_offset = CHCR; | ||
726 | |||
727 | if (pdata->chcr_ie_bit) | ||
728 | shdev->chcr_ie_bit = pdata->chcr_ie_bit; | ||
729 | else | ||
730 | shdev->chcr_ie_bit = CHCR_IE; | ||
731 | |||
732 | platform_set_drvdata(pdev, shdev); | ||
733 | |||
734 | pm_runtime_enable(&pdev->dev); | ||
735 | err = pm_runtime_get_sync(&pdev->dev); | ||
736 | if (err < 0) | ||
737 | dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err); | ||
738 | |||
739 | spin_lock_irq(&sh_dmae_lock); | ||
740 | list_add_tail_rcu(&shdev->node, &sh_dmae_devices); | ||
741 | spin_unlock_irq(&sh_dmae_lock); | ||
742 | |||
743 | /* reset dma controller - only needed as a test */ | ||
744 | err = sh_dmae_rst(shdev); | ||
745 | if (err) | ||
746 | goto rst_err; | ||
747 | |||
748 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | ||
749 | chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); | ||
750 | |||
751 | if (!chanirq_res) | ||
752 | chanirq_res = errirq_res; | ||
753 | else | ||
754 | irqres++; | ||
755 | |||
756 | if (chanirq_res == errirq_res || | ||
757 | (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) | ||
758 | irqflags = IRQF_SHARED; | ||
759 | |||
760 | errirq = errirq_res->start; | ||
761 | |||
762 | err = request_irq(errirq, sh_dmae_err, irqflags, | ||
763 | "DMAC Address Error", shdev); | ||
764 | if (err) { | ||
765 | dev_err(&pdev->dev, | ||
766 | "DMA failed requesting irq #%d, error %d\n", | ||
767 | errirq, err); | ||
768 | goto eirq_err; | ||
769 | } | ||
770 | |||
771 | #else | ||
772 | chanirq_res = errirq_res; | ||
773 | #endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */ | ||
774 | |||
775 | if (chanirq_res->start == chanirq_res->end && | ||
776 | !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { | ||
777 | /* Special case - all multiplexed */ | ||
778 | for (; irq_cnt < pdata->channel_num; irq_cnt++) { | ||
779 | if (irq_cnt < SH_DMAE_MAX_CHANNELS) { | ||
780 | chan_irq[irq_cnt] = chanirq_res->start; | ||
781 | chan_flag[irq_cnt] = IRQF_SHARED; | ||
782 | } else { | ||
783 | irq_cap = 1; | ||
784 | break; | ||
785 | } | ||
786 | } | ||
787 | } else { | ||
788 | do { | ||
789 | for (i = chanirq_res->start; i <= chanirq_res->end; i++) { | ||
790 | if (irq_cnt >= SH_DMAE_MAX_CHANNELS) { | ||
791 | irq_cap = 1; | ||
792 | break; | ||
793 | } | ||
794 | |||
795 | if ((errirq_res->flags & IORESOURCE_BITS) == | ||
796 | IORESOURCE_IRQ_SHAREABLE) | ||
797 | chan_flag[irq_cnt] = IRQF_SHARED; | ||
798 | else | ||
799 | chan_flag[irq_cnt] = IRQF_DISABLED; | ||
800 | dev_dbg(&pdev->dev, | ||
801 | "Found IRQ %d for channel %d\n", | ||
802 | i, irq_cnt); | ||
803 | chan_irq[irq_cnt++] = i; | ||
804 | } | ||
805 | |||
806 | if (irq_cnt >= SH_DMAE_MAX_CHANNELS) | ||
807 | break; | ||
808 | |||
809 | chanirq_res = platform_get_resource(pdev, | ||
810 | IORESOURCE_IRQ, ++irqres); | ||
811 | } while (irq_cnt < pdata->channel_num && chanirq_res); | ||
812 | } | ||
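
The resource-parsing code above implements the rules listed in the earlier comment: a single start == end IRQ resource with no second resource means one multiplexed, shared IRQ for every channel; otherwise each resource range supplies one IRQ per channel. A small standalone sketch of that policy, with made-up IRQ numbers and a simplified stand-in for struct resource (not the kernel API):

/*
 * Standalone sketch (not driver code) of the IRQ assignment policy:
 * a single multiplexed IRQ is shared by all channels, otherwise each
 * resource range contributes one IRQ per channel. Types and numbers
 * here are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_CHANNELS 20

struct irq_res { int start, end; bool shareable; };

static int assign_irqs(const struct irq_res *res, int nres, int channels,
		       int chan_irq[], bool chan_shared[])
{
	int cnt = 0, r, i;

	if (nres == 1 && res[0].start == res[0].end) {
		/* Special case - all channels multiplexed onto one shared IRQ */
		for (; cnt < channels && cnt < MAX_CHANNELS; cnt++) {
			chan_irq[cnt] = res[0].start;
			chan_shared[cnt] = true;
		}
		return cnt;
	}

	for (r = 0; r < nres && cnt < channels; r++)
		for (i = res[r].start; i <= res[r].end && cnt < MAX_CHANNELS; i++) {
			chan_shared[cnt] = res[r].shareable;
			chan_irq[cnt++] = i;
		}
	return cnt;
}

int main(void)
{
	const struct irq_res ranges[] = { { 48, 51, false } }; /* hypothetical */
	int irqs[MAX_CHANNELS];
	bool shared[MAX_CHANNELS];
	int n = assign_irqs(ranges, 1, 4, irqs, shared), i;

	for (i = 0; i < n; i++)
		printf("channel %d -> irq %d%s\n", i, irqs[i],
		       shared[i] ? " (shared)" : "");
	return 0;
}
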
813 | |||
814 | /* Create DMA Channel */ | ||
815 | for (i = 0; i < irq_cnt; i++) { | ||
816 | err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); | ||
817 | if (err) | ||
818 | goto chan_probe_err; | ||
819 | } | ||
820 | |||
821 | if (irq_cap) | ||
822 | dev_notice(&pdev->dev, "Attempting to register %d DMA " | ||
823 | "channels when a maximum of %d are supported.\n", | ||
824 | pdata->channel_num, SH_DMAE_MAX_CHANNELS); | ||
825 | |||
826 | pm_runtime_put(&pdev->dev); | ||
827 | |||
828 | err = dma_async_device_register(&shdev->shdma_dev.dma_dev); | ||
829 | if (err < 0) | ||
830 | goto edmadevreg; | ||
831 | |||
832 | return err; | ||
833 | |||
834 | edmadevreg: | ||
835 | pm_runtime_get(&pdev->dev); | ||
836 | |||
837 | chan_probe_err: | ||
838 | sh_dmae_chan_remove(shdev); | ||
839 | |||
840 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | ||
841 | free_irq(errirq, shdev); | ||
842 | eirq_err: | ||
843 | #endif | ||
844 | rst_err: | ||
845 | spin_lock_irq(&sh_dmae_lock); | ||
846 | list_del_rcu(&shdev->node); | ||
847 | spin_unlock_irq(&sh_dmae_lock); | ||
848 | |||
849 | pm_runtime_put(&pdev->dev); | ||
850 | pm_runtime_disable(&pdev->dev); | ||
851 | |||
852 | platform_set_drvdata(pdev, NULL); | ||
853 | shdma_cleanup(&shdev->shdma_dev); | ||
854 | eshdma: | ||
855 | if (dmars) | ||
856 | iounmap(shdev->dmars); | ||
857 | emapdmars: | ||
858 | iounmap(shdev->chan_reg); | ||
859 | synchronize_rcu(); | ||
860 | emapchan: | ||
861 | kfree(shdev); | ||
862 | ealloc: | ||
863 | if (dmars) | ||
864 | release_mem_region(dmars->start, resource_size(dmars)); | ||
865 | ermrdmars: | ||
866 | release_mem_region(chan->start, resource_size(chan)); | ||
867 | |||
868 | return err; | ||
869 | } | ||
870 | |||
871 | static int __devexit sh_dmae_remove(struct platform_device *pdev) | ||
872 | { | ||
873 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | ||
874 | struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; | ||
875 | struct resource *res; | ||
876 | int errirq = platform_get_irq(pdev, 0); | ||
877 | |||
878 | dma_async_device_unregister(dma_dev); | ||
879 | |||
880 | if (errirq > 0) | ||
881 | free_irq(errirq, shdev); | ||
882 | |||
883 | spin_lock_irq(&sh_dmae_lock); | ||
884 | list_del_rcu(&shdev->node); | ||
885 | spin_unlock_irq(&sh_dmae_lock); | ||
886 | |||
887 | pm_runtime_disable(&pdev->dev); | ||
888 | |||
889 | sh_dmae_chan_remove(shdev); | ||
890 | shdma_cleanup(&shdev->shdma_dev); | ||
891 | |||
892 | if (shdev->dmars) | ||
893 | iounmap(shdev->dmars); | ||
894 | iounmap(shdev->chan_reg); | ||
895 | |||
896 | platform_set_drvdata(pdev, NULL); | ||
897 | |||
898 | synchronize_rcu(); | ||
899 | kfree(shdev); | ||
900 | |||
901 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
902 | if (res) | ||
903 | release_mem_region(res->start, resource_size(res)); | ||
904 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
905 | if (res) | ||
906 | release_mem_region(res->start, resource_size(res)); | ||
907 | |||
908 | return 0; | ||
909 | } | ||
910 | |||
911 | static struct platform_driver sh_dmae_driver = { | ||
912 | .driver = { | ||
913 | .owner = THIS_MODULE, | ||
914 | .pm = &sh_dmae_pm, | ||
915 | .name = SH_DMAE_DRV_NAME, | ||
916 | }, | ||
917 | .remove = __devexit_p(sh_dmae_remove), | ||
918 | .shutdown = sh_dmae_shutdown, | ||
919 | }; | ||
920 | |||
921 | static int __init sh_dmae_init(void) | ||
922 | { | ||
923 | /* Wire up NMI handling */ | ||
924 | int err = register_die_notifier(&sh_dmae_nmi_notifier); | ||
925 | if (err) | ||
926 | return err; | ||
927 | |||
928 | return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); | ||
929 | } | ||
930 | module_init(sh_dmae_init); | ||
931 | |||
932 | static void __exit sh_dmae_exit(void) | ||
933 | { | ||
934 | platform_driver_unregister(&sh_dmae_driver); | ||
935 | |||
936 | unregister_die_notifier(&sh_dmae_nmi_notifier); | ||
937 | } | ||
938 | module_exit(sh_dmae_exit); | ||
939 | |||
940 | MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); | ||
941 | MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); | ||
942 | MODULE_LICENSE("GPL"); | ||
943 | MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME); | ||
diff --git a/drivers/dma/shdma.h b/drivers/dma/sh/shdma.h index 0b1d2c105f0..9314e93225d 100644 --- a/drivers/dma/shdma.h +++ b/drivers/dma/sh/shdma.h | |||
@@ -13,42 +13,29 @@ | |||
13 | #ifndef __DMA_SHDMA_H | 13 | #ifndef __DMA_SHDMA_H |
14 | #define __DMA_SHDMA_H | 14 | #define __DMA_SHDMA_H |
15 | 15 | ||
16 | #include <linux/sh_dma.h> | ||
17 | #include <linux/shdma-base.h> | ||
16 | #include <linux/dmaengine.h> | 18 | #include <linux/dmaengine.h> |
17 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
18 | #include <linux/list.h> | 20 | #include <linux/list.h> |
19 | 21 | ||
20 | #define SH_DMAC_MAX_CHANNELS 20 | 22 | #define SH_DMAE_MAX_CHANNELS 20 |
21 | #define SH_DMA_SLAVE_NUMBER 256 | 23 | #define SH_DMAE_TCR_MAX 0x00FFFFFF /* 16MB */ |
22 | #define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ | ||
23 | 24 | ||
24 | struct device; | 25 | struct device; |
25 | 26 | ||
26 | enum dmae_pm_state { | ||
27 | DMAE_PM_ESTABLISHED, | ||
28 | DMAE_PM_BUSY, | ||
29 | DMAE_PM_PENDING, | ||
30 | }; | ||
31 | |||
32 | struct sh_dmae_chan { | 27 | struct sh_dmae_chan { |
33 | spinlock_t desc_lock; /* Descriptor operation lock */ | 28 | struct shdma_chan shdma_chan; |
34 | struct list_head ld_queue; /* Link descriptors queue */ | 29 | const struct sh_dmae_slave_config *config; /* Slave DMA configuration */ |
35 | struct list_head ld_free; /* Link descriptors free */ | ||
36 | struct dma_chan common; /* DMA common channel */ | ||
37 | struct device *dev; /* Channel device */ | ||
38 | struct tasklet_struct tasklet; /* Tasklet */ | ||
39 | int descs_allocated; /* desc count */ | ||
40 | int xmit_shift; /* log_2(bytes_per_xfer) */ | 30 | int xmit_shift; /* log_2(bytes_per_xfer) */ |
41 | int irq; | ||
42 | int id; /* Raw id of this channel */ | ||
43 | u32 __iomem *base; | 31 | u32 __iomem *base; |
44 | char dev_id[16]; /* unique name per DMAC of channel */ | 32 | char dev_id[16]; /* unique name per DMAC of channel */ |
45 | int pm_error; | 33 | int pm_error; |
46 | enum dmae_pm_state pm_state; | ||
47 | }; | 34 | }; |
48 | 35 | ||
49 | struct sh_dmae_device { | 36 | struct sh_dmae_device { |
50 | struct dma_device common; | 37 | struct shdma_dev shdma_dev; |
51 | struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS]; | 38 | struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS]; |
52 | struct sh_dmae_pdata *pdata; | 39 | struct sh_dmae_pdata *pdata; |
53 | struct list_head node; | 40 | struct list_head node; |
54 | u32 __iomem *chan_reg; | 41 | u32 __iomem *chan_reg; |
@@ -57,10 +44,21 @@ struct sh_dmae_device { | |||
57 | u32 chcr_ie_bit; | 44 | u32 chcr_ie_bit; |
58 | }; | 45 | }; |
59 | 46 | ||
60 | #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) | 47 | struct sh_dmae_regs { |
48 | u32 sar; /* SAR / source address */ | ||
49 | u32 dar; /* DAR / destination address */ | ||
50 | u32 tcr; /* TCR / transfer count */ | ||
51 | }; | ||
52 | |||
53 | struct sh_dmae_desc { | ||
54 | struct sh_dmae_regs hw; | ||
55 | struct shdma_desc shdma_desc; | ||
56 | }; | ||
57 | |||
58 | #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, shdma_chan) | ||
61 | #define to_sh_desc(lh) container_of(lh, struct sh_desc, node) | 59 | #define to_sh_desc(lh) container_of(lh, struct sh_desc, node) |
62 | #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx) | 60 | #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx) |
63 | #define to_sh_dev(chan) container_of(chan->common.device,\ | 61 | #define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\ |
64 | struct sh_dmae_device, common) | 62 | struct sh_dmae_device, shdma_dev.dma_dev) |
65 | 63 | ||
66 | #endif /* __DMA_SHDMA_H */ | 64 | #endif /* __DMA_SHDMA_H */ |
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c deleted file mode 100644 index 19d7a8d3975..00000000000 --- a/drivers/dma/shdma.c +++ /dev/null | |||
@@ -1,1524 +0,0 @@ | |||
1 | /* | ||
2 | * Renesas SuperH DMA Engine support | ||
3 | * | ||
4 | * based on drivers/dma/fsldma.c | ||
5 | * | ||
6 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | ||
7 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. | ||
8 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
9 | * | ||
10 | * This is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | * - The SuperH DMAC does not have a hardware DMA chain mode. | ||
16 | * - The maximum DMA transfer size is 16MB. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/init.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/dmaengine.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/pm_runtime.h> | ||
28 | #include <linux/sh_dma.h> | ||
29 | #include <linux/notifier.h> | ||
30 | #include <linux/kdebug.h> | ||
31 | #include <linux/spinlock.h> | ||
32 | #include <linux/rculist.h> | ||
33 | |||
34 | #include "dmaengine.h" | ||
35 | #include "shdma.h" | ||
36 | |||
37 | /* DMA descriptor control */ | ||
38 | enum sh_dmae_desc_status { | ||
39 | DESC_IDLE, | ||
40 | DESC_PREPARED, | ||
41 | DESC_SUBMITTED, | ||
42 | DESC_COMPLETED, /* completed, have to call callback */ | ||
43 | DESC_WAITING, /* callback called, waiting for ack / re-submit */ | ||
44 | }; | ||
45 | |||
46 | #define NR_DESCS_PER_CHANNEL 32 | ||
47 | /* Default MEMCPY transfer size = 2^2 = 4 bytes */ | ||
48 | #define LOG2_DEFAULT_XFER_SIZE 2 | ||
49 | |||
50 | /* | ||
51 | * Used for write-side mutual exclusion for the global device list, | ||
52 | * read-side synchronization by way of RCU, and per-controller data. | ||
53 | */ | ||
54 | static DEFINE_SPINLOCK(sh_dmae_lock); | ||
55 | static LIST_HEAD(sh_dmae_devices); | ||
56 | |||
57 | /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ | ||
58 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)]; | ||
59 | |||
60 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); | ||
61 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan); | ||
62 | |||
63 | static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data) | ||
64 | { | ||
65 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); | ||
66 | |||
67 | __raw_writel(data, shdev->chan_reg + | ||
68 | shdev->pdata->channel[sh_dc->id].chclr_offset); | ||
69 | } | ||
70 | |||
71 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) | ||
72 | { | ||
73 | __raw_writel(data, sh_dc->base + reg / sizeof(u32)); | ||
74 | } | ||
75 | |||
76 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) | ||
77 | { | ||
78 | return __raw_readl(sh_dc->base + reg / sizeof(u32)); | ||
79 | } | ||
80 | |||
81 | static u16 dmaor_read(struct sh_dmae_device *shdev) | ||
82 | { | ||
83 | u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); | ||
84 | |||
85 | if (shdev->pdata->dmaor_is_32bit) | ||
86 | return __raw_readl(addr); | ||
87 | else | ||
88 | return __raw_readw(addr); | ||
89 | } | ||
90 | |||
91 | static void dmaor_write(struct sh_dmae_device *shdev, u16 data) | ||
92 | { | ||
93 | u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); | ||
94 | |||
95 | if (shdev->pdata->dmaor_is_32bit) | ||
96 | __raw_writel(data, addr); | ||
97 | else | ||
98 | __raw_writew(data, addr); | ||
99 | } | ||
100 | |||
101 | static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data) | ||
102 | { | ||
103 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); | ||
104 | |||
105 | __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32)); | ||
106 | } | ||
107 | |||
108 | static u32 chcr_read(struct sh_dmae_chan *sh_dc) | ||
109 | { | ||
110 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); | ||
111 | |||
112 | return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32)); | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * Reset DMA controller | ||
117 | * | ||
118 | * SH7780 has two DMAOR registers | ||
119 | */ | ||
120 | static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) | ||
121 | { | ||
122 | unsigned short dmaor; | ||
123 | unsigned long flags; | ||
124 | |||
125 | spin_lock_irqsave(&sh_dmae_lock, flags); | ||
126 | |||
127 | dmaor = dmaor_read(shdev); | ||
128 | dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); | ||
129 | |||
130 | spin_unlock_irqrestore(&sh_dmae_lock, flags); | ||
131 | } | ||
132 | |||
133 | static int sh_dmae_rst(struct sh_dmae_device *shdev) | ||
134 | { | ||
135 | unsigned short dmaor; | ||
136 | unsigned long flags; | ||
137 | |||
138 | spin_lock_irqsave(&sh_dmae_lock, flags); | ||
139 | |||
140 | dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); | ||
141 | |||
142 | if (shdev->pdata->chclr_present) { | ||
143 | int i; | ||
144 | for (i = 0; i < shdev->pdata->channel_num; i++) { | ||
145 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | ||
146 | if (sh_chan) | ||
147 | chclr_write(sh_chan, 0); | ||
148 | } | ||
149 | } | ||
150 | |||
151 | dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); | ||
152 | |||
153 | dmaor = dmaor_read(shdev); | ||
154 | |||
155 | spin_unlock_irqrestore(&sh_dmae_lock, flags); | ||
156 | |||
157 | if (dmaor & (DMAOR_AE | DMAOR_NMIF)) { | ||
158 | dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n"); | ||
159 | return -EIO; | ||
160 | } | ||
161 | if (shdev->pdata->dmaor_init & ~dmaor) | ||
162 | dev_warn(shdev->common.dev, | ||
163 | "DMAOR=0x%x hasn't latched the initial value 0x%x.\n", | ||
164 | dmaor, shdev->pdata->dmaor_init); | ||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) | ||
169 | { | ||
170 | u32 chcr = chcr_read(sh_chan); | ||
171 | |||
172 | if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) | ||
173 | return true; /* working */ | ||
174 | |||
175 | return false; /* waiting */ | ||
176 | } | ||
177 | |||
178 | static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) | ||
179 | { | ||
180 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
181 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
182 | int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | | ||
183 | ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); | ||
184 | |||
185 | if (cnt >= pdata->ts_shift_num) | ||
186 | cnt = 0; | ||
187 | |||
188 | return pdata->ts_shift[cnt]; | ||
189 | } | ||
190 | |||
191 | static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) | ||
192 | { | ||
193 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
194 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
195 | int i; | ||
196 | |||
197 | for (i = 0; i < pdata->ts_shift_num; i++) | ||
198 | if (pdata->ts_shift[i] == l2size) | ||
199 | break; | ||
200 | |||
201 | if (i == pdata->ts_shift_num) | ||
202 | i = 0; | ||
203 | |||
204 | return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) | | ||
205 | ((i << pdata->ts_high_shift) & pdata->ts_high_mask); | ||
206 | } | ||
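
calc_xmit_shift() and log2size_to_chcr() above are inverses: one packs a ts_shift[] table index into the split TS field of CHCR, the other unpacks it and returns the log2 transfer size. A standalone round-trip sketch with invented masks, shifts and table values (they do not correspond to any real SoC's CHCR layout):

/*
 * Standalone illustration (not driver code) of the TS-field round trip.
 * Masks, shifts and the ts_shift[] table are made-up example values.
 */
#include <stdint.h>
#include <stdio.h>

static const int ts_shift[] = { 0, 1, 2, 3, 4, 5 };	/* 1B .. 32B */
#define TS_SHIFT_NUM   6
#define TS_LOW_SHIFT   3
#define TS_LOW_MASK    0x00000018	/* two low index bits at CHCR bits 3-4 */
#define TS_HIGH_SHIFT  18		/* index bit 2 lands in CHCR bit 20 */
#define TS_HIGH_MASK   0x00100000	/* one high index bit */

static uint32_t log2size_to_chcr(int l2size)
{
	int i;

	for (i = 0; i < TS_SHIFT_NUM; i++)
		if (ts_shift[i] == l2size)
			break;
	if (i == TS_SHIFT_NUM)
		i = 0;

	return ((i << TS_LOW_SHIFT) & TS_LOW_MASK) |
	       ((i << TS_HIGH_SHIFT) & TS_HIGH_MASK);
}

static int calc_xmit_shift(uint32_t chcr)
{
	int cnt = ((chcr & TS_LOW_MASK) >> TS_LOW_SHIFT) |
		  ((chcr & TS_HIGH_MASK) >> TS_HIGH_SHIFT);

	if (cnt >= TS_SHIFT_NUM)
		cnt = 0;
	return ts_shift[cnt];
}

int main(void)
{
	uint32_t chcr = log2size_to_chcr(5);	/* 2^5 = 32-byte transfer units */

	printf("CHCR TS bits 0x%08x -> shift %d\n", (unsigned int)chcr,
	       calc_xmit_shift(chcr));
	return 0;
}
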
207 | |||
208 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) | ||
209 | { | ||
210 | sh_dmae_writel(sh_chan, hw->sar, SAR); | ||
211 | sh_dmae_writel(sh_chan, hw->dar, DAR); | ||
212 | sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR); | ||
213 | } | ||
214 | |||
215 | static void dmae_start(struct sh_dmae_chan *sh_chan) | ||
216 | { | ||
217 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
218 | u32 chcr = chcr_read(sh_chan); | ||
219 | |||
220 | if (shdev->pdata->needs_tend_set) | ||
221 | sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND); | ||
222 | |||
223 | chcr |= CHCR_DE | shdev->chcr_ie_bit; | ||
224 | chcr_write(sh_chan, chcr & ~CHCR_TE); | ||
225 | } | ||
226 | |||
227 | static void dmae_halt(struct sh_dmae_chan *sh_chan) | ||
228 | { | ||
229 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
230 | u32 chcr = chcr_read(sh_chan); | ||
231 | |||
232 | chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit); | ||
233 | chcr_write(sh_chan, chcr); | ||
234 | } | ||
235 | |||
236 | static void dmae_init(struct sh_dmae_chan *sh_chan) | ||
237 | { | ||
238 | /* | ||
239 | * Default configuration for dual address memory-memory transfer. | ||
240 | * 0x400 represents auto-request. | ||
241 | */ | ||
242 | u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, | ||
243 | LOG2_DEFAULT_XFER_SIZE); | ||
244 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); | ||
245 | chcr_write(sh_chan, chcr); | ||
246 | } | ||
247 | |||
248 | static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) | ||
249 | { | ||
250 | /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */ | ||
251 | if (dmae_is_busy(sh_chan)) | ||
252 | return -EBUSY; | ||
253 | |||
254 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); | ||
255 | chcr_write(sh_chan, val); | ||
256 | |||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) | ||
261 | { | ||
262 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
263 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
264 | const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; | ||
265 | u16 __iomem *addr = shdev->dmars; | ||
266 | unsigned int shift = chan_pdata->dmars_bit; | ||
267 | |||
268 | if (dmae_is_busy(sh_chan)) | ||
269 | return -EBUSY; | ||
270 | |||
271 | if (pdata->no_dmars) | ||
272 | return 0; | ||
273 | |||
274 | /* in the case of a missing DMARS resource, use the first memory window */ | ||
275 | if (!addr) | ||
276 | addr = (u16 __iomem *)shdev->chan_reg; | ||
277 | addr += chan_pdata->dmars / sizeof(u16); | ||
278 | |||
279 | __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), | ||
280 | addr); | ||
281 | |||
282 | return 0; | ||
283 | } | ||
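
dmae_set_dmars() above performs a byte-wide read-modify-write: each 16-bit DMARS register carries the MID/RID of two channels, and (0xff00 >> shift) preserves the neighbouring channel's byte while the new 8-bit value is inserted at the byte selected by dmars_bit (0 or 8). A standalone sketch with hypothetical values:

/*
 * Standalone sketch (not driver code) of the DMARS read-modify-write.
 * Register contents and MID/RID values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t dmars_insert(uint16_t old, uint8_t mid_rid, unsigned int shift)
{
	/* (0xff00 >> shift) keeps the byte that belongs to the other channel */
	return (old & (0xff00 >> shift)) | ((uint16_t)mid_rid << shift);
}

int main(void)
{
	uint16_t dmars = 0x2100;	/* the other channel already holds 0x21 */

	printf("0x%04x\n", (unsigned int)dmars_insert(dmars, 0x85, 0));  /* 0x2185 */
	printf("0x%04x\n", (unsigned int)dmars_insert(0x0085, 0x21, 8)); /* 0x2185 */
	return 0;
}
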
284 | |||
285 | static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) | ||
286 | { | ||
287 | struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; | ||
288 | struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan); | ||
289 | struct sh_dmae_slave *param = tx->chan->private; | ||
290 | dma_async_tx_callback callback = tx->callback; | ||
291 | dma_cookie_t cookie; | ||
292 | bool power_up; | ||
293 | |||
294 | spin_lock_irq(&sh_chan->desc_lock); | ||
295 | |||
296 | if (list_empty(&sh_chan->ld_queue)) | ||
297 | power_up = true; | ||
298 | else | ||
299 | power_up = false; | ||
300 | |||
301 | cookie = dma_cookie_assign(tx); | ||
302 | |||
303 | /* Mark all chunks of this descriptor as submitted, move to the queue */ | ||
304 | list_for_each_entry_safe(chunk, c, desc->node.prev, node) { | ||
305 | /* | ||
306 | * All chunks are on the global ld_free, so, we have to find | ||
307 | * the end of the chain ourselves | ||
308 | */ | ||
309 | if (chunk != desc && (chunk->mark == DESC_IDLE || | ||
310 | chunk->async_tx.cookie > 0 || | ||
311 | chunk->async_tx.cookie == -EBUSY || | ||
312 | &chunk->node == &sh_chan->ld_free)) | ||
313 | break; | ||
314 | chunk->mark = DESC_SUBMITTED; | ||
315 | /* Callback goes to the last chunk */ | ||
316 | chunk->async_tx.callback = NULL; | ||
317 | chunk->cookie = cookie; | ||
318 | list_move_tail(&chunk->node, &sh_chan->ld_queue); | ||
319 | last = chunk; | ||
320 | } | ||
321 | |||
322 | last->async_tx.callback = callback; | ||
323 | last->async_tx.callback_param = tx->callback_param; | ||
324 | |||
325 | dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n", | ||
326 | tx->cookie, &last->async_tx, sh_chan->id, | ||
327 | desc->hw.sar, desc->hw.tcr, desc->hw.dar); | ||
328 | |||
329 | if (power_up) { | ||
330 | sh_chan->pm_state = DMAE_PM_BUSY; | ||
331 | |||
332 | pm_runtime_get(sh_chan->dev); | ||
333 | |||
334 | spin_unlock_irq(&sh_chan->desc_lock); | ||
335 | |||
336 | pm_runtime_barrier(sh_chan->dev); | ||
337 | |||
338 | spin_lock_irq(&sh_chan->desc_lock); | ||
339 | |||
340 | /* Have we been reset, while waiting? */ | ||
341 | if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) { | ||
342 | dev_dbg(sh_chan->dev, "Bring up channel %d\n", | ||
343 | sh_chan->id); | ||
344 | if (param) { | ||
345 | const struct sh_dmae_slave_config *cfg = | ||
346 | param->config; | ||
347 | |||
348 | dmae_set_dmars(sh_chan, cfg->mid_rid); | ||
349 | dmae_set_chcr(sh_chan, cfg->chcr); | ||
350 | } else { | ||
351 | dmae_init(sh_chan); | ||
352 | } | ||
353 | |||
354 | if (sh_chan->pm_state == DMAE_PM_PENDING) | ||
355 | sh_chan_xfer_ld_queue(sh_chan); | ||
356 | sh_chan->pm_state = DMAE_PM_ESTABLISHED; | ||
357 | } | ||
358 | } else { | ||
359 | sh_chan->pm_state = DMAE_PM_PENDING; | ||
360 | } | ||
361 | |||
362 | spin_unlock_irq(&sh_chan->desc_lock); | ||
363 | |||
364 | return cookie; | ||
365 | } | ||
366 | |||
367 | /* Called with desc_lock held */ | ||
368 | static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) | ||
369 | { | ||
370 | struct sh_desc *desc; | ||
371 | |||
372 | list_for_each_entry(desc, &sh_chan->ld_free, node) | ||
373 | if (desc->mark != DESC_PREPARED) { | ||
374 | BUG_ON(desc->mark != DESC_IDLE); | ||
375 | list_del(&desc->node); | ||
376 | return desc; | ||
377 | } | ||
378 | |||
379 | return NULL; | ||
380 | } | ||
381 | |||
382 | static const struct sh_dmae_slave_config *sh_dmae_find_slave( | ||
383 | struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param) | ||
384 | { | ||
385 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
386 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
387 | int i; | ||
388 | |||
389 | if (param->slave_id >= SH_DMA_SLAVE_NUMBER) | ||
390 | return NULL; | ||
391 | |||
392 | for (i = 0; i < pdata->slave_num; i++) | ||
393 | if (pdata->slave[i].slave_id == param->slave_id) | ||
394 | return pdata->slave + i; | ||
395 | |||
396 | return NULL; | ||
397 | } | ||
398 | |||
399 | static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) | ||
400 | { | ||
401 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | ||
402 | struct sh_desc *desc; | ||
403 | struct sh_dmae_slave *param = chan->private; | ||
404 | int ret; | ||
405 | |||
406 | /* | ||
407 | * This relies on the guarantee from dmaengine that alloc_chan_resources | ||
408 | * never runs concurrently with itself or free_chan_resources. | ||
409 | */ | ||
410 | if (param) { | ||
411 | const struct sh_dmae_slave_config *cfg; | ||
412 | |||
413 | cfg = sh_dmae_find_slave(sh_chan, param); | ||
414 | if (!cfg) { | ||
415 | ret = -EINVAL; | ||
416 | goto efindslave; | ||
417 | } | ||
418 | |||
419 | if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) { | ||
420 | ret = -EBUSY; | ||
421 | goto etestused; | ||
422 | } | ||
423 | |||
424 | param->config = cfg; | ||
425 | } | ||
426 | |||
427 | while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { | ||
428 | desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL); | ||
429 | if (!desc) | ||
430 | break; | ||
431 | dma_async_tx_descriptor_init(&desc->async_tx, | ||
432 | &sh_chan->common); | ||
433 | desc->async_tx.tx_submit = sh_dmae_tx_submit; | ||
434 | desc->mark = DESC_IDLE; | ||
435 | |||
436 | list_add(&desc->node, &sh_chan->ld_free); | ||
437 | sh_chan->descs_allocated++; | ||
438 | } | ||
439 | |||
440 | if (!sh_chan->descs_allocated) { | ||
441 | ret = -ENOMEM; | ||
442 | goto edescalloc; | ||
443 | } | ||
444 | |||
445 | return sh_chan->descs_allocated; | ||
446 | |||
447 | edescalloc: | ||
448 | if (param) | ||
449 | clear_bit(param->slave_id, sh_dmae_slave_used); | ||
450 | etestused: | ||
451 | efindslave: | ||
452 | chan->private = NULL; | ||
453 | return ret; | ||
454 | } | ||
455 | |||
456 | /* | ||
457 | * sh_dma_free_chan_resources - Free all resources of the channel. | ||
458 | */ | ||
459 | static void sh_dmae_free_chan_resources(struct dma_chan *chan) | ||
460 | { | ||
461 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | ||
462 | struct sh_desc *desc, *_desc; | ||
463 | LIST_HEAD(list); | ||
464 | |||
465 | /* Protect against ISR */ | ||
466 | spin_lock_irq(&sh_chan->desc_lock); | ||
467 | dmae_halt(sh_chan); | ||
468 | spin_unlock_irq(&sh_chan->desc_lock); | ||
469 | |||
470 | /* Now no new interrupts will occur */ | ||
471 | |||
472 | /* Prepared and not submitted descriptors can still be on the queue */ | ||
473 | if (!list_empty(&sh_chan->ld_queue)) | ||
474 | sh_dmae_chan_ld_cleanup(sh_chan, true); | ||
475 | |||
476 | if (chan->private) { | ||
477 | /* The caller is holding dma_list_mutex */ | ||
478 | struct sh_dmae_slave *param = chan->private; | ||
479 | clear_bit(param->slave_id, sh_dmae_slave_used); | ||
480 | chan->private = NULL; | ||
481 | } | ||
482 | |||
483 | spin_lock_irq(&sh_chan->desc_lock); | ||
484 | |||
485 | list_splice_init(&sh_chan->ld_free, &list); | ||
486 | sh_chan->descs_allocated = 0; | ||
487 | |||
488 | spin_unlock_irq(&sh_chan->desc_lock); | ||
489 | |||
490 | list_for_each_entry_safe(desc, _desc, &list, node) | ||
491 | kfree(desc); | ||
492 | } | ||
493 | |||
494 | /** | ||
495 | * sh_dmae_add_desc - get, set up and return one transfer descriptor | ||
496 | * @sh_chan: DMA channel | ||
497 | * @flags: DMA transfer flags | ||
498 | * @dest: destination DMA address, incremented when direction equals | ||
499 | * DMA_DEV_TO_MEM | ||
500 | * @src: source DMA address, incremented when direction equals | ||
501 | * DMA_MEM_TO_DEV | ||
502 | * @len: DMA transfer length | ||
503 | * @first: if NULL, set to the current descriptor and cookie set to -EBUSY | ||
504 | * @direction: needed for slave DMA to decide which address to keep constant, | ||
505 | * equals DMA_MEM_TO_MEM for MEMCPY | ||
506 | * Returns 0 or an error | ||
507 | * Locks: called with desc_lock held | ||
508 | */ | ||
509 | static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, | ||
510 | unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, | ||
511 | struct sh_desc **first, enum dma_transfer_direction direction) | ||
512 | { | ||
513 | struct sh_desc *new; | ||
514 | size_t copy_size; | ||
515 | |||
516 | if (!*len) | ||
517 | return NULL; | ||
518 | |||
519 | /* Allocate the link descriptor from the free list */ | ||
520 | new = sh_dmae_get_desc(sh_chan); | ||
521 | if (!new) { | ||
522 | dev_err(sh_chan->dev, "No free link descriptor available\n"); | ||
523 | return NULL; | ||
524 | } | ||
525 | |||
526 | copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1); | ||
527 | |||
528 | new->hw.sar = *src; | ||
529 | new->hw.dar = *dest; | ||
530 | new->hw.tcr = copy_size; | ||
531 | |||
532 | if (!*first) { | ||
533 | /* First desc */ | ||
534 | new->async_tx.cookie = -EBUSY; | ||
535 | *first = new; | ||
536 | } else { | ||
537 | /* Other desc - invisible to the user */ | ||
538 | new->async_tx.cookie = -EINVAL; | ||
539 | } | ||
540 | |||
541 | dev_dbg(sh_chan->dev, | ||
542 | "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n", | ||
543 | copy_size, *len, *src, *dest, &new->async_tx, | ||
544 | new->async_tx.cookie, sh_chan->xmit_shift); | ||
545 | |||
546 | new->mark = DESC_PREPARED; | ||
547 | new->async_tx.flags = flags; | ||
548 | new->direction = direction; | ||
549 | |||
550 | *len -= copy_size; | ||
551 | if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) | ||
552 | *src += copy_size; | ||
553 | if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM) | ||
554 | *dest += copy_size; | ||
555 | |||
556 | return new; | ||
557 | } | ||
558 | |||
559 | /* | ||
560 | * sh_dmae_prep_sg - prepare transfer descriptors from an SG list | ||
561 | * | ||
562 | * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also | ||
563 | * converted to scatter-gather to guarantee consistent locking and correct | ||
564 | * list manipulation. For slave DMA, direction carries the usual meaning and, | ||
565 | * logically, the SG list is RAM and the addr variable holds the slave address, | ||
566 | * e.g., the FIFO I/O register. For MEMCPY, direction equals DMA_MEM_TO_MEM | ||
567 | * and the SG list contains only one element, pointing at the source buffer. | ||
568 | */ | ||
569 | static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, | ||
570 | struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, | ||
571 | enum dma_transfer_direction direction, unsigned long flags) | ||
572 | { | ||
573 | struct scatterlist *sg; | ||
574 | struct sh_desc *first = NULL, *new = NULL /* compiler... */; | ||
575 | LIST_HEAD(tx_list); | ||
576 | int chunks = 0; | ||
577 | unsigned long irq_flags; | ||
578 | int i; | ||
579 | |||
580 | if (!sg_len) | ||
581 | return NULL; | ||
582 | |||
583 | for_each_sg(sgl, sg, sg_len, i) | ||
584 | chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) / | ||
585 | (SH_DMA_TCR_MAX + 1); | ||
586 | |||
587 | /* Have to lock the whole loop to protect against concurrent release */ | ||
588 | spin_lock_irqsave(&sh_chan->desc_lock, irq_flags); | ||
589 | |||
590 | /* | ||
591 | * Chaining: | ||
592 | * the first descriptor is the one the user deals with in all API calls; its | ||
593 | * cookie is initially set to -EBUSY and changed to a positive number at | ||
594 | * tx-submit | ||
595 | * if more than one chunk is needed, further chunks have cookie = -EINVAL | ||
596 | * the last chunk, if not equal to the first, has cookie = -ENOSPC | ||
597 | * all chunks are linked onto the tx_list head with their .node heads | ||
598 | * only during this function, then they are immediately spliced | ||
599 | * back onto the free list in form of a chain | ||
600 | */ | ||
601 | for_each_sg(sgl, sg, sg_len, i) { | ||
602 | dma_addr_t sg_addr = sg_dma_address(sg); | ||
603 | size_t len = sg_dma_len(sg); | ||
604 | |||
605 | if (!len) | ||
606 | goto err_get_desc; | ||
607 | |||
608 | do { | ||
609 | dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n", | ||
610 | i, sg, len, (unsigned long long)sg_addr); | ||
611 | |||
612 | if (direction == DMA_DEV_TO_MEM) | ||
613 | new = sh_dmae_add_desc(sh_chan, flags, | ||
614 | &sg_addr, addr, &len, &first, | ||
615 | direction); | ||
616 | else | ||
617 | new = sh_dmae_add_desc(sh_chan, flags, | ||
618 | addr, &sg_addr, &len, &first, | ||
619 | direction); | ||
620 | if (!new) | ||
621 | goto err_get_desc; | ||
622 | |||
623 | new->chunks = chunks--; | ||
624 | list_add_tail(&new->node, &tx_list); | ||
625 | } while (len); | ||
626 | } | ||
627 | |||
628 | if (new != first) | ||
629 | new->async_tx.cookie = -ENOSPC; | ||
630 | |||
631 | /* Put them back on the free list, so, they don't get lost */ | ||
632 | list_splice_tail(&tx_list, &sh_chan->ld_free); | ||
633 | |||
634 | spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags); | ||
635 | |||
636 | return &first->async_tx; | ||
637 | |||
638 | err_get_desc: | ||
639 | list_for_each_entry(new, &tx_list, node) | ||
640 | new->mark = DESC_IDLE; | ||
641 | list_splice(&tx_list, &sh_chan->ld_free); | ||
642 | |||
643 | spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags); | ||
644 | |||
645 | return NULL; | ||
646 | } | ||
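
The chunk count computed at the top of sh_dmae_prep_sg() is a rounded-up division of each SG element by the 16MB hardware limit (SH_DMA_TCR_MAX + 1). A standalone sketch of that arithmetic with hypothetical buffer sizes:

/*
 * Standalone sketch (not driver code) of the chunking arithmetic:
 * each SG element is split into at most (SH_DMA_TCR_MAX + 1)-byte
 * chunks, i.e. a rounded-up division by 16MB.
 */
#include <stddef.h>
#include <stdio.h>

#define SH_DMA_TCR_MAX 0x00FFFFFFUL	/* 16MB - 1 */

static size_t chunks_for_len(size_t len)
{
	/* rounded-up division by the 16MB maximum transfer size */
	return (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
}

int main(void)
{
	/* 40MB needs three chunks: 16MB + 16MB + 8MB */
	printf("%zu\n", chunks_for_len(40UL << 20));
	/* exactly 16MB still fits in a single chunk */
	printf("%zu\n", chunks_for_len(16UL << 20));
	return 0;
}
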
647 | |||
648 | static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | ||
649 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | ||
650 | size_t len, unsigned long flags) | ||
651 | { | ||
652 | struct sh_dmae_chan *sh_chan; | ||
653 | struct scatterlist sg; | ||
654 | |||
655 | if (!chan || !len) | ||
656 | return NULL; | ||
657 | |||
658 | sh_chan = to_sh_chan(chan); | ||
659 | |||
660 | sg_init_table(&sg, 1); | ||
661 | sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len, | ||
662 | offset_in_page(dma_src)); | ||
663 | sg_dma_address(&sg) = dma_src; | ||
664 | sg_dma_len(&sg) = len; | ||
665 | |||
666 | return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, | ||
667 | flags); | ||
668 | } | ||
669 | |||
670 | static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( | ||
671 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, | ||
672 | enum dma_transfer_direction direction, unsigned long flags, | ||
673 | void *context) | ||
674 | { | ||
675 | struct sh_dmae_slave *param; | ||
676 | struct sh_dmae_chan *sh_chan; | ||
677 | dma_addr_t slave_addr; | ||
678 | |||
679 | if (!chan) | ||
680 | return NULL; | ||
681 | |||
682 | sh_chan = to_sh_chan(chan); | ||
683 | param = chan->private; | ||
684 | |||
685 | /* Someone calling slave DMA on a public channel? */ | ||
686 | if (!param || !sg_len) { | ||
687 | dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n", | ||
688 | __func__, param, sg_len, param ? param->slave_id : -1); | ||
689 | return NULL; | ||
690 | } | ||
691 | |||
692 | slave_addr = param->config->addr; | ||
693 | |||
694 | /* | ||
695 | * if (param != NULL), this is a successfully requested slave channel, | ||
696 | * therefore param->config != NULL too. | ||
697 | */ | ||
698 | return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr, | ||
699 | direction, flags); | ||
700 | } | ||
701 | |||
702 | static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
703 | unsigned long arg) | ||
704 | { | ||
705 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | ||
706 | unsigned long flags; | ||
707 | |||
708 | /* Only supports DMA_TERMINATE_ALL */ | ||
709 | if (cmd != DMA_TERMINATE_ALL) | ||
710 | return -ENXIO; | ||
711 | |||
712 | if (!chan) | ||
713 | return -EINVAL; | ||
714 | |||
715 | spin_lock_irqsave(&sh_chan->desc_lock, flags); | ||
716 | dmae_halt(sh_chan); | ||
717 | |||
718 | if (!list_empty(&sh_chan->ld_queue)) { | ||
719 | /* Record partial transfer */ | ||
720 | struct sh_desc *desc = list_entry(sh_chan->ld_queue.next, | ||
721 | struct sh_desc, node); | ||
722 | desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << | ||
723 | sh_chan->xmit_shift; | ||
724 | } | ||
725 | spin_unlock_irqrestore(&sh_chan->desc_lock, flags); | ||
726 | |||
727 | sh_dmae_chan_ld_cleanup(sh_chan, true); | ||
728 | |||
729 | return 0; | ||
730 | } | ||
731 | |||
732 | static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) | ||
733 | { | ||
734 | struct sh_desc *desc, *_desc; | ||
735 | /* Is the "exposed" head of a chain acked? */ | ||
736 | bool head_acked = false; | ||
737 | dma_cookie_t cookie = 0; | ||
738 | dma_async_tx_callback callback = NULL; | ||
739 | void *param = NULL; | ||
740 | unsigned long flags; | ||
741 | |||
742 | spin_lock_irqsave(&sh_chan->desc_lock, flags); | ||
743 | list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) { | ||
744 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | ||
745 | |||
746 | BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie); | ||
747 | BUG_ON(desc->mark != DESC_SUBMITTED && | ||
748 | desc->mark != DESC_COMPLETED && | ||
749 | desc->mark != DESC_WAITING); | ||
750 | |||
751 | /* | ||
752 | * the queue is ordered, and we use this loop to (1) clean up all | ||
753 | * completed descriptors and (2) update the descriptor flags of | ||
754 | * any chunks in a (partially) completed chain | ||
755 | */ | ||
756 | if (!all && desc->mark == DESC_SUBMITTED && | ||
757 | desc->cookie != cookie) | ||
758 | break; | ||
759 | |||
760 | if (tx->cookie > 0) | ||
761 | cookie = tx->cookie; | ||
762 | |||
763 | if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { | ||
764 | if (sh_chan->common.completed_cookie != desc->cookie - 1) | ||
765 | dev_dbg(sh_chan->dev, | ||
766 | "Completing cookie %d, expected %d\n", | ||
767 | desc->cookie, | ||
768 | sh_chan->common.completed_cookie + 1); | ||
769 | sh_chan->common.completed_cookie = desc->cookie; | ||
770 | } | ||
771 | |||
772 | /* Call callback on the last chunk */ | ||
773 | if (desc->mark == DESC_COMPLETED && tx->callback) { | ||
774 | desc->mark = DESC_WAITING; | ||
775 | callback = tx->callback; | ||
776 | param = tx->callback_param; | ||
777 | dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n", | ||
778 | tx->cookie, tx, sh_chan->id); | ||
779 | BUG_ON(desc->chunks != 1); | ||
780 | break; | ||
781 | } | ||
782 | |||
783 | if (tx->cookie > 0 || tx->cookie == -EBUSY) { | ||
784 | if (desc->mark == DESC_COMPLETED) { | ||
785 | BUG_ON(tx->cookie < 0); | ||
786 | desc->mark = DESC_WAITING; | ||
787 | } | ||
788 | head_acked = async_tx_test_ack(tx); | ||
789 | } else { | ||
790 | switch (desc->mark) { | ||
791 | case DESC_COMPLETED: | ||
792 | desc->mark = DESC_WAITING; | ||
793 | /* Fall through */ | ||
794 | case DESC_WAITING: | ||
795 | if (head_acked) | ||
796 | async_tx_ack(&desc->async_tx); | ||
797 | } | ||
798 | } | ||
799 | |||
800 | dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n", | ||
801 | tx, tx->cookie); | ||
802 | |||
803 | if (((desc->mark == DESC_COMPLETED || | ||
804 | desc->mark == DESC_WAITING) && | ||
805 | async_tx_test_ack(&desc->async_tx)) || all) { | ||
806 | /* Remove from ld_queue list */ | ||
807 | desc->mark = DESC_IDLE; | ||
808 | |||
809 | list_move(&desc->node, &sh_chan->ld_free); | ||
810 | |||
811 | if (list_empty(&sh_chan->ld_queue)) { | ||
812 | dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id); | ||
813 | pm_runtime_put(sh_chan->dev); | ||
814 | } | ||
815 | } | ||
816 | } | ||
817 | |||
818 | if (all && !callback) | ||
819 | /* | ||
820 | * Terminating and the loop completed normally: forgive | ||
821 | * uncompleted cookies | ||
822 | */ | ||
823 | sh_chan->common.completed_cookie = sh_chan->common.cookie; | ||
824 | |||
825 | spin_unlock_irqrestore(&sh_chan->desc_lock, flags); | ||
826 | |||
827 | if (callback) | ||
828 | callback(param); | ||
829 | |||
830 | return callback; | ||
831 | } | ||
832 | |||
833 | /* | ||
834 | * sh_dmae_chan_ld_cleanup - Clean up link descriptors | ||
835 | * | ||
836 | * This function cleans up the ld_queue of the DMA channel. | ||
837 | */ | ||
838 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) | ||
839 | { | ||
840 | while (__ld_cleanup(sh_chan, all)) | ||
841 | ; | ||
842 | } | ||
843 | |||
844 | /* Called under spin_lock_irq(&sh_chan->desc_lock) */ | ||
845 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) | ||
846 | { | ||
847 | struct sh_desc *desc; | ||
848 | |||
849 | /* DMA work check */ | ||
850 | if (dmae_is_busy(sh_chan)) | ||
851 | return; | ||
852 | |||
853 | /* Find the first not transferred descriptor */ | ||
854 | list_for_each_entry(desc, &sh_chan->ld_queue, node) | ||
855 | if (desc->mark == DESC_SUBMITTED) { | ||
856 | dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n", | ||
857 | desc->async_tx.cookie, sh_chan->id, | ||
858 | desc->hw.tcr, desc->hw.sar, desc->hw.dar); | ||
859 | /* Get the ld start address from ld_queue */ | ||
860 | dmae_set_reg(sh_chan, &desc->hw); | ||
861 | dmae_start(sh_chan); | ||
862 | break; | ||
863 | } | ||
864 | } | ||
865 | |||
866 | static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) | ||
867 | { | ||
868 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | ||
869 | |||
870 | spin_lock_irq(&sh_chan->desc_lock); | ||
871 | if (sh_chan->pm_state == DMAE_PM_ESTABLISHED) | ||
872 | sh_chan_xfer_ld_queue(sh_chan); | ||
873 | else | ||
874 | sh_chan->pm_state = DMAE_PM_PENDING; | ||
875 | spin_unlock_irq(&sh_chan->desc_lock); | ||
876 | } | ||
877 | |||
878 | static enum dma_status sh_dmae_tx_status(struct dma_chan *chan, | ||
879 | dma_cookie_t cookie, | ||
880 | struct dma_tx_state *txstate) | ||
881 | { | ||
882 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | ||
883 | enum dma_status status; | ||
884 | unsigned long flags; | ||
885 | |||
886 | sh_dmae_chan_ld_cleanup(sh_chan, false); | ||
887 | |||
888 | spin_lock_irqsave(&sh_chan->desc_lock, flags); | ||
889 | |||
890 | status = dma_cookie_status(chan, cookie, txstate); | ||
891 | |||
892 | /* | ||
893 | * If we don't find the cookie on the queue, it has been aborted and we | ||
894 | * have to report an error | ||
895 | */ | ||
896 | if (status != DMA_SUCCESS) { | ||
897 | struct sh_desc *desc; | ||
898 | status = DMA_ERROR; | ||
899 | list_for_each_entry(desc, &sh_chan->ld_queue, node) | ||
900 | if (desc->cookie == cookie) { | ||
901 | status = DMA_IN_PROGRESS; | ||
902 | break; | ||
903 | } | ||
904 | } | ||
905 | |||
906 | spin_unlock_irqrestore(&sh_chan->desc_lock, flags); | ||
907 | |||
908 | return status; | ||
909 | } | ||
910 | |||
911 | static irqreturn_t sh_dmae_interrupt(int irq, void *data) | ||
912 | { | ||
913 | irqreturn_t ret = IRQ_NONE; | ||
914 | struct sh_dmae_chan *sh_chan = data; | ||
915 | u32 chcr; | ||
916 | |||
917 | spin_lock(&sh_chan->desc_lock); | ||
918 | |||
919 | chcr = chcr_read(sh_chan); | ||
920 | |||
921 | if (chcr & CHCR_TE) { | ||
922 | /* DMA stop */ | ||
923 | dmae_halt(sh_chan); | ||
924 | |||
925 | ret = IRQ_HANDLED; | ||
926 | tasklet_schedule(&sh_chan->tasklet); | ||
927 | } | ||
928 | |||
929 | spin_unlock(&sh_chan->desc_lock); | ||
930 | |||
931 | return ret; | ||
932 | } | ||
933 | |||
934 | /* Called from error IRQ or NMI */ | ||
935 | static bool sh_dmae_reset(struct sh_dmae_device *shdev) | ||
936 | { | ||
937 | unsigned int handled = 0; | ||
938 | int i; | ||
939 | |||
940 | /* halt the dma controller */ | ||
941 | sh_dmae_ctl_stop(shdev); | ||
942 | |||
943 | /* We cannot detect which channel caused the error, so we have to reset all of them */ | ||
944 | for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) { | ||
945 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | ||
946 | struct sh_desc *desc; | ||
947 | LIST_HEAD(dl); | ||
948 | |||
949 | if (!sh_chan) | ||
950 | continue; | ||
951 | |||
952 | spin_lock(&sh_chan->desc_lock); | ||
953 | |||
954 | /* Stop the channel */ | ||
955 | dmae_halt(sh_chan); | ||
956 | |||
957 | list_splice_init(&sh_chan->ld_queue, &dl); | ||
958 | |||
959 | if (!list_empty(&dl)) { | ||
960 | dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id); | ||
961 | pm_runtime_put(sh_chan->dev); | ||
962 | } | ||
963 | sh_chan->pm_state = DMAE_PM_ESTABLISHED; | ||
964 | |||
965 | spin_unlock(&sh_chan->desc_lock); | ||
966 | |||
967 | /* Complete all */ | ||
968 | list_for_each_entry(desc, &dl, node) { | ||
969 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | ||
970 | desc->mark = DESC_IDLE; | ||
971 | if (tx->callback) | ||
972 | tx->callback(tx->callback_param); | ||
973 | } | ||
974 | |||
975 | spin_lock(&sh_chan->desc_lock); | ||
976 | list_splice(&dl, &sh_chan->ld_free); | ||
977 | spin_unlock(&sh_chan->desc_lock); | ||
978 | |||
979 | handled++; | ||
980 | } | ||
981 | |||
982 | sh_dmae_rst(shdev); | ||
983 | |||
984 | return !!handled; | ||
985 | } | ||
986 | |||
987 | static irqreturn_t sh_dmae_err(int irq, void *data) | ||
988 | { | ||
989 | struct sh_dmae_device *shdev = data; | ||
990 | |||
991 | if (!(dmaor_read(shdev) & DMAOR_AE)) | ||
992 | return IRQ_NONE; | ||
993 | |||
994 | sh_dmae_reset(data); | ||
995 | return IRQ_HANDLED; | ||
996 | } | ||
997 | |||
998 | static void dmae_do_tasklet(unsigned long data) | ||
999 | { | ||
1000 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; | ||
1001 | struct sh_desc *desc; | ||
1002 | u32 sar_buf = sh_dmae_readl(sh_chan, SAR); | ||
1003 | u32 dar_buf = sh_dmae_readl(sh_chan, DAR); | ||
1004 | |||
1005 | spin_lock_irq(&sh_chan->desc_lock); | ||
1006 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { | ||
1007 | if (desc->mark == DESC_SUBMITTED && | ||
1008 | ((desc->direction == DMA_DEV_TO_MEM && | ||
1009 | (desc->hw.dar + desc->hw.tcr) == dar_buf) || | ||
1010 | (desc->hw.sar + desc->hw.tcr) == sar_buf)) { | ||
1011 | dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", | ||
1012 | desc->async_tx.cookie, &desc->async_tx, | ||
1013 | desc->hw.dar); | ||
1014 | desc->mark = DESC_COMPLETED; | ||
1015 | break; | ||
1016 | } | ||
1017 | } | ||
1018 | /* Next desc */ | ||
1019 | sh_chan_xfer_ld_queue(sh_chan); | ||
1020 | spin_unlock_irq(&sh_chan->desc_lock); | ||
1021 | |||
1022 | sh_dmae_chan_ld_cleanup(sh_chan, false); | ||
1023 | } | ||
1024 | |||
1025 | static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev) | ||
1026 | { | ||
1027 | /* Fast path out if NMIF is not asserted for this controller */ | ||
1028 | if ((dmaor_read(shdev) & DMAOR_NMIF) == 0) | ||
1029 | return false; | ||
1030 | |||
1031 | return sh_dmae_reset(shdev); | ||
1032 | } | ||
1033 | |||
1034 | static int sh_dmae_nmi_handler(struct notifier_block *self, | ||
1035 | unsigned long cmd, void *data) | ||
1036 | { | ||
1037 | struct sh_dmae_device *shdev; | ||
1038 | int ret = NOTIFY_DONE; | ||
1039 | bool triggered; | ||
1040 | |||
1041 | /* | ||
1042 | * Only concern ourselves with NMI events. | ||
1043 | * | ||
1044 | * Normally we would check the die chain value, but as this needs | ||
1045 | * to be architecture independent, check for NMI context instead. | ||
1046 | */ | ||
1047 | if (!in_nmi()) | ||
1048 | return NOTIFY_DONE; | ||
1049 | |||
1050 | rcu_read_lock(); | ||
1051 | list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) { | ||
1052 | /* | ||
1053 | * Only stop if one of the controllers has NMIF asserted; | ||
1054 | * we do not want to interfere with regular address error | ||
1055 | * handling or NMI events that don't concern the DMACs. | ||
1056 | */ | ||
1057 | triggered = sh_dmae_nmi_notify(shdev); | ||
1058 | if (triggered == true) | ||
1059 | ret = NOTIFY_OK; | ||
1060 | } | ||
1061 | rcu_read_unlock(); | ||
1062 | |||
1063 | return ret; | ||
1064 | } | ||
1065 | |||
1066 | static struct notifier_block sh_dmae_nmi_notifier __read_mostly = { | ||
1067 | .notifier_call = sh_dmae_nmi_handler, | ||
1068 | |||
1069 | /* Run before NMI debug handler and KGDB */ | ||
1070 | .priority = 1, | ||
1071 | }; | ||
1072 | |||
1073 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, | ||
1074 | int irq, unsigned long flags) | ||
1075 | { | ||
1076 | int err; | ||
1077 | const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; | ||
1078 | struct platform_device *pdev = to_platform_device(shdev->common.dev); | ||
1079 | struct sh_dmae_chan *new_sh_chan; | ||
1080 | |||
1081 | /* alloc channel */ | ||
1082 | new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); | ||
1083 | if (!new_sh_chan) { | ||
1084 | dev_err(shdev->common.dev, | ||
1085 | "No free memory for allocating dma channels!\n"); | ||
1086 | return -ENOMEM; | ||
1087 | } | ||
1088 | |||
1089 | new_sh_chan->pm_state = DMAE_PM_ESTABLISHED; | ||
1090 | |||
1091 | /* reference struct dma_device */ | ||
1092 | new_sh_chan->common.device = &shdev->common; | ||
1093 | dma_cookie_init(&new_sh_chan->common); | ||
1094 | |||
1095 | new_sh_chan->dev = shdev->common.dev; | ||
1096 | new_sh_chan->id = id; | ||
1097 | new_sh_chan->irq = irq; | ||
1098 | new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); | ||
1099 | |||
1100 | /* Init DMA tasklet */ | ||
1101 | tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, | ||
1102 | (unsigned long)new_sh_chan); | ||
1103 | |||
1104 | spin_lock_init(&new_sh_chan->desc_lock); | ||
1105 | |||
1106 | /* Init descriptor management lists */ | ||
1107 | INIT_LIST_HEAD(&new_sh_chan->ld_queue); | ||
1108 | INIT_LIST_HEAD(&new_sh_chan->ld_free); | ||
1109 | |||
1110 | /* Add the channel to DMA device channel list */ | ||
1111 | list_add_tail(&new_sh_chan->common.device_node, | ||
1112 | &shdev->common.channels); | ||
1113 | shdev->common.chancnt++; | ||
1114 | |||
1115 | if (pdev->id >= 0) | ||
1116 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), | ||
1117 | "sh-dmae%d.%d", pdev->id, new_sh_chan->id); | ||
1118 | else | ||
1119 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), | ||
1120 | "sh-dma%d", new_sh_chan->id); | ||
1121 | |||
1122 | /* set up channel irq */ | ||
1123 | err = request_irq(irq, &sh_dmae_interrupt, flags, | ||
1124 | new_sh_chan->dev_id, new_sh_chan); | ||
1125 | if (err) { | ||
1126 | dev_err(shdev->common.dev, "DMA channel %d request_irq error " | ||
1127 | "with return %d\n", id, err); | ||
1128 | goto err_no_irq; | ||
1129 | } | ||
1130 | |||
1131 | shdev->chan[id] = new_sh_chan; | ||
1132 | return 0; | ||
1133 | |||
1134 | err_no_irq: | ||
1135 | /* remove from dmaengine device node */ | ||
1136 | list_del(&new_sh_chan->common.device_node); | ||
1137 | kfree(new_sh_chan); | ||
1138 | return err; | ||
1139 | } | ||
1140 | |||
1141 | static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) | ||
1142 | { | ||
1143 | int i; | ||
1144 | |||
1145 | for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { | ||
1146 | if (shdev->chan[i]) { | ||
1147 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | ||
1148 | |||
1149 | free_irq(sh_chan->irq, sh_chan); | ||
1150 | |||
1151 | list_del(&sh_chan->common.device_node); | ||
1152 | kfree(sh_chan); | ||
1153 | shdev->chan[i] = NULL; | ||
1154 | } | ||
1155 | } | ||
1156 | shdev->common.chancnt = 0; | ||
1157 | } | ||
1158 | |||
1159 | static int __init sh_dmae_probe(struct platform_device *pdev) | ||
1160 | { | ||
1161 | struct sh_dmae_pdata *pdata = pdev->dev.platform_data; | ||
1162 | unsigned long irqflags = IRQF_DISABLED, | ||
1163 | chan_flag[SH_DMAC_MAX_CHANNELS] = {}; | ||
1164 | int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; | ||
1165 | int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; | ||
1166 | struct sh_dmae_device *shdev; | ||
1167 | struct resource *chan, *dmars, *errirq_res, *chanirq_res; | ||
1168 | |||
1169 | /* get platform data */ | ||
1170 | if (!pdata || !pdata->channel_num) | ||
1171 | return -ENODEV; | ||
1172 | |||
1173 | chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1174 | /* DMARS area is optional */ | ||
1175 | dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
1176 | /* | ||
1177 | * IRQ resources: | ||
1178 | * 1. there must always be at least one IRQ IO-resource. On SH4 it is | ||
1179 | * the error IRQ, in which case it is the only IRQ in this resource: | ||
1180 | * start == end. If it is the only IRQ resource, all channels also | ||
1181 | * use the same IRQ. | ||
1182 | * 2. DMA channel IRQ resources can be specified one per resource or in | ||
1183 | * ranges (start != end) | ||
1184 | * 3. iff all events (channels and, optionally, error) on this | ||
1185 | * controller use the same IRQ, only one IRQ resource can be | ||
1186 | * specified, otherwise there must be one IRQ per channel, even if | ||
1187 | * some of them are equal | ||
1188 | * 4. if all IRQs on this controller are equal or if some specific IRQs | ||
1189 | * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be | ||
1190 | * requested with the IRQF_SHARED flag (see the illustrative layout below) | ||
1191 | */ | ||
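/*
 * An illustrative (hypothetical) platform resource layout that satisfies the
 * rules above; the addresses, IRQ numbers and array name are made up and do
 * not come from any real board file:
 *
 *	static struct resource example_dmae_resources[] = {
 *		{ .start = 0xfe008020, .end = 0xfe00808f,
 *		  .flags = IORESOURCE_MEM },		// channel registers
 *		{ .start = 0xfe009000, .end = 0xfe00900b,
 *		  .flags = IORESOURCE_MEM },		// DMARS (optional)
 *		{ .start = 34, .end = 34,
 *		  .flags = IORESOURCE_IRQ },		// error IRQ (start == end)
 *		{ .start = 48, .end = 53,
 *		  .flags = IORESOURCE_IRQ },		// one IRQ range covering six channels
 *	};
 */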
1192 | errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
1193 | if (!chan || !errirq_res) | ||
1194 | return -ENODEV; | ||
1195 | |||
1196 | if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) { | ||
1197 | dev_err(&pdev->dev, "DMAC register region already claimed\n"); | ||
1198 | return -EBUSY; | ||
1199 | } | ||
1200 | |||
1201 | if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) { | ||
1202 | dev_err(&pdev->dev, "DMAC DMARS region already claimed\n"); | ||
1203 | err = -EBUSY; | ||
1204 | goto ermrdmars; | ||
1205 | } | ||
1206 | |||
1207 | err = -ENOMEM; | ||
1208 | shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); | ||
1209 | if (!shdev) { | ||
1210 | dev_err(&pdev->dev, "Not enough memory\n"); | ||
1211 | goto ealloc; | ||
1212 | } | ||
1213 | |||
1214 | shdev->chan_reg = ioremap(chan->start, resource_size(chan)); | ||
1215 | if (!shdev->chan_reg) | ||
1216 | goto emapchan; | ||
1217 | if (dmars) { | ||
1218 | shdev->dmars = ioremap(dmars->start, resource_size(dmars)); | ||
1219 | if (!shdev->dmars) | ||
1220 | goto emapdmars; | ||
1221 | } | ||
1222 | |||
1223 | /* platform data */ | ||
1224 | shdev->pdata = pdata; | ||
1225 | |||
1226 | if (pdata->chcr_offset) | ||
1227 | shdev->chcr_offset = pdata->chcr_offset; | ||
1228 | else | ||
1229 | shdev->chcr_offset = CHCR; | ||
1230 | |||
1231 | if (pdata->chcr_ie_bit) | ||
1232 | shdev->chcr_ie_bit = pdata->chcr_ie_bit; | ||
1233 | else | ||
1234 | shdev->chcr_ie_bit = CHCR_IE; | ||
1235 | |||
1236 | platform_set_drvdata(pdev, shdev); | ||
1237 | |||
1238 | shdev->common.dev = &pdev->dev; | ||
1239 | |||
1240 | pm_runtime_enable(&pdev->dev); | ||
1241 | pm_runtime_get_sync(&pdev->dev); | ||
1242 | |||
1243 | spin_lock_irq(&sh_dmae_lock); | ||
1244 | list_add_tail_rcu(&shdev->node, &sh_dmae_devices); | ||
1245 | spin_unlock_irq(&sh_dmae_lock); | ||
1246 | |||
1247 | /* reset dma controller - only needed as a test */ | ||
1248 | err = sh_dmae_rst(shdev); | ||
1249 | if (err) | ||
1250 | goto rst_err; | ||
1251 | |||
1252 | INIT_LIST_HEAD(&shdev->common.channels); | ||
1253 | |||
1254 | if (!pdata->slave_only) | ||
1255 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); | ||
1256 | if (pdata->slave && pdata->slave_num) | ||
1257 | dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); | ||
1258 | |||
1259 | shdev->common.device_alloc_chan_resources | ||
1260 | = sh_dmae_alloc_chan_resources; | ||
1261 | shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; | ||
1262 | shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; | ||
1263 | shdev->common.device_tx_status = sh_dmae_tx_status; | ||
1264 | shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; | ||
1265 | |||
1266 | /* Compulsory for DMA_SLAVE fields */ | ||
1267 | shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; | ||
1268 | shdev->common.device_control = sh_dmae_control; | ||
1269 | |||
1270 | /* Default transfer size of 32 bytes requires 32-byte alignment */ | ||
1271 | shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; | ||
1272 | |||
1273 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | ||
1274 | chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); | ||
1275 | |||
1276 | if (!chanirq_res) | ||
1277 | chanirq_res = errirq_res; | ||
1278 | else | ||
1279 | irqres++; | ||
1280 | |||
1281 | if (chanirq_res == errirq_res || | ||
1282 | (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) | ||
1283 | irqflags = IRQF_SHARED; | ||
1284 | |||
1285 | errirq = errirq_res->start; | ||
1286 | |||
1287 | err = request_irq(errirq, sh_dmae_err, irqflags, | ||
1288 | "DMAC Address Error", shdev); | ||
1289 | if (err) { | ||
1290 | dev_err(&pdev->dev, | ||
1291 | "DMA failed requesting irq #%d, error %d\n", | ||
1292 | errirq, err); | ||
1293 | goto eirq_err; | ||
1294 | } | ||
1295 | |||
1296 | #else | ||
1297 | chanirq_res = errirq_res; | ||
1298 | #endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */ | ||
1299 | |||
1300 | if (chanirq_res->start == chanirq_res->end && | ||
1301 | !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { | ||
1302 | /* Special case - all multiplexed */ | ||
1303 | for (; irq_cnt < pdata->channel_num; irq_cnt++) { | ||
1304 | if (irq_cnt < SH_DMAC_MAX_CHANNELS) { | ||
1305 | chan_irq[irq_cnt] = chanirq_res->start; | ||
1306 | chan_flag[irq_cnt] = IRQF_SHARED; | ||
1307 | } else { | ||
1308 | irq_cap = 1; | ||
1309 | break; | ||
1310 | } | ||
1311 | } | ||
1312 | } else { | ||
1313 | do { | ||
1314 | for (i = chanirq_res->start; i <= chanirq_res->end; i++) { | ||
1315 | if (irq_cnt >= SH_DMAC_MAX_CHANNELS) { | ||
1316 | irq_cap = 1; | ||
1317 | break; | ||
1318 | } | ||
1319 | |||
1320 | if ((errirq_res->flags & IORESOURCE_BITS) == | ||
1321 | IORESOURCE_IRQ_SHAREABLE) | ||
1322 | chan_flag[irq_cnt] = IRQF_SHARED; | ||
1323 | else | ||
1324 | chan_flag[irq_cnt] = IRQF_DISABLED; | ||
1325 | dev_dbg(&pdev->dev, | ||
1326 | "Found IRQ %d for channel %d\n", | ||
1327 | i, irq_cnt); | ||
1328 | chan_irq[irq_cnt++] = i; | ||
1329 | } | ||
1330 | |||
1331 | if (irq_cnt >= SH_DMAC_MAX_CHANNELS) | ||
1332 | break; | ||
1333 | |||
1334 | chanirq_res = platform_get_resource(pdev, | ||
1335 | IORESOURCE_IRQ, ++irqres); | ||
1336 | } while (irq_cnt < pdata->channel_num && chanirq_res); | ||
1337 | } | ||
1338 | |||
1339 | /* Create DMA Channel */ | ||
1340 | for (i = 0; i < irq_cnt; i++) { | ||
1341 | err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); | ||
1342 | if (err) | ||
1343 | goto chan_probe_err; | ||
1344 | } | ||
1345 | |||
1346 | if (irq_cap) | ||
1347 | dev_notice(&pdev->dev, "Attempting to register %d DMA " | ||
1348 | "channels when a maximum of %d are supported.\n", | ||
1349 | pdata->channel_num, SH_DMAC_MAX_CHANNELS); | ||
1350 | |||
1351 | pm_runtime_put(&pdev->dev); | ||
1352 | |||
1353 | dma_async_device_register(&shdev->common); | ||
1354 | |||
1355 | return err; | ||
1356 | |||
1357 | chan_probe_err: | ||
1358 | sh_dmae_chan_remove(shdev); | ||
1359 | |||
1360 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | ||
1361 | free_irq(errirq, shdev); | ||
1362 | eirq_err: | ||
1363 | #endif | ||
1364 | rst_err: | ||
1365 | spin_lock_irq(&sh_dmae_lock); | ||
1366 | list_del_rcu(&shdev->node); | ||
1367 | spin_unlock_irq(&sh_dmae_lock); | ||
1368 | |||
1369 | pm_runtime_put(&pdev->dev); | ||
1370 | pm_runtime_disable(&pdev->dev); | ||
1371 | |||
1372 | if (dmars) | ||
1373 | iounmap(shdev->dmars); | ||
1374 | |||
1375 | platform_set_drvdata(pdev, NULL); | ||
1376 | emapdmars: | ||
1377 | iounmap(shdev->chan_reg); | ||
1378 | synchronize_rcu(); | ||
1379 | emapchan: | ||
1380 | kfree(shdev); | ||
1381 | ealloc: | ||
1382 | if (dmars) | ||
1383 | release_mem_region(dmars->start, resource_size(dmars)); | ||
1384 | ermrdmars: | ||
1385 | release_mem_region(chan->start, resource_size(chan)); | ||
1386 | |||
1387 | return err; | ||
1388 | } | ||
1389 | |||
1390 | static int __exit sh_dmae_remove(struct platform_device *pdev) | ||
1391 | { | ||
1392 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | ||
1393 | struct resource *res; | ||
1394 | int errirq = platform_get_irq(pdev, 0); | ||
1395 | |||
1396 | dma_async_device_unregister(&shdev->common); | ||
1397 | |||
1398 | if (errirq > 0) | ||
1399 | free_irq(errirq, shdev); | ||
1400 | |||
1401 | spin_lock_irq(&sh_dmae_lock); | ||
1402 | list_del_rcu(&shdev->node); | ||
1403 | spin_unlock_irq(&sh_dmae_lock); | ||
1404 | |||
1405 | /* Remove channel data */ | ||
1406 | sh_dmae_chan_remove(shdev); | ||
1407 | |||
1408 | pm_runtime_disable(&pdev->dev); | ||
1409 | |||
1410 | if (shdev->dmars) | ||
1411 | iounmap(shdev->dmars); | ||
1412 | iounmap(shdev->chan_reg); | ||
1413 | |||
1414 | platform_set_drvdata(pdev, NULL); | ||
1415 | |||
1416 | synchronize_rcu(); | ||
1417 | kfree(shdev); | ||
1418 | |||
1419 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1420 | if (res) | ||
1421 | release_mem_region(res->start, resource_size(res)); | ||
1422 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
1423 | if (res) | ||
1424 | release_mem_region(res->start, resource_size(res)); | ||
1425 | |||
1426 | return 0; | ||
1427 | } | ||
1428 | |||
1429 | static void sh_dmae_shutdown(struct platform_device *pdev) | ||
1430 | { | ||
1431 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | ||
1432 | sh_dmae_ctl_stop(shdev); | ||
1433 | } | ||
1434 | |||
1435 | static int sh_dmae_runtime_suspend(struct device *dev) | ||
1436 | { | ||
1437 | return 0; | ||
1438 | } | ||
1439 | |||
1440 | static int sh_dmae_runtime_resume(struct device *dev) | ||
1441 | { | ||
1442 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
1443 | |||
1444 | return sh_dmae_rst(shdev); | ||
1445 | } | ||
1446 | |||
1447 | #ifdef CONFIG_PM | ||
1448 | static int sh_dmae_suspend(struct device *dev) | ||
1449 | { | ||
1450 | return 0; | ||
1451 | } | ||
1452 | |||
1453 | static int sh_dmae_resume(struct device *dev) | ||
1454 | { | ||
1455 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
1456 | int i, ret; | ||
1457 | |||
1458 | ret = sh_dmae_rst(shdev); | ||
1459 | if (ret < 0) | ||
1460 | dev_err(dev, "Failed to reset!\n"); | ||
1461 | |||
1462 | for (i = 0; i < shdev->pdata->channel_num; i++) { | ||
1463 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | ||
1464 | struct sh_dmae_slave *param = sh_chan->common.private; | ||
1465 | |||
1466 | if (!sh_chan->descs_allocated) | ||
1467 | continue; | ||
1468 | |||
1469 | if (param) { | ||
1470 | const struct sh_dmae_slave_config *cfg = param->config; | ||
1471 | dmae_set_dmars(sh_chan, cfg->mid_rid); | ||
1472 | dmae_set_chcr(sh_chan, cfg->chcr); | ||
1473 | } else { | ||
1474 | dmae_init(sh_chan); | ||
1475 | } | ||
1476 | } | ||
1477 | |||
1478 | return 0; | ||
1479 | } | ||
1480 | #else | ||
1481 | #define sh_dmae_suspend NULL | ||
1482 | #define sh_dmae_resume NULL | ||
1483 | #endif | ||
1484 | |||
1485 | const struct dev_pm_ops sh_dmae_pm = { | ||
1486 | .suspend = sh_dmae_suspend, | ||
1487 | .resume = sh_dmae_resume, | ||
1488 | .runtime_suspend = sh_dmae_runtime_suspend, | ||
1489 | .runtime_resume = sh_dmae_runtime_resume, | ||
1490 | }; | ||
1491 | |||
1492 | static struct platform_driver sh_dmae_driver = { | ||
1493 | .remove = __exit_p(sh_dmae_remove), | ||
1494 | .shutdown = sh_dmae_shutdown, | ||
1495 | .driver = { | ||
1496 | .owner = THIS_MODULE, | ||
1497 | .name = "sh-dma-engine", | ||
1498 | .pm = &sh_dmae_pm, | ||
1499 | }, | ||
1500 | }; | ||
1501 | |||
1502 | static int __init sh_dmae_init(void) | ||
1503 | { | ||
1504 | /* Wire up NMI handling */ | ||
1505 | int err = register_die_notifier(&sh_dmae_nmi_notifier); | ||
1506 | if (err) | ||
1507 | return err; | ||
1508 | |||
1509 | return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); | ||
1510 | } | ||
1511 | module_init(sh_dmae_init); | ||
1512 | |||
1513 | static void __exit sh_dmae_exit(void) | ||
1514 | { | ||
1515 | platform_driver_unregister(&sh_dmae_driver); | ||
1516 | |||
1517 | unregister_die_notifier(&sh_dmae_nmi_notifier); | ||
1518 | } | ||
1519 | module_exit(sh_dmae_exit); | ||
1520 | |||
1521 | MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); | ||
1522 | MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); | ||
1523 | MODULE_LICENSE("GPL"); | ||
1524 | MODULE_ALIAS("platform:sh-dma-engine"); | ||
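For context, the dma_device registered above is consumed through the generic dmaengine client API. The sketch below shows one way a memcpy client might drive it. It is a minimal, hypothetical example under that assumption: the function name, error handling and the polling strategy are illustrative and not part of the driver itself, and the source/destination addresses are assumed to be DMA-mapped already.

#include <linux/dmaengine.h>

static int example_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);	/* capability set above unless pdata->slave_only */

	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* src and dst are assumed to be DMA-mapped bus addresses */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -EIO;
	}

	cookie = dmaengine_submit(tx);	/* queues through the driver's tx_submit */
	dma_async_issue_pending(chan);	/* ends up in sh_dmae_memcpy_issue_pending() */

	/* A real client would wait via a completion callback or poll
	 * dma_async_is_tx_complete() before releasing the channel. */
	dma_release_channel(chan);
	return dma_submit_error(cookie) ? -EIO : 0;
}

The same pattern (request a channel by capability, prep, submit, issue pending) applies to the DMA_SLAVE path, except that the channel is first configured with a slave configuration and prepared with device_prep_slave_sg.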
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c new file mode 100644 index 00000000000..d52dbc6c54a --- /dev/null +++ b/drivers/dma/tegra20-apb-dma.c | |||
@@ -0,0 +1,1415 @@ | |||
1 | /* | ||
2 | * DMA driver for Nvidia's Tegra20 APB DMA controller. | ||
3 | * | ||
4 | * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #include <linux/bitops.h> | ||
20 | #include <linux/clk.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/dmaengine.h> | ||
23 | #include <linux/dma-mapping.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/io.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/of.h> | ||
30 | #include <linux/of_device.h> | ||
31 | #include <linux/platform_device.h> | ||
32 | #include <linux/pm_runtime.h> | ||
33 | #include <linux/slab.h> | ||
34 | |||
35 | #include <mach/clk.h> | ||
36 | #include "dmaengine.h" | ||
37 | |||
38 | #define TEGRA_APBDMA_GENERAL 0x0 | ||
39 | #define TEGRA_APBDMA_GENERAL_ENABLE BIT(31) | ||
40 | |||
41 | #define TEGRA_APBDMA_CONTROL 0x010 | ||
42 | #define TEGRA_APBDMA_IRQ_MASK 0x01c | ||
43 | #define TEGRA_APBDMA_IRQ_MASK_SET 0x020 | ||
44 | |||
45 | /* CSR register */ | ||
46 | #define TEGRA_APBDMA_CHAN_CSR 0x00 | ||
47 | #define TEGRA_APBDMA_CSR_ENB BIT(31) | ||
48 | #define TEGRA_APBDMA_CSR_IE_EOC BIT(30) | ||
49 | #define TEGRA_APBDMA_CSR_HOLD BIT(29) | ||
50 | #define TEGRA_APBDMA_CSR_DIR BIT(28) | ||
51 | #define TEGRA_APBDMA_CSR_ONCE BIT(27) | ||
52 | #define TEGRA_APBDMA_CSR_FLOW BIT(21) | ||
53 | #define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT 16 | ||
54 | #define TEGRA_APBDMA_CSR_WCOUNT_MASK 0xFFFC | ||
55 | |||
56 | /* STATUS register */ | ||
57 | #define TEGRA_APBDMA_CHAN_STATUS 0x004 | ||
58 | #define TEGRA_APBDMA_STATUS_BUSY BIT(31) | ||
59 | #define TEGRA_APBDMA_STATUS_ISE_EOC BIT(30) | ||
60 | #define TEGRA_APBDMA_STATUS_HALT BIT(29) | ||
61 | #define TEGRA_APBDMA_STATUS_PING_PONG BIT(28) | ||
62 | #define TEGRA_APBDMA_STATUS_COUNT_SHIFT 2 | ||
63 | #define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC | ||
64 | |||
65 | /* AHB memory address */ | ||
66 | #define TEGRA_APBDMA_CHAN_AHBPTR 0x010 | ||
67 | |||
68 | /* AHB sequence register */ | ||
69 | #define TEGRA_APBDMA_CHAN_AHBSEQ 0x14 | ||
70 | #define TEGRA_APBDMA_AHBSEQ_INTR_ENB BIT(31) | ||
71 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8 (0 << 28) | ||
72 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16 (1 << 28) | ||
73 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32 (2 << 28) | ||
74 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64 (3 << 28) | ||
75 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128 (4 << 28) | ||
76 | #define TEGRA_APBDMA_AHBSEQ_DATA_SWAP BIT(27) | ||
77 | #define TEGRA_APBDMA_AHBSEQ_BURST_1 (4 << 24) | ||
78 | #define TEGRA_APBDMA_AHBSEQ_BURST_4 (5 << 24) | ||
79 | #define TEGRA_APBDMA_AHBSEQ_BURST_8 (6 << 24) | ||
80 | #define TEGRA_APBDMA_AHBSEQ_DBL_BUF BIT(19) | ||
81 | #define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT 16 | ||
82 | #define TEGRA_APBDMA_AHBSEQ_WRAP_NONE 0 | ||
83 | |||
84 | /* APB address */ | ||
85 | #define TEGRA_APBDMA_CHAN_APBPTR 0x018 | ||
86 | |||
87 | /* APB sequence register */ | ||
88 | #define TEGRA_APBDMA_CHAN_APBSEQ 0x01c | ||
89 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8 (0 << 28) | ||
90 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16 (1 << 28) | ||
91 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32 (2 << 28) | ||
92 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64 (3 << 28) | ||
93 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128 (4 << 28) | ||
94 | #define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27) | ||
95 | #define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16) | ||
96 | |||
97 | /* | ||
98 | * If a burst is in flight when the DMA is paused, this is the time (used | ||
99 | * with udelay) to let the in-flight burst complete and the DMA status register update. | ||
100 | */ | ||
101 | #define TEGRA_APBDMA_BURST_COMPLETE_TIME 20 | ||
102 | |||
103 | /* Channel base address offset from APBDMA base address */ | ||
104 | #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000 | ||
105 | |||
106 | /* DMA channel register space size */ | ||
107 | #define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE 0x20 | ||
108 | |||
109 | struct tegra_dma; | ||
110 | |||
111 | /* | ||
112 | * tegra_dma_chip_data Tegra chip specific DMA data | ||
113 | * @nr_channels: Number of channels available in the controller. | ||
114 | * @max_dma_count: Maximum DMA transfer count supported by DMA controller. | ||
115 | */ | ||
116 | struct tegra_dma_chip_data { | ||
117 | int nr_channels; | ||
118 | int max_dma_count; | ||
119 | }; | ||
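/*
 * Purely illustrative example of how this chip data might be instantiated
 * (the actual values used by the driver appear later in this file and are
 * not copied here):
 *
 *	static struct tegra_dma_chip_data example_chip_data = {
 *		.nr_channels	= 16,
 *		.max_dma_count	= 1024UL * 64,
 *	};
 */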
120 | |||
121 | /* DMA channel registers */ | ||
122 | struct tegra_dma_channel_regs { | ||
123 | unsigned long csr; | ||
124 | unsigned long ahb_ptr; | ||
125 | unsigned long apb_ptr; | ||
126 | unsigned long ahb_seq; | ||
127 | unsigned long apb_seq; | ||
128 | }; | ||
129 | |||
130 | /* | ||
131 | * tegra_dma_sg_req: DMA request details used to configure the hardware. This | ||
132 | * contains the details of one sub-transfer programmed into the DMA hw. | ||
133 | * A client's data transfer request may be broken into multiple | ||
134 | * sub-transfers, depending on the requester details and hw support. | ||
135 | * Each sub-transfer is added to the transfer list and points to the Tegra | ||
136 | * DMA descriptor which manages the overall transfer. | ||
137 | */ | ||
138 | struct tegra_dma_sg_req { | ||
139 | struct tegra_dma_channel_regs ch_regs; | ||
140 | int req_len; | ||
141 | bool configured; | ||
142 | bool last_sg; | ||
143 | bool half_done; | ||
144 | struct list_head node; | ||
145 | struct tegra_dma_desc *dma_desc; | ||
146 | }; | ||
147 | |||
148 | /* | ||
149 | * tegra_dma_desc: Tegra DMA descriptor which manages a client request. | ||
150 | * This descriptor keeps track of the transfer status, callbacks, request | ||
151 | * counts, etc. | ||
152 | */ | ||
153 | struct tegra_dma_desc { | ||
154 | struct dma_async_tx_descriptor txd; | ||
155 | int bytes_requested; | ||
156 | int bytes_transferred; | ||
157 | enum dma_status dma_status; | ||
158 | struct list_head node; | ||
159 | struct list_head tx_list; | ||
160 | struct list_head cb_node; | ||
161 | int cb_count; | ||
162 | }; | ||
163 | |||
164 | struct tegra_dma_channel; | ||
165 | |||
166 | typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc, | ||
167 | bool to_terminate); | ||
168 | |||
169 | /* tegra_dma_channel: Channel specific information */ | ||
170 | struct tegra_dma_channel { | ||
171 | struct dma_chan dma_chan; | ||
172 | bool config_init; | ||
173 | int id; | ||
174 | int irq; | ||
175 | unsigned long chan_base_offset; | ||
176 | spinlock_t lock; | ||
177 | bool busy; | ||
178 | struct tegra_dma *tdma; | ||
179 | bool cyclic; | ||
180 | |||
181 | /* Different lists for managing the requests */ | ||
182 | struct list_head free_sg_req; | ||
183 | struct list_head pending_sg_req; | ||
184 | struct list_head free_dma_desc; | ||
185 | struct list_head cb_desc; | ||
186 | |||
187 | /* ISR handler and tasklet for bottom half of isr handling */ | ||
188 | dma_isr_handler isr_handler; | ||
189 | struct tasklet_struct tasklet; | ||
190 | dma_async_tx_callback callback; | ||
191 | void *callback_param; | ||
192 | |||
193 | /* Channel-slave specific configuration */ | ||
194 | struct dma_slave_config dma_sconfig; | ||
195 | }; | ||
196 | |||
197 | /* tegra_dma: Tegra DMA specific information */ | ||
198 | struct tegra_dma { | ||
199 | struct dma_device dma_dev; | ||
200 | struct device *dev; | ||
201 | struct clk *dma_clk; | ||
202 | spinlock_t global_lock; | ||
203 | void __iomem *base_addr; | ||
204 | struct tegra_dma_chip_data *chip_data; | ||
205 | |||
206 | /* Some registers need to be cached before suspend */ | ||
207 | u32 reg_gen; | ||
208 | |||
209 | /* Last member of the structure */ | ||
210 | struct tegra_dma_channel channels[0]; | ||
211 | }; | ||
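/*
 * channels[0] above is the usual trailing flexible-array idiom: the probe
 * code (not shown in this hunk) is expected to allocate the controller and
 * all of its channels in a single block, roughly along the lines of:
 *
 *	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) +
 *			cdata->nr_channels * sizeof(struct tegra_dma_channel),
 *			GFP_KERNEL);
 *
 * This is a sketch of the idiom, not a copy of the driver's probe routine.
 */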
212 | |||
213 | static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val) | ||
214 | { | ||
215 | writel(val, tdma->base_addr + reg); | ||
216 | } | ||
217 | |||
218 | static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg) | ||
219 | { | ||
220 | return readl(tdma->base_addr + reg); | ||
221 | } | ||
222 | |||
223 | static inline void tdc_write(struct tegra_dma_channel *tdc, | ||
224 | u32 reg, u32 val) | ||
225 | { | ||
226 | writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg); | ||
227 | } | ||
228 | |||
229 | static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg) | ||
230 | { | ||
231 | return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg); | ||
232 | } | ||
233 | |||
234 | static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc) | ||
235 | { | ||
236 | return container_of(dc, struct tegra_dma_channel, dma_chan); | ||
237 | } | ||
238 | |||
239 | static inline struct tegra_dma_desc *txd_to_tegra_dma_desc( | ||
240 | struct dma_async_tx_descriptor *td) | ||
241 | { | ||
242 | return container_of(td, struct tegra_dma_desc, txd); | ||
243 | } | ||
244 | |||
245 | static inline struct device *tdc2dev(struct tegra_dma_channel *tdc) | ||
246 | { | ||
247 | return &tdc->dma_chan.dev->device; | ||
248 | } | ||
249 | |||
250 | static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx); | ||
251 | static int tegra_dma_runtime_suspend(struct device *dev); | ||
252 | static int tegra_dma_runtime_resume(struct device *dev); | ||
253 | |||
254 | /* Get a DMA descriptor from the free list; if none is available, allocate one. */ | ||
255 | static struct tegra_dma_desc *tegra_dma_desc_get( | ||
256 | struct tegra_dma_channel *tdc) | ||
257 | { | ||
258 | struct tegra_dma_desc *dma_desc; | ||
259 | unsigned long flags; | ||
260 | |||
261 | spin_lock_irqsave(&tdc->lock, flags); | ||
262 | |||
263 | /* Do not reuse descriptors that are still waiting for an ack */ | ||
264 | list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { | ||
265 | if (async_tx_test_ack(&dma_desc->txd)) { | ||
266 | list_del(&dma_desc->node); | ||
267 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
268 | return dma_desc; | ||
269 | } | ||
270 | } | ||
271 | |||
272 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
273 | |||
274 | /* Allocate DMA desc */ | ||
275 | dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC); | ||
276 | if (!dma_desc) { | ||
277 | dev_err(tdc2dev(tdc), "dma_desc alloc failed\n"); | ||
278 | return NULL; | ||
279 | } | ||
280 | |||
281 | dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan); | ||
282 | dma_desc->txd.tx_submit = tegra_dma_tx_submit; | ||
283 | dma_desc->txd.flags = 0; | ||
284 | return dma_desc; | ||
285 | } | ||
286 | |||
287 | static void tegra_dma_desc_put(struct tegra_dma_channel *tdc, | ||
288 | struct tegra_dma_desc *dma_desc) | ||
289 | { | ||
290 | unsigned long flags; | ||
291 | |||
292 | spin_lock_irqsave(&tdc->lock, flags); | ||
293 | if (!list_empty(&dma_desc->tx_list)) | ||
294 | list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req); | ||
295 | list_add_tail(&dma_desc->node, &tdc->free_dma_desc); | ||
296 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
297 | } | ||
298 | |||
299 | static struct tegra_dma_sg_req *tegra_dma_sg_req_get( | ||
300 | struct tegra_dma_channel *tdc) | ||
301 | { | ||
302 | struct tegra_dma_sg_req *sg_req = NULL; | ||
303 | unsigned long flags; | ||
304 | |||
305 | spin_lock_irqsave(&tdc->lock, flags); | ||
306 | if (!list_empty(&tdc->free_sg_req)) { | ||
307 | sg_req = list_first_entry(&tdc->free_sg_req, | ||
308 | typeof(*sg_req), node); | ||
309 | list_del(&sg_req->node); | ||
310 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
311 | return sg_req; | ||
312 | } | ||
313 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
314 | |||
315 | sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC); | ||
316 | if (!sg_req) | ||
317 | dev_err(tdc2dev(tdc), "sg_req alloc failed\n"); | ||
318 | return sg_req; | ||
319 | } | ||
320 | |||
321 | static int tegra_dma_slave_config(struct dma_chan *dc, | ||
322 | struct dma_slave_config *sconfig) | ||
323 | { | ||
324 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
325 | |||
326 | if (!list_empty(&tdc->pending_sg_req)) { | ||
327 | dev_err(tdc2dev(tdc), "Configuration not allowed\n"); | ||
328 | return -EBUSY; | ||
329 | } | ||
330 | |||
331 | memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig)); | ||
332 | tdc->config_init = true; | ||
333 | return 0; | ||
334 | } | ||
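/*
 * For reference: clients normally reach tegra_dma_slave_config() through the
 * generic dmaengine_slave_config() helper. A hypothetical sketch (the FIFO
 * address and slave_id below are made up):
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr	= 0x70002000,	// peripheral FIFO (illustrative)
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 4,
 *		.slave_id	= 2,		// APB DMA requester (illustrative)
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */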
335 | |||
336 | static void tegra_dma_global_pause(struct tegra_dma_channel *tdc, | ||
337 | bool wait_for_burst_complete) | ||
338 | { | ||
339 | struct tegra_dma *tdma = tdc->tdma; | ||
340 | |||
341 | spin_lock(&tdma->global_lock); | ||
342 | tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0); | ||
343 | if (wait_for_burst_complete) | ||
344 | udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); | ||
345 | } | ||
346 | |||
347 | static void tegra_dma_global_resume(struct tegra_dma_channel *tdc) | ||
348 | { | ||
349 | struct tegra_dma *tdma = tdc->tdma; | ||
350 | |||
351 | tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE); | ||
352 | spin_unlock(&tdma->global_lock); | ||
353 | } | ||
354 | |||
355 | static void tegra_dma_stop(struct tegra_dma_channel *tdc) | ||
356 | { | ||
357 | u32 csr; | ||
358 | u32 status; | ||
359 | |||
360 | /* Disable interrupts */ | ||
361 | csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR); | ||
362 | csr &= ~TEGRA_APBDMA_CSR_IE_EOC; | ||
363 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr); | ||
364 | |||
365 | /* Disable DMA */ | ||
366 | csr &= ~TEGRA_APBDMA_CSR_ENB; | ||
367 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr); | ||
368 | |||
369 | /* Clear interrupt status if it is there */ | ||
370 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | ||
371 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { | ||
372 | dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__); | ||
373 | tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status); | ||
374 | } | ||
375 | tdc->busy = false; | ||
376 | } | ||
377 | |||
378 | static void tegra_dma_start(struct tegra_dma_channel *tdc, | ||
379 | struct tegra_dma_sg_req *sg_req) | ||
380 | { | ||
381 | struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs; | ||
382 | |||
383 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr); | ||
384 | tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq); | ||
385 | tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr); | ||
386 | tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq); | ||
387 | tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr); | ||
388 | |||
389 | /* Start DMA */ | ||
390 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, | ||
391 | ch_regs->csr | TEGRA_APBDMA_CSR_ENB); | ||
392 | } | ||
393 | |||
394 | static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, | ||
395 | struct tegra_dma_sg_req *nsg_req) | ||
396 | { | ||
397 | unsigned long status; | ||
398 | |||
399 | /* | ||
400 | * The DMA controller reloads the new configuration for the next transfer | ||
401 | * only after the last burst of the current transfer completes. | ||
402 | * If the ISE_EOC status is not set, the last burst has not completed yet: | ||
403 | * it may still be in flight and may complete, but because the DMA is | ||
404 | * paused it will neither generate an interrupt nor reload the new | ||
405 | * configuration. In that case it is safe to program the next request here. | ||
406 | * If the ISE_EOC status is already set, the interrupt handler has to load | ||
407 | * the new configuration instead, so we skip the programming below and | ||
408 | * simply resume the controller. | ||
409 | */ | ||
410 | tegra_dma_global_pause(tdc, false); | ||
411 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | ||
412 | |||
413 | /* | ||
414 | * If an interrupt is pending, do nothing: the ISR will handle the | ||
415 | * programming of the new request. | ||
416 | */ | ||
417 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { | ||
418 | dev_err(tdc2dev(tdc), | ||
419 | "Skipping new configuration as interrupt is pending\n"); | ||
420 | tegra_dma_global_resume(tdc); | ||
421 | return; | ||
422 | } | ||
423 | |||
424 | /* Safe to program new configuration */ | ||
425 | tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr); | ||
426 | tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr); | ||
427 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, | ||
428 | nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB); | ||
429 | nsg_req->configured = true; | ||
430 | |||
431 | tegra_dma_global_resume(tdc); | ||
432 | } | ||
433 | |||
434 | static void tdc_start_head_req(struct tegra_dma_channel *tdc) | ||
435 | { | ||
436 | struct tegra_dma_sg_req *sg_req; | ||
437 | |||
438 | if (list_empty(&tdc->pending_sg_req)) | ||
439 | return; | ||
440 | |||
441 | sg_req = list_first_entry(&tdc->pending_sg_req, | ||
442 | typeof(*sg_req), node); | ||
443 | tegra_dma_start(tdc, sg_req); | ||
444 | sg_req->configured = true; | ||
445 | tdc->busy = true; | ||
446 | } | ||
447 | |||
448 | static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc) | ||
449 | { | ||
450 | struct tegra_dma_sg_req *hsgreq; | ||
451 | struct tegra_dma_sg_req *hnsgreq; | ||
452 | |||
453 | if (list_empty(&tdc->pending_sg_req)) | ||
454 | return; | ||
455 | |||
456 | hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); | ||
457 | if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) { | ||
458 | hnsgreq = list_first_entry(&hsgreq->node, | ||
459 | typeof(*hnsgreq), node); | ||
460 | tegra_dma_configure_for_next(tdc, hnsgreq); | ||
461 | } | ||
462 | } | ||
463 | |||
464 | static inline int get_current_xferred_count(struct tegra_dma_channel *tdc, | ||
465 | struct tegra_dma_sg_req *sg_req, unsigned long status) | ||
466 | { | ||
467 | return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4; | ||
468 | } | ||
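/*
 * Worked example for the helper above (numbers are made up): with
 * req_len = 64 and a status word whose COUNT field masks to 0x1C, the
 * bytes still outstanding are 0x1C + 4 = 32, so 64 - 0x1C - 4 = 32 bytes
 * have been transferred so far.
 */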
469 | |||
470 | static void tegra_dma_abort_all(struct tegra_dma_channel *tdc) | ||
471 | { | ||
472 | struct tegra_dma_sg_req *sgreq; | ||
473 | struct tegra_dma_desc *dma_desc; | ||
474 | |||
475 | while (!list_empty(&tdc->pending_sg_req)) { | ||
476 | sgreq = list_first_entry(&tdc->pending_sg_req, | ||
477 | typeof(*sgreq), node); | ||
478 | list_del(&sgreq->node); | ||
479 | list_add_tail(&sgreq->node, &tdc->free_sg_req); | ||
480 | if (sgreq->last_sg) { | ||
481 | dma_desc = sgreq->dma_desc; | ||
482 | dma_desc->dma_status = DMA_ERROR; | ||
483 | list_add_tail(&dma_desc->node, &tdc->free_dma_desc); | ||
484 | |||
485 | /* Add to the callback list if it is not already there. */ | ||
486 | if (!dma_desc->cb_count) | ||
487 | list_add_tail(&dma_desc->cb_node, | ||
488 | &tdc->cb_desc); | ||
489 | dma_desc->cb_count++; | ||
490 | } | ||
491 | } | ||
492 | tdc->isr_handler = NULL; | ||
493 | } | ||
494 | |||
495 | static bool handle_continuous_head_request(struct tegra_dma_channel *tdc, | ||
496 | struct tegra_dma_sg_req *last_sg_req, bool to_terminate) | ||
497 | { | ||
498 | struct tegra_dma_sg_req *hsgreq = NULL; | ||
499 | |||
500 | if (list_empty(&tdc->pending_sg_req)) { | ||
501 | dev_err(tdc2dev(tdc), "Dma is running without req\n"); | ||
502 | tegra_dma_stop(tdc); | ||
503 | return false; | ||
504 | } | ||
505 | |||
506 | /* | ||
507 | * The head request on the list should be in flight. | ||
508 | * If it is not in flight, abort the transfer, as the | ||
509 | * transfer loop cannot continue. | ||
510 | */ | ||
511 | hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); | ||
512 | if (!hsgreq->configured) { | ||
513 | tegra_dma_stop(tdc); | ||
514 | dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n"); | ||
515 | tegra_dma_abort_all(tdc); | ||
516 | return false; | ||
517 | } | ||
518 | |||
519 | /* Configure next request */ | ||
520 | if (!to_terminate) | ||
521 | tdc_configure_next_head_desc(tdc); | ||
522 | return true; | ||
523 | } | ||
524 | |||
525 | static void handle_once_dma_done(struct tegra_dma_channel *tdc, | ||
526 | bool to_terminate) | ||
527 | { | ||
528 | struct tegra_dma_sg_req *sgreq; | ||
529 | struct tegra_dma_desc *dma_desc; | ||
530 | |||
531 | tdc->busy = false; | ||
532 | sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); | ||
533 | dma_desc = sgreq->dma_desc; | ||
534 | dma_desc->bytes_transferred += sgreq->req_len; | ||
535 | |||
536 | list_del(&sgreq->node); | ||
537 | if (sgreq->last_sg) { | ||
538 | dma_desc->dma_status = DMA_SUCCESS; | ||
539 | dma_cookie_complete(&dma_desc->txd); | ||
540 | if (!dma_desc->cb_count) | ||
541 | list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); | ||
542 | dma_desc->cb_count++; | ||
543 | list_add_tail(&dma_desc->node, &tdc->free_dma_desc); | ||
544 | } | ||
545 | list_add_tail(&sgreq->node, &tdc->free_sg_req); | ||
546 | |||
547 | /* Do not start the DMA if it is going to be terminated */ | ||
548 | if (to_terminate || list_empty(&tdc->pending_sg_req)) | ||
549 | return; | ||
550 | |||
551 | tdc_start_head_req(tdc); | ||
552 | return; | ||
553 | } | ||
554 | |||
555 | static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc, | ||
556 | bool to_terminate) | ||
557 | { | ||
558 | struct tegra_dma_sg_req *sgreq; | ||
559 | struct tegra_dma_desc *dma_desc; | ||
560 | bool st; | ||
561 | |||
562 | sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); | ||
563 | dma_desc = sgreq->dma_desc; | ||
564 | dma_desc->bytes_transferred += sgreq->req_len; | ||
565 | |||
566 | /* The callback needs to be called */ | ||
567 | if (!dma_desc->cb_count) | ||
568 | list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); | ||
569 | dma_desc->cb_count++; | ||
570 | |||
571 | /* If this is not the last req, move it to the end of the pending list */ | ||
572 | if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) { | ||
573 | list_del(&sgreq->node); | ||
574 | list_add_tail(&sgreq->node, &tdc->pending_sg_req); | ||
575 | sgreq->configured = false; | ||
576 | st = handle_continuous_head_request(tdc, sgreq, to_terminate); | ||
577 | if (!st) | ||
578 | dma_desc->dma_status = DMA_ERROR; | ||
579 | } | ||
580 | return; | ||
581 | } | ||
582 | |||
583 | static void tegra_dma_tasklet(unsigned long data) | ||
584 | { | ||
585 | struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data; | ||
586 | dma_async_tx_callback callback = NULL; | ||
587 | void *callback_param = NULL; | ||
588 | struct tegra_dma_desc *dma_desc; | ||
589 | unsigned long flags; | ||
590 | int cb_count; | ||
591 | |||
592 | spin_lock_irqsave(&tdc->lock, flags); | ||
593 | while (!list_empty(&tdc->cb_desc)) { | ||
594 | dma_desc = list_first_entry(&tdc->cb_desc, | ||
595 | typeof(*dma_desc), cb_node); | ||
596 | list_del(&dma_desc->cb_node); | ||
597 | callback = dma_desc->txd.callback; | ||
598 | callback_param = dma_desc->txd.callback_param; | ||
599 | cb_count = dma_desc->cb_count; | ||
600 | dma_desc->cb_count = 0; | ||
601 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
602 | while (cb_count-- && callback) | ||
603 | callback(callback_param); | ||
604 | spin_lock_irqsave(&tdc->lock, flags); | ||
605 | } | ||
606 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
607 | } | ||
608 | |||
609 | static irqreturn_t tegra_dma_isr(int irq, void *dev_id) | ||
610 | { | ||
611 | struct tegra_dma_channel *tdc = dev_id; | ||
612 | unsigned long status; | ||
613 | unsigned long flags; | ||
614 | |||
615 | spin_lock_irqsave(&tdc->lock, flags); | ||
616 | |||
617 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | ||
618 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { | ||
619 | tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status); | ||
620 | tdc->isr_handler(tdc, false); | ||
621 | tasklet_schedule(&tdc->tasklet); | ||
622 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
623 | return IRQ_HANDLED; | ||
624 | } | ||
625 | |||
626 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
627 | dev_info(tdc2dev(tdc), | ||
628 | "Interrupt already served status 0x%08lx\n", status); | ||
629 | return IRQ_NONE; | ||
630 | } | ||
631 | |||
632 | static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd) | ||
633 | { | ||
634 | struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd); | ||
635 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan); | ||
636 | unsigned long flags; | ||
637 | dma_cookie_t cookie; | ||
638 | |||
639 | spin_lock_irqsave(&tdc->lock, flags); | ||
640 | dma_desc->dma_status = DMA_IN_PROGRESS; | ||
641 | cookie = dma_cookie_assign(&dma_desc->txd); | ||
642 | list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req); | ||
643 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
644 | return cookie; | ||
645 | } | ||
646 | |||
647 | static void tegra_dma_issue_pending(struct dma_chan *dc) | ||
648 | { | ||
649 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
650 | unsigned long flags; | ||
651 | |||
652 | spin_lock_irqsave(&tdc->lock, flags); | ||
653 | if (list_empty(&tdc->pending_sg_req)) { | ||
654 | dev_err(tdc2dev(tdc), "No DMA request\n"); | ||
655 | goto end; | ||
656 | } | ||
657 | if (!tdc->busy) { | ||
658 | tdc_start_head_req(tdc); | ||
659 | |||
660 | /* Continuous single mode: Configure next req */ | ||
661 | if (tdc->cyclic) { | ||
662 | /* | ||
663 | * Wait one burst time before configuring the DMA | ||
664 | * for the next transfer. | ||
665 | */ | ||
666 | udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); | ||
667 | tdc_configure_next_head_desc(tdc); | ||
668 | } | ||
669 | } | ||
670 | end: | ||
671 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
672 | return; | ||
673 | } | ||
674 | |||
675 | static void tegra_dma_terminate_all(struct dma_chan *dc) | ||
676 | { | ||
677 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
678 | struct tegra_dma_sg_req *sgreq; | ||
679 | struct tegra_dma_desc *dma_desc; | ||
680 | unsigned long flags; | ||
681 | unsigned long status; | ||
682 | bool was_busy; | ||
683 | |||
684 | spin_lock_irqsave(&tdc->lock, flags); | ||
685 | if (list_empty(&tdc->pending_sg_req)) { | ||
686 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
687 | return; | ||
688 | } | ||
689 | |||
690 | if (!tdc->busy) | ||
691 | goto skip_dma_stop; | ||
692 | |||
693 | /* Pause DMA before checking the queue status */ | ||
694 | tegra_dma_global_pause(tdc, true); | ||
695 | |||
696 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | ||
697 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { | ||
698 | dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__); | ||
699 | tdc->isr_handler(tdc, true); | ||
700 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | ||
701 | } | ||
702 | |||
703 | was_busy = tdc->busy; | ||
704 | tegra_dma_stop(tdc); | ||
705 | |||
706 | if (!list_empty(&tdc->pending_sg_req) && was_busy) { | ||
707 | sgreq = list_first_entry(&tdc->pending_sg_req, | ||
708 | typeof(*sgreq), node); | ||
709 | sgreq->dma_desc->bytes_transferred += | ||
710 | get_current_xferred_count(tdc, sgreq, status); | ||
711 | } | ||
712 | tegra_dma_global_resume(tdc); | ||
713 | |||
714 | skip_dma_stop: | ||
715 | tegra_dma_abort_all(tdc); | ||
716 | |||
717 | while (!list_empty(&tdc->cb_desc)) { | ||
718 | dma_desc = list_first_entry(&tdc->cb_desc, | ||
719 | typeof(*dma_desc), cb_node); | ||
720 | list_del(&dma_desc->cb_node); | ||
721 | dma_desc->cb_count = 0; | ||
722 | } | ||
723 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
724 | } | ||
725 | |||
726 | static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, | ||
727 | dma_cookie_t cookie, struct dma_tx_state *txstate) | ||
728 | { | ||
729 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
730 | struct tegra_dma_desc *dma_desc; | ||
731 | struct tegra_dma_sg_req *sg_req; | ||
732 | enum dma_status ret; | ||
733 | unsigned long flags; | ||
734 | unsigned int residual; | ||
735 | |||
736 | spin_lock_irqsave(&tdc->lock, flags); | ||
737 | |||
738 | ret = dma_cookie_status(dc, cookie, txstate); | ||
739 | if (ret == DMA_SUCCESS) { | ||
740 | dma_set_residue(txstate, 0); | ||
741 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
742 | return ret; | ||
743 | } | ||
744 | |||
745 | /* Check the status of descriptors that are waiting for an ack */ | ||
746 | list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { | ||
747 | if (dma_desc->txd.cookie == cookie) { | ||
748 | residual = dma_desc->bytes_requested - | ||
749 | (dma_desc->bytes_transferred % | ||
750 | dma_desc->bytes_requested); | ||
751 | dma_set_residue(txstate, residual); | ||
752 | ret = dma_desc->dma_status; | ||
753 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
754 | return ret; | ||
755 | } | ||
756 | } | ||
757 | |||
758 | /* Check in pending list */ | ||
759 | list_for_each_entry(sg_req, &tdc->pending_sg_req, node) { | ||
760 | dma_desc = sg_req->dma_desc; | ||
761 | if (dma_desc->txd.cookie == cookie) { | ||
762 | residual = dma_desc->bytes_requested - | ||
763 | (dma_desc->bytes_transferred % | ||
764 | dma_desc->bytes_requested); | ||
765 | dma_set_residue(txstate, residual); | ||
766 | ret = dma_desc->dma_status; | ||
767 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
768 | return ret; | ||
769 | } | ||
770 | } | ||
771 | |||
772 | dev_dbg(tdc2dev(tdc), "cookie %d does not found\n", cookie); | ||
773 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
774 | return ret; | ||
775 | } | ||
776 | |||
777 | static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd, | ||
778 | unsigned long arg) | ||
779 | { | ||
780 | switch (cmd) { | ||
781 | case DMA_SLAVE_CONFIG: | ||
782 | return tegra_dma_slave_config(dc, | ||
783 | (struct dma_slave_config *)arg); | ||
784 | |||
785 | case DMA_TERMINATE_ALL: | ||
786 | tegra_dma_terminate_all(dc); | ||
787 | return 0; | ||
788 | |||
789 | default: | ||
790 | break; | ||
791 | } | ||
792 | |||
793 | return -ENXIO; | ||
794 | } | ||
795 | |||
796 | static inline int get_bus_width(struct tegra_dma_channel *tdc, | ||
797 | enum dma_slave_buswidth slave_bw) | ||
798 | { | ||
799 | switch (slave_bw) { | ||
800 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
801 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8; | ||
802 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
803 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16; | ||
804 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
805 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32; | ||
806 | case DMA_SLAVE_BUSWIDTH_8_BYTES: | ||
807 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64; | ||
808 | default: | ||
809 | dev_warn(tdc2dev(tdc), | ||
810 | "slave bw is not supported, using 32bits\n"); | ||
811 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32; | ||
812 | } | ||
813 | } | ||
814 | |||
815 | static inline int get_burst_size(struct tegra_dma_channel *tdc, | ||
816 | u32 burst_size, enum dma_slave_buswidth slave_bw, int len) | ||
817 | { | ||
818 | int burst_byte; | ||
819 | int burst_ahb_width; | ||
820 | |||
821 | /* | ||
822 | * The burst_size from the client is in units of the bus width. | ||
823 | * Convert it into the AHB memory width, which is 4 bytes. | ||
824 | */ | ||
825 | burst_byte = burst_size * slave_bw; | ||
826 | burst_ahb_width = burst_byte / 4; | ||
827 | |||
828 | /* If burst size is 0 then calculate the burst size based on length */ | ||
829 | if (!burst_ahb_width) { | ||
830 | if (len & 0xF) | ||
831 | return TEGRA_APBDMA_AHBSEQ_BURST_1; | ||
832 | else if ((len >> 4) & 0x1) | ||
833 | return TEGRA_APBDMA_AHBSEQ_BURST_4; | ||
834 | else | ||
835 | return TEGRA_APBDMA_AHBSEQ_BURST_8; | ||
836 | } | ||
837 | if (burst_ahb_width < 4) | ||
838 | return TEGRA_APBDMA_AHBSEQ_BURST_1; | ||
839 | else if (burst_ahb_width < 8) | ||
840 | return TEGRA_APBDMA_AHBSEQ_BURST_4; | ||
841 | else | ||
842 | return TEGRA_APBDMA_AHBSEQ_BURST_8; | ||
843 | } | ||
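/*
 * Worked examples for the mapping above (client values are illustrative):
 * maxburst = 8 at a 32-bit bus width is 32 burst bytes = 8 AHB words, which
 * selects TEGRA_APBDMA_AHBSEQ_BURST_8; maxburst = 4 at a 16-bit bus width is
 * 8 bytes = 2 AHB words, which falls back to TEGRA_APBDMA_AHBSEQ_BURST_1.
 */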
844 | |||
845 | static int get_transfer_param(struct tegra_dma_channel *tdc, | ||
846 | enum dma_transfer_direction direction, unsigned long *apb_addr, | ||
847 | unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size, | ||
848 | enum dma_slave_buswidth *slave_bw) | ||
849 | { | ||
850 | |||
851 | switch (direction) { | ||
852 | case DMA_MEM_TO_DEV: | ||
853 | *apb_addr = tdc->dma_sconfig.dst_addr; | ||
854 | *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width); | ||
855 | *burst_size = tdc->dma_sconfig.dst_maxburst; | ||
856 | *slave_bw = tdc->dma_sconfig.dst_addr_width; | ||
857 | *csr = TEGRA_APBDMA_CSR_DIR; | ||
858 | return 0; | ||
859 | |||
860 | case DMA_DEV_TO_MEM: | ||
861 | *apb_addr = tdc->dma_sconfig.src_addr; | ||
862 | *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width); | ||
863 | *burst_size = tdc->dma_sconfig.src_maxburst; | ||
864 | *slave_bw = tdc->dma_sconfig.src_addr_width; | ||
865 | *csr = 0; | ||
866 | return 0; | ||
867 | |||
868 | default: | ||
869 | dev_err(tdc2dev(tdc), "Dma direction is not supported\n"); | ||
870 | return -EINVAL; | ||
871 | } | ||
872 | return -EINVAL; | ||
873 | } | ||
874 | |||
875 | static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( | ||
876 | struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len, | ||
877 | enum dma_transfer_direction direction, unsigned long flags, | ||
878 | void *context) | ||
879 | { | ||
880 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
881 | struct tegra_dma_desc *dma_desc; | ||
882 | unsigned int i; | ||
883 | struct scatterlist *sg; | ||
884 | unsigned long csr, ahb_seq, apb_ptr, apb_seq; | ||
885 | struct list_head req_list; | ||
886 | struct tegra_dma_sg_req *sg_req = NULL; | ||
887 | u32 burst_size; | ||
888 | enum dma_slave_buswidth slave_bw; | ||
889 | int ret; | ||
890 | |||
891 | if (!tdc->config_init) { | ||
892 | dev_err(tdc2dev(tdc), "dma channel is not configured\n"); | ||
893 | return NULL; | ||
894 | } | ||
895 | if (sg_len < 1) { | ||
896 | dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len); | ||
897 | return NULL; | ||
898 | } | ||
899 | |||
900 | ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, | ||
901 | &burst_size, &slave_bw); | ||
902 | if (ret < 0) | ||
903 | return NULL; | ||
904 | |||
905 | INIT_LIST_HEAD(&req_list); | ||
906 | |||
907 | ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB; | ||
908 | ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE << | ||
909 | TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; | ||
910 | ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; | ||
911 | |||
912 | csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW; | ||
913 | csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; | ||
914 | if (flags & DMA_PREP_INTERRUPT) | ||
915 | csr |= TEGRA_APBDMA_CSR_IE_EOC; | ||
916 | |||
917 | apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; | ||
918 | |||
919 | dma_desc = tegra_dma_desc_get(tdc); | ||
920 | if (!dma_desc) { | ||
921 | dev_err(tdc2dev(tdc), "Dma descriptors not available\n"); | ||
922 | return NULL; | ||
923 | } | ||
924 | INIT_LIST_HEAD(&dma_desc->tx_list); | ||
925 | INIT_LIST_HEAD(&dma_desc->cb_node); | ||
926 | dma_desc->cb_count = 0; | ||
927 | dma_desc->bytes_requested = 0; | ||
928 | dma_desc->bytes_transferred = 0; | ||
929 | dma_desc->dma_status = DMA_IN_PROGRESS; | ||
930 | |||
931 | /* Make transfer requests */ | ||
932 | for_each_sg(sgl, sg, sg_len, i) { | ||
933 | u32 len, mem; | ||
934 | |||
935 | mem = sg_dma_address(sg); | ||
936 | len = sg_dma_len(sg); | ||
937 | |||
938 | if ((len & 3) || (mem & 3) || | ||
939 | (len > tdc->tdma->chip_data->max_dma_count)) { | ||
940 | dev_err(tdc2dev(tdc), | ||
941 | "Dma length/memory address is not supported\n"); | ||
942 | tegra_dma_desc_put(tdc, dma_desc); | ||
943 | return NULL; | ||
944 | } | ||
945 | |||
946 | sg_req = tegra_dma_sg_req_get(tdc); | ||
947 | if (!sg_req) { | ||
948 | dev_err(tdc2dev(tdc), "Dma sg-req not available\n"); | ||
949 | tegra_dma_desc_put(tdc, dma_desc); | ||
950 | return NULL; | ||
951 | } | ||
952 | |||
953 | ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len); | ||
954 | dma_desc->bytes_requested += len; | ||
955 | |||
956 | sg_req->ch_regs.apb_ptr = apb_ptr; | ||
957 | sg_req->ch_regs.ahb_ptr = mem; | ||
958 | sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); | ||
959 | sg_req->ch_regs.apb_seq = apb_seq; | ||
960 | sg_req->ch_regs.ahb_seq = ahb_seq; | ||
961 | sg_req->configured = false; | ||
962 | sg_req->last_sg = false; | ||
963 | sg_req->dma_desc = dma_desc; | ||
964 | sg_req->req_len = len; | ||
965 | |||
966 | list_add_tail(&sg_req->node, &dma_desc->tx_list); | ||
967 | } | ||
968 | sg_req->last_sg = true; | ||
969 | if (flags & DMA_CTRL_ACK) | ||
970 | dma_desc->txd.flags = DMA_CTRL_ACK; | ||
971 | |||
972 | /* | ||
973 | * Make sure that this mode does not conflict with the currently | ||
974 | * configured mode. | ||
975 | */ | ||
976 | if (!tdc->isr_handler) { | ||
977 | tdc->isr_handler = handle_once_dma_done; | ||
978 | tdc->cyclic = false; | ||
979 | } else { | ||
980 | if (tdc->cyclic) { | ||
981 | dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n"); | ||
982 | tegra_dma_desc_put(tdc, dma_desc); | ||
983 | return NULL; | ||
984 | } | ||
985 | } | ||
986 | |||
987 | return &dma_desc->txd; | ||
988 | } | ||
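From a client's point of view, the routine above sits behind the generic dmaengine slave API. A minimal sketch of that call path, assuming a MEM_TO_DEV peripheral; the FIFO address, burst size and slave_id below are placeholders, not values taken from this driver:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical client sketch, not part of this driver.  MY_APB_FIFO_ADDR
 * and the slave_id value stand in for a real peripheral's FIFO address and
 * APB DMA request-select line.
 */
#define MY_APB_FIFO_ADDR	0x70000000UL	/* placeholder */

static int my_start_tx(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int nents)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= MY_APB_FIFO_ADDR,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
		.slave_id	= 2,		/* placeholder request select */
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	/* Handled by the driver's DMA_SLAVE_CONFIG path, which sets config_init. */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* Calls tegra_dma_prep_slave_sg() through the dmaengine core. */
	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EIO;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}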
989 | |||
990 | static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( | ||
991 | struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, | ||
992 | size_t period_len, enum dma_transfer_direction direction, | ||
993 | void *context) | ||
994 | { | ||
995 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
996 | struct tegra_dma_desc *dma_desc = NULL; | ||
997 | struct tegra_dma_sg_req *sg_req = NULL; | ||
998 | unsigned long csr, ahb_seq, apb_ptr, apb_seq; | ||
999 | int len; | ||
1000 | size_t remain_len; | ||
1001 | dma_addr_t mem = buf_addr; | ||
1002 | u32 burst_size; | ||
1003 | enum dma_slave_buswidth slave_bw; | ||
1004 | int ret; | ||
1005 | |||
1006 | if (!buf_len || !period_len) { | ||
1007 | dev_err(tdc2dev(tdc), "Invalid buffer/period len\n"); | ||
1008 | return NULL; | ||
1009 | } | ||
1010 | |||
1011 | if (!tdc->config_init) { | ||
1012 | dev_err(tdc2dev(tdc), "DMA slave is not configured\n"); | ||
1013 | return NULL; | ||
1014 | } | ||
1015 | |||
1016 | /* | ||
1017 | * More requests can be queued as long as the DMA has not been | ||
1018 | * started; the driver will loop over all queued requests. | ||
1019 | * Once the DMA has been started, new requests can be queued only | ||
1020 | * after terminating the DMA. | ||
1021 | */ | ||
1022 | if (tdc->busy) { | ||
1023 | dev_err(tdc2dev(tdc), "Request not allowed when dma running\n"); | ||
1024 | return NULL; | ||
1025 | } | ||
1026 | |||
1027 | /* | ||
1028 | * We only support cyclic transfers when buf_len is a multiple of | ||
1029 | * period_len. | ||
1030 | */ | ||
1031 | if (buf_len % period_len) { | ||
1032 | dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n"); | ||
1033 | return NULL; | ||
1034 | } | ||
1035 | |||
1036 | len = period_len; | ||
1037 | if ((len & 3) || (buf_addr & 3) || | ||
1038 | (len > tdc->tdma->chip_data->max_dma_count)) { | ||
1039 | dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n"); | ||
1040 | return NULL; | ||
1041 | } | ||
1042 | |||
1043 | ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, | ||
1044 | &burst_size, &slave_bw); | ||
1045 | if (ret < 0) | ||
1046 | return NULL; | ||
1047 | |||
1048 | |||
1049 | ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB; | ||
1050 | ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE << | ||
1051 | TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; | ||
1052 | ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; | ||
1053 | |||
1054 | csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC; | ||
1055 | csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; | ||
1056 | |||
1057 | apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; | ||
1058 | |||
1059 | dma_desc = tegra_dma_desc_get(tdc); | ||
1060 | if (!dma_desc) { | ||
1061 | dev_err(tdc2dev(tdc), "not enough descriptors available\n"); | ||
1062 | return NULL; | ||
1063 | } | ||
1064 | |||
1065 | INIT_LIST_HEAD(&dma_desc->tx_list); | ||
1066 | INIT_LIST_HEAD(&dma_desc->cb_node); | ||
1067 | dma_desc->cb_count = 0; | ||
1068 | |||
1069 | dma_desc->bytes_transferred = 0; | ||
1070 | dma_desc->bytes_requested = buf_len; | ||
1071 | remain_len = buf_len; | ||
1072 | |||
1073 | /* Split transfer equal to period size */ | ||
1074 | while (remain_len) { | ||
1075 | sg_req = tegra_dma_sg_req_get(tdc); | ||
1076 | if (!sg_req) { | ||
1077 | dev_err(tdc2dev(tdc), "DMA sg-req not available\n"); | ||
1078 | tegra_dma_desc_put(tdc, dma_desc); | ||
1079 | return NULL; | ||
1080 | } | ||
1081 | |||
1082 | ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len); | ||
1083 | sg_req->ch_regs.apb_ptr = apb_ptr; | ||
1084 | sg_req->ch_regs.ahb_ptr = mem; | ||
1085 | sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); | ||
1086 | sg_req->ch_regs.apb_seq = apb_seq; | ||
1087 | sg_req->ch_regs.ahb_seq = ahb_seq; | ||
1088 | sg_req->configured = false; | ||
1089 | sg_req->half_done = false; | ||
1090 | sg_req->last_sg = false; | ||
1091 | sg_req->dma_desc = dma_desc; | ||
1092 | sg_req->req_len = len; | ||
1093 | |||
1094 | list_add_tail(&sg_req->node, &dma_desc->tx_list); | ||
1095 | remain_len -= len; | ||
1096 | mem += len; | ||
1097 | } | ||
1098 | sg_req->last_sg = true; | ||
1099 | dma_desc->txd.flags = 0; | ||
1100 | |||
1101 | /* | ||
1102 | * Make sure that this transfer mode does not conflict with the | ||
1103 | * currently configured mode. | ||
1104 | */ | ||
1105 | if (!tdc->isr_handler) { | ||
1106 | tdc->isr_handler = handle_cont_sngl_cycle_dma_done; | ||
1107 | tdc->cyclic = true; | ||
1108 | } else { | ||
1109 | if (!tdc->cyclic) { | ||
1110 | dev_err(tdc2dev(tdc), "DMA configuration conflict\n"); | ||
1111 | tegra_dma_desc_put(tdc, dma_desc); | ||
1112 | return NULL; | ||
1113 | } | ||
1114 | } | ||
1115 | |||
1116 | return &dma_desc->txd; | ||
1117 | } | ||
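The cyclic variant is meant for ring buffers such as audio, where the channel raises an EOC interrupt once per period and the descriptor is reused until the channel is terminated. A rough client sketch, assuming the slave configuration has already been done and that the dmaengine_prep_dma_cyclic() wrapper of this kernel generation (five arguments, no flags) is available; names, callback body and sizes are illustrative only:

#include <linux/dmaengine.h>

/* Hypothetical cyclic client sketch (e.g. audio playback); not part of
 * this driver. */
static void my_period_elapsed(void *data)
{
	/* Invoked from the channel tasklet once per completed period. */
}

static int my_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *desc;

	/*
	 * buf_len must be a multiple of period_len, buffer and period must
	 * be word aligned, and a period may not exceed max_dma_count.
	 */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV);
	if (!desc)
		return -EIO;

	desc->callback = my_period_elapsed;
	desc->callback_param = NULL;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}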
1118 | |||
1119 | static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) | ||
1120 | { | ||
1121 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
1122 | |||
1123 | dma_cookie_init(&tdc->dma_chan); | ||
1124 | tdc->config_init = false; | ||
1125 | return 0; | ||
1126 | } | ||
1127 | |||
1128 | static void tegra_dma_free_chan_resources(struct dma_chan *dc) | ||
1129 | { | ||
1130 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
1131 | |||
1132 | struct tegra_dma_desc *dma_desc; | ||
1133 | struct tegra_dma_sg_req *sg_req; | ||
1134 | struct list_head dma_desc_list; | ||
1135 | struct list_head sg_req_list; | ||
1136 | unsigned long flags; | ||
1137 | |||
1138 | INIT_LIST_HEAD(&dma_desc_list); | ||
1139 | INIT_LIST_HEAD(&sg_req_list); | ||
1140 | |||
1141 | dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id); | ||
1142 | |||
1143 | if (tdc->busy) | ||
1144 | tegra_dma_terminate_all(dc); | ||
1145 | |||
1146 | spin_lock_irqsave(&tdc->lock, flags); | ||
1147 | list_splice_init(&tdc->pending_sg_req, &sg_req_list); | ||
1148 | list_splice_init(&tdc->free_sg_req, &sg_req_list); | ||
1149 | list_splice_init(&tdc->free_dma_desc, &dma_desc_list); | ||
1150 | INIT_LIST_HEAD(&tdc->cb_desc); | ||
1151 | tdc->config_init = false; | ||
1152 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
1153 | |||
1154 | while (!list_empty(&dma_desc_list)) { | ||
1155 | dma_desc = list_first_entry(&dma_desc_list, | ||
1156 | typeof(*dma_desc), node); | ||
1157 | list_del(&dma_desc->node); | ||
1158 | kfree(dma_desc); | ||
1159 | } | ||
1160 | |||
1161 | while (!list_empty(&sg_req_list)) { | ||
1162 | sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node); | ||
1163 | list_del(&sg_req->node); | ||
1164 | kfree(sg_req); | ||
1165 | } | ||
1166 | } | ||
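Descriptor and sg-req objects are pooled per channel and only released in the routine above; the channel itself is obtained and returned through the dmaengine core, which is what invokes the two callbacks. A sketch of that client side, assuming a board- or driver-specific filter as was typical at this point; my_filter() below is a made-up placeholder that accepts any channel:

#include <linux/dmaengine.h>

/* Hypothetical channel acquisition sketch; not part of this driver. */
static bool my_filter(struct dma_chan *chan, void *param)
{
	return true;	/* placeholder: accept any channel */
}

static struct dma_chan *my_get_channel(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* On success this ends up in tegra_dma_alloc_chan_resources(). */
	return dma_request_channel(mask, my_filter, NULL);
}

static void my_put_channel(struct dma_chan *chan)
{
	/* Terminates a running transfer and frees the pooled descriptors
	 * and sg requests via tegra_dma_free_chan_resources(). */
	dma_release_channel(chan);
}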
1167 | |||
1168 | /* Tegra20 specific DMA controller information */ | ||
1169 | static struct tegra_dma_chip_data tegra20_dma_chip_data = { | ||
1170 | .nr_channels = 16, | ||
1171 | .max_dma_count = 1024UL * 64, | ||
1172 | }; | ||
1173 | |||
1174 | #if defined(CONFIG_OF) | ||
1175 | /* Tegra30 specific DMA controller information */ | ||
1176 | static struct tegra_dma_chip_data tegra30_dma_chip_data = { | ||
1177 | .nr_channels = 32, | ||
1178 | .max_dma_count = 1024UL * 64, | ||
1179 | }; | ||
1180 | |||
1181 | static const struct of_device_id tegra_dma_of_match[] = { | ||
1182 | { | ||
1183 | .compatible = "nvidia,tegra30-apbdma", | ||
1184 | .data = &tegra30_dma_chip_data, | ||
1185 | }, { | ||
1186 | .compatible = "nvidia,tegra20-apbdma", | ||
1187 | .data = &tegra20_dma_chip_data, | ||
1188 | }, { | ||
1189 | }, | ||
1190 | }; | ||
1191 | MODULE_DEVICE_TABLE(of, tegra_dma_of_match); | ||
1192 | #endif | ||
1193 | |||
1194 | static int __devinit tegra_dma_probe(struct platform_device *pdev) | ||
1195 | { | ||
1196 | struct resource *res; | ||
1197 | struct tegra_dma *tdma; | ||
1198 | int ret; | ||
1199 | int i; | ||
1200 | struct tegra_dma_chip_data *cdata = NULL; | ||
1201 | |||
1202 | if (pdev->dev.of_node) { | ||
1203 | const struct of_device_id *match; | ||
1204 | match = of_match_device(of_match_ptr(tegra_dma_of_match), | ||
1205 | &pdev->dev); | ||
1206 | if (!match) { | ||
1207 | dev_err(&pdev->dev, "Error: No device match found\n"); | ||
1208 | return -ENODEV; | ||
1209 | } | ||
1210 | cdata = match->data; | ||
1211 | } else { | ||
1212 | /* If there is no device tree, fall back to Tegra20 */ | ||
1213 | cdata = &tegra20_dma_chip_data; | ||
1214 | } | ||
1215 | |||
1216 | tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * | ||
1217 | sizeof(struct tegra_dma_channel), GFP_KERNEL); | ||
1218 | if (!tdma) { | ||
1219 | dev_err(&pdev->dev, "Error: memory allocation failed\n"); | ||
1220 | return -ENOMEM; | ||
1221 | } | ||
1222 | |||
1223 | tdma->dev = &pdev->dev; | ||
1224 | tdma->chip_data = cdata; | ||
1225 | platform_set_drvdata(pdev, tdma); | ||
1226 | |||
1227 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1228 | if (!res) { | ||
1229 | dev_err(&pdev->dev, "No mem resource for DMA\n"); | ||
1230 | return -EINVAL; | ||
1231 | } | ||
1232 | |||
1233 | tdma->base_addr = devm_request_and_ioremap(&pdev->dev, res); | ||
1234 | if (!tdma->base_addr) { | ||
1235 | dev_err(&pdev->dev, | ||
1236 | "Cannot request memregion/iomap dma address\n"); | ||
1237 | return -EADDRNOTAVAIL; | ||
1238 | } | ||
1239 | |||
1240 | tdma->dma_clk = devm_clk_get(&pdev->dev, NULL); | ||
1241 | if (IS_ERR(tdma->dma_clk)) { | ||
1242 | dev_err(&pdev->dev, "Error: Missing controller clock\n"); | ||
1243 | return PTR_ERR(tdma->dma_clk); | ||
1244 | } | ||
1245 | |||
1246 | spin_lock_init(&tdma->global_lock); | ||
1247 | |||
1248 | pm_runtime_enable(&pdev->dev); | ||
1249 | if (!pm_runtime_enabled(&pdev->dev)) { | ||
1250 | ret = tegra_dma_runtime_resume(&pdev->dev); | ||
1251 | if (ret) { | ||
1252 | dev_err(&pdev->dev, "dma_runtime_resume failed %d\n", | ||
1253 | ret); | ||
1254 | goto err_pm_disable; | ||
1255 | } | ||
1256 | } | ||
1257 | |||
1258 | /* Reset DMA controller */ | ||
1259 | tegra_periph_reset_assert(tdma->dma_clk); | ||
1260 | udelay(2); | ||
1261 | tegra_periph_reset_deassert(tdma->dma_clk); | ||
1262 | |||
1263 | /* Enable global DMA registers */ | ||
1264 | tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE); | ||
1265 | tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); | ||
1266 | tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); | ||
1267 | |||
1268 | INIT_LIST_HEAD(&tdma->dma_dev.channels); | ||
1269 | for (i = 0; i < cdata->nr_channels; i++) { | ||
1270 | struct tegra_dma_channel *tdc = &tdma->channels[i]; | ||
1271 | char irq_name[30]; | ||
1272 | |||
1273 | tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + | ||
1274 | i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE; | ||
1275 | |||
1276 | res = platform_get_resource(pdev, IORESOURCE_IRQ, i); | ||
1277 | if (!res) { | ||
1278 | ret = -EINVAL; | ||
1279 | dev_err(&pdev->dev, "No irq resource for chan %d\n", i); | ||
1280 | goto err_irq; | ||
1281 | } | ||
1282 | tdc->irq = res->start; | ||
1283 | snprintf(irq_name, sizeof(irq_name), "apbdma.%d", i); | ||
1284 | ret = devm_request_irq(&pdev->dev, tdc->irq, | ||
1285 | tegra_dma_isr, 0, irq_name, tdc); | ||
1286 | if (ret) { | ||
1287 | dev_err(&pdev->dev, | ||
1288 | "request_irq failed with err %d channel %d\n", | ||
1289 | ret, i); | ||
1290 | goto err_irq; | ||
1291 | } | ||
1292 | |||
1293 | tdc->dma_chan.device = &tdma->dma_dev; | ||
1294 | dma_cookie_init(&tdc->dma_chan); | ||
1295 | list_add_tail(&tdc->dma_chan.device_node, | ||
1296 | &tdma->dma_dev.channels); | ||
1297 | tdc->tdma = tdma; | ||
1298 | tdc->id = i; | ||
1299 | |||
1300 | tasklet_init(&tdc->tasklet, tegra_dma_tasklet, | ||
1301 | (unsigned long)tdc); | ||
1302 | spin_lock_init(&tdc->lock); | ||
1303 | |||
1304 | INIT_LIST_HEAD(&tdc->pending_sg_req); | ||
1305 | INIT_LIST_HEAD(&tdc->free_sg_req); | ||
1306 | INIT_LIST_HEAD(&tdc->free_dma_desc); | ||
1307 | INIT_LIST_HEAD(&tdc->cb_desc); | ||
1308 | } | ||
1309 | |||
1310 | dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask); | ||
1311 | dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask); | ||
1312 | dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask); | ||
1313 | |||
1314 | tdma->dma_dev.dev = &pdev->dev; | ||
1315 | tdma->dma_dev.device_alloc_chan_resources = | ||
1316 | tegra_dma_alloc_chan_resources; | ||
1317 | tdma->dma_dev.device_free_chan_resources = | ||
1318 | tegra_dma_free_chan_resources; | ||
1319 | tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg; | ||
1320 | tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic; | ||
1321 | tdma->dma_dev.device_control = tegra_dma_device_control; | ||
1322 | tdma->dma_dev.device_tx_status = tegra_dma_tx_status; | ||
1323 | tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending; | ||
1324 | |||
1325 | ret = dma_async_device_register(&tdma->dma_dev); | ||
1326 | if (ret < 0) { | ||
1327 | dev_err(&pdev->dev, | ||
1328 | "Tegra20 APB DMA driver registration failed %d\n", ret); | ||
1329 | goto err_irq; | ||
1330 | } | ||
1331 | |||
1332 | dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n", | ||
1333 | cdata->nr_channels); | ||
1334 | return 0; | ||
1335 | |||
1336 | err_irq: | ||
1337 | while (--i >= 0) { | ||
1338 | struct tegra_dma_channel *tdc = &tdma->channels[i]; | ||
1339 | tasklet_kill(&tdc->tasklet); | ||
1340 | } | ||
1341 | |||
1342 | err_pm_disable: | ||
1343 | pm_runtime_disable(&pdev->dev); | ||
1344 | if (!pm_runtime_status_suspended(&pdev->dev)) | ||
1345 | tegra_dma_runtime_suspend(&pdev->dev); | ||
1346 | return ret; | ||
1347 | } | ||
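When there is no device tree node, the probe falls back to the Tegra20 chip data, so a board file only has to register a platform device whose name matches the driver. A minimal sketch under that assumption; the addresses and IRQ numbers are placeholders, and one IRQ resource is expected per channel after the register window:

#include <linux/platform_device.h>
#include <linux/ioport.h>

/* Hypothetical board-file sketch for the non-DT path; not part of this file. */
static struct resource my_apbdma_resources[] = {
	DEFINE_RES_MEM(0x6000a000, 0x2000),	/* placeholder register window */
	DEFINE_RES_IRQ(104),			/* placeholder, channel 0 */
	/* ... one IRQ resource for each remaining channel ... */
};

static struct platform_device my_apbdma_device = {
	.name		= "tegra-apbdma",	/* matches tegra_dmac_driver */
	.id		= -1,
	.resource	= my_apbdma_resources,
	.num_resources	= ARRAY_SIZE(my_apbdma_resources),
};

/* platform_device_register(&my_apbdma_device) then binds against
 * tegra_dma_probe() with tegra20_dma_chip_data selected by default. */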
1348 | |||
1349 | static int __devexit tegra_dma_remove(struct platform_device *pdev) | ||
1350 | { | ||
1351 | struct tegra_dma *tdma = platform_get_drvdata(pdev); | ||
1352 | int i; | ||
1353 | struct tegra_dma_channel *tdc; | ||
1354 | |||
1355 | dma_async_device_unregister(&tdma->dma_dev); | ||
1356 | |||
1357 | for (i = 0; i < tdma->chip_data->nr_channels; ++i) { | ||
1358 | tdc = &tdma->channels[i]; | ||
1359 | tasklet_kill(&tdc->tasklet); | ||
1360 | } | ||
1361 | |||
1362 | pm_runtime_disable(&pdev->dev); | ||
1363 | if (!pm_runtime_status_suspended(&pdev->dev)) | ||
1364 | tegra_dma_runtime_suspend(&pdev->dev); | ||
1365 | |||
1366 | return 0; | ||
1367 | } | ||
1368 | |||
1369 | static int tegra_dma_runtime_suspend(struct device *dev) | ||
1370 | { | ||
1371 | struct platform_device *pdev = to_platform_device(dev); | ||
1372 | struct tegra_dma *tdma = platform_get_drvdata(pdev); | ||
1373 | |||
1374 | clk_disable_unprepare(tdma->dma_clk); | ||
1375 | return 0; | ||
1376 | } | ||
1377 | |||
1378 | static int tegra_dma_runtime_resume(struct device *dev) | ||
1379 | { | ||
1380 | struct platform_device *pdev = to_platform_device(dev); | ||
1381 | struct tegra_dma *tdma = platform_get_drvdata(pdev); | ||
1382 | int ret; | ||
1383 | |||
1384 | ret = clk_prepare_enable(tdma->dma_clk); | ||
1385 | if (ret < 0) { | ||
1386 | dev_err(dev, "clk_enable failed: %d\n", ret); | ||
1387 | return ret; | ||
1388 | } | ||
1389 | return 0; | ||
1390 | } | ||
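These two callbacks only gate the controller clock. Register access in code that relies on them would typically be bracketed by runtime PM references, for example as in the hypothetical helper below, shown only to illustrate how get/put calls map onto the clock handling above:

#include <linux/pm_runtime.h>

/* Hypothetical helper, not part of this file: taking a reference resumes
 * the controller through tegra_dma_runtime_resume() (clock on) if it was
 * runtime suspended; dropping the last reference allows
 * tegra_dma_runtime_suspend() to run again. */
static int my_touch_dma_regs(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... access controller registers while the clock is enabled ... */

	pm_runtime_put(dev);
	return 0;
}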
1391 | |||
1392 | static const struct dev_pm_ops tegra_dma_dev_pm_ops = { | ||
1393 | #ifdef CONFIG_PM_RUNTIME | ||
1394 | .runtime_suspend = tegra_dma_runtime_suspend, | ||
1395 | .runtime_resume = tegra_dma_runtime_resume, | ||
1396 | #endif | ||
1397 | }; | ||
1398 | |||
1399 | static struct platform_driver tegra_dmac_driver = { | ||
1400 | .driver = { | ||
1401 | .name = "tegra-apbdma", | ||
1402 | .owner = THIS_MODULE, | ||
1403 | .pm = &tegra_dma_dev_pm_ops, | ||
1404 | .of_match_table = of_match_ptr(tegra_dma_of_match), | ||
1405 | }, | ||
1406 | .probe = tegra_dma_probe, | ||
1407 | .remove = __devexit_p(tegra_dma_remove), | ||
1408 | }; | ||
1409 | |||
1410 | module_platform_driver(tegra_dmac_driver); | ||
1411 | |||
1412 | MODULE_ALIAS("platform:tegra20-apbdma"); | ||
1413 | MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver"); | ||
1414 | MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); | ||
1415 | MODULE_LICENSE("GPL v2"); | ||