author	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-10 13:28:45 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-10 13:28:45 -0400
commit	77c32bbbe0d0e963ba5723b8d1f6c42c5d56858b (patch)
tree	2b819d3b7c00173c072fe61cadb0fd82ba51f417 /drivers/dma
parent	fad0701eaa091beb8ce5ef2eef04b5e833617368 (diff)
parent	06822788faa21529a9a97965e266d77a596dc921 (diff)
Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
- new Xilinx VDMA driver from Srikanth
- bunch of updates for edma driver by Thomas, Joel and Peter
- fixes and updates on dw, ste_dma, freescale, mpc512x, sudmac etc
* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (45 commits)
dmaengine: sh: don't use dynamic static allocation
dmaengine: sh: fix print specifier warnings
dmaengine: sh: make shdma_prep_dma_cyclic static
dmaengine: Kconfig: Update MXS_DMA help text to include MX6Q/MX6DL
of: dma: Grammar s/requests/request/, s/used required/required/
dmaengine: shdma: Enable driver compilation with COMPILE_TEST
dmaengine: rcar-hpbdma: Include linux/err.h
dmaengine: sudmac: Include linux/err.h
dmaengine: sudmac: Keep #include sorted alphabetically
dmaengine: shdmac: Include linux/err.h
dmaengine: shdmac: Keep #include sorted alphabetically
dmaengine: s3c24xx-dma: Add cyclic transfer support
dmaengine: s3c24xx-dma: Process whole SG chain
dmaengine: imx: correct sdmac->status for cyclic dma tx
dmaengine: pch: fix compilation for alpha target
dmaengine: dw: check return code of dma_async_device_register()
dmaengine: dw: fix regression in dw_probe() function
dmaengine: dw: enable clock before access
dma: pch_dma: Fix Kconfig dependencies
dmaengine: mpc512x: add support for peripheral transfers
...
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/Kconfig	18
-rw-r--r--	drivers/dma/Makefile	1
-rw-r--r--	drivers/dma/dw/core.c	49
-rw-r--r--	drivers/dma/dw/pci.c	12
-rw-r--r--	drivers/dma/dw/platform.c	18
-rw-r--r--	drivers/dma/fsldma.c	306
-rw-r--r--	drivers/dma/imx-sdma.c	2
-rw-r--r--	drivers/dma/mmp_pdma.c	95
-rw-r--r--	drivers/dma/mpc512x_dma.c	342
-rw-r--r--	drivers/dma/pch_dma.c	3
-rw-r--r--	drivers/dma/s3c24xx-dma.c	113
-rw-r--r--	drivers/dma/sh/Kconfig	2
-rw-r--r--	drivers/dma/sh/rcar-hpbdma.c	1
-rw-r--r--	drivers/dma/sh/shdma-base.c	98
-rw-r--r--	drivers/dma/sh/shdmac.c	15
-rw-r--r--	drivers/dma/sh/sudmac.c	7
-rw-r--r--	drivers/dma/ste_dma40.c	182
-rw-r--r--	drivers/dma/xilinx/Makefile	1
-rw-r--r--	drivers/dma/xilinx/xilinx_vdma.c	1379
19 files changed, 2270 insertions, 374 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 5c5863842de9..1eca7b9760e6 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -234,7 +234,7 @@ config PL330_DMA | |||
234 | 234 | ||
235 | config PCH_DMA | 235 | config PCH_DMA |
236 | tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA" | 236 | tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA" |
237 | depends on PCI && X86 | 237 | depends on PCI && (X86_32 || COMPILE_TEST) |
238 | select DMA_ENGINE | 238 | select DMA_ENGINE |
239 | help | 239 | help |
240 | Enable support for Intel EG20T PCH DMA engine. | 240 | Enable support for Intel EG20T PCH DMA engine. |
@@ -269,7 +269,7 @@ config MXS_DMA | |||
269 | select DMA_ENGINE | 269 | select DMA_ENGINE |
270 | help | 270 | help |
271 | Support the MXS DMA engine. This engine including APBH-DMA | 271 | Support the MXS DMA engine. This engine including APBH-DMA |
272 | and APBX-DMA is integrated into Freescale i.MX23/28 chips. | 272 | and APBX-DMA is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips. |
273 | 273 | ||
274 | config EP93XX_DMA | 274 | config EP93XX_DMA |
275 | bool "Cirrus Logic EP93xx DMA support" | 275 | bool "Cirrus Logic EP93xx DMA support" |
@@ -361,6 +361,20 @@ config FSL_EDMA | |||
361 | multiplexing capability for DMA request sources(slot). | 361 | multiplexing capability for DMA request sources(slot). |
362 | This module can be found on Freescale Vybrid and LS-1 SoCs. | 362 | This module can be found on Freescale Vybrid and LS-1 SoCs. |
363 | 363 | ||
364 | config XILINX_VDMA | ||
365 | tristate "Xilinx AXI VDMA Engine" | ||
366 | depends on (ARCH_ZYNQ || MICROBLAZE) | ||
367 | select DMA_ENGINE | ||
368 | help | ||
369 | Enable support for Xilinx AXI VDMA Soft IP. | ||
370 | |||
371 | This engine provides high-bandwidth direct memory access | ||
372 | between memory and AXI4-Stream video type target | ||
373 | peripherals including peripherals which support AXI4- | ||
374 | Stream Video Protocol. It has two stream interfaces/ | ||
375 | channels, Memory Mapped to Stream (MM2S) and Stream to | ||
376 | Memory Mapped (S2MM) for the data transfers. | ||
377 | |||
364 | config DMA_ENGINE | 378 | config DMA_ENGINE |
365 | bool | 379 | bool |
366 | 380 | ||
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5150c82c9caf..c779e1eb2db2 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -46,3 +46,4 @@ obj-$(CONFIG_K3_DMA) += k3dma.o | |||
46 | obj-$(CONFIG_MOXART_DMA) += moxart-dma.o | 46 | obj-$(CONFIG_MOXART_DMA) += moxart-dma.o |
47 | obj-$(CONFIG_FSL_EDMA) += fsl-edma.o | 47 | obj-$(CONFIG_FSL_EDMA) += fsl-edma.o |
48 | obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o | 48 | obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o |
49 | obj-y += xilinx/ | ||
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 7a740769c2fa..a27ded53ab4f 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1493,6 +1493,13 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1493 | dw->regs = chip->regs; | 1493 | dw->regs = chip->regs; |
1494 | chip->dw = dw; | 1494 | chip->dw = dw; |
1495 | 1495 | ||
1496 | dw->clk = devm_clk_get(chip->dev, "hclk"); | ||
1497 | if (IS_ERR(dw->clk)) | ||
1498 | return PTR_ERR(dw->clk); | ||
1499 | err = clk_prepare_enable(dw->clk); | ||
1500 | if (err) | ||
1501 | return err; | ||
1502 | |||
1496 | dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); | 1503 | dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); |
1497 | autocfg = dw_params >> DW_PARAMS_EN & 0x1; | 1504 | autocfg = dw_params >> DW_PARAMS_EN & 0x1; |
1498 | 1505 | ||
@@ -1500,15 +1507,19 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1500 | 1507 | ||
1501 | if (!pdata && autocfg) { | 1508 | if (!pdata && autocfg) { |
1502 | pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL); | 1509 | pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL); |
1503 | if (!pdata) | 1510 | if (!pdata) { |
1504 | return -ENOMEM; | 1511 | err = -ENOMEM; |
1512 | goto err_pdata; | ||
1513 | } | ||
1505 | 1514 | ||
1506 | /* Fill platform data with the default values */ | 1515 | /* Fill platform data with the default values */ |
1507 | pdata->is_private = true; | 1516 | pdata->is_private = true; |
1508 | pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; | 1517 | pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; |
1509 | pdata->chan_priority = CHAN_PRIORITY_ASCENDING; | 1518 | pdata->chan_priority = CHAN_PRIORITY_ASCENDING; |
1510 | } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) | 1519 | } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { |
1511 | return -EINVAL; | 1520 | err = -EINVAL; |
1521 | goto err_pdata; | ||
1522 | } | ||
1512 | 1523 | ||
1513 | if (autocfg) | 1524 | if (autocfg) |
1514 | nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1; | 1525 | nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1; |
@@ -1517,13 +1528,10 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1517 | 1528 | ||
1518 | dw->chan = devm_kcalloc(chip->dev, nr_channels, sizeof(*dw->chan), | 1529 | dw->chan = devm_kcalloc(chip->dev, nr_channels, sizeof(*dw->chan), |
1519 | GFP_KERNEL); | 1530 | GFP_KERNEL); |
1520 | if (!dw->chan) | 1531 | if (!dw->chan) { |
1521 | return -ENOMEM; | 1532 | err = -ENOMEM; |
1522 | 1533 | goto err_pdata; | |
1523 | dw->clk = devm_clk_get(chip->dev, "hclk"); | 1534 | } |
1524 | if (IS_ERR(dw->clk)) | ||
1525 | return PTR_ERR(dw->clk); | ||
1526 | clk_prepare_enable(dw->clk); | ||
1527 | 1535 | ||
1528 | /* Get hardware configuration parameters */ | 1536 | /* Get hardware configuration parameters */ |
1529 | if (autocfg) { | 1537 | if (autocfg) { |
@@ -1553,7 +1561,8 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1553 | sizeof(struct dw_desc), 4, 0); | 1561 | sizeof(struct dw_desc), 4, 0); |
1554 | if (!dw->desc_pool) { | 1562 | if (!dw->desc_pool) { |
1555 | dev_err(chip->dev, "No memory for descriptors dma pool\n"); | 1563 | dev_err(chip->dev, "No memory for descriptors dma pool\n"); |
1556 | return -ENOMEM; | 1564 | err = -ENOMEM; |
1565 | goto err_pdata; | ||
1557 | } | 1566 | } |
1558 | 1567 | ||
1559 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); | 1568 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); |
@@ -1561,7 +1570,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1561 | err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, | 1570 | err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, |
1562 | "dw_dmac", dw); | 1571 | "dw_dmac", dw); |
1563 | if (err) | 1572 | if (err) |
1564 | return err; | 1573 | goto err_pdata; |
1565 | 1574 | ||
1566 | INIT_LIST_HEAD(&dw->dma.channels); | 1575 | INIT_LIST_HEAD(&dw->dma.channels); |
1567 | for (i = 0; i < nr_channels; i++) { | 1576 | for (i = 0; i < nr_channels; i++) { |
@@ -1650,12 +1659,20 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1650 | 1659 | ||
1651 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | 1660 | dma_writel(dw, CFG, DW_CFG_DMA_EN); |
1652 | 1661 | ||
1662 | err = dma_async_device_register(&dw->dma); | ||
1663 | if (err) | ||
1664 | goto err_dma_register; | ||
1665 | |||
1653 | dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", | 1666 | dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", |
1654 | nr_channels); | 1667 | nr_channels); |
1655 | 1668 | ||
1656 | dma_async_device_register(&dw->dma); | ||
1657 | |||
1658 | return 0; | 1669 | return 0; |
1670 | |||
1671 | err_dma_register: | ||
1672 | free_irq(chip->irq, dw); | ||
1673 | err_pdata: | ||
1674 | clk_disable_unprepare(dw->clk); | ||
1675 | return err; | ||
1659 | } | 1676 | } |
1660 | EXPORT_SYMBOL_GPL(dw_dma_probe); | 1677 | EXPORT_SYMBOL_GPL(dw_dma_probe); |
1661 | 1678 | ||
@@ -1676,6 +1693,8 @@ int dw_dma_remove(struct dw_dma_chip *chip) | |||
1676 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1693 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1677 | } | 1694 | } |
1678 | 1695 | ||
1696 | clk_disable_unprepare(dw->clk); | ||
1697 | |||
1679 | return 0; | 1698 | return 0; |
1680 | } | 1699 | } |
1681 | EXPORT_SYMBOL_GPL(dw_dma_remove); | 1700 | EXPORT_SYMBOL_GPL(dw_dma_remove); |
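The reworked dw_dma_probe() above enables the clock before the first register read and then unwinds it on every failure path instead of returning early. A minimal sketch of that goto-unwind control flow, using a hypothetical "foo" driver (struct foo_chip, foo_irq and foo_probe are placeholders, not the real dw_dmac structures):

	#include <linux/clk.h>
	#include <linux/dmaengine.h>
	#include <linux/err.h>
	#include <linux/interrupt.h>

	/* Hypothetical device structure; only the fields the sketch touches. */
	struct foo_chip {
		struct device *dev;
		struct clk *clk;
		int irq;
		struct dma_device dma;
	};

	static irqreturn_t foo_irq(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int foo_probe(struct foo_chip *chip)
	{
		int err;

		chip->clk = devm_clk_get(chip->dev, "hclk");
		if (IS_ERR(chip->clk))
			return PTR_ERR(chip->clk);

		/* Enable the clock before any register access. */
		err = clk_prepare_enable(chip->clk);
		if (err)
			return err;

		err = request_irq(chip->irq, foo_irq, IRQF_SHARED, "foo", chip);
		if (err)
			goto err_clk;

		err = dma_async_device_register(&chip->dma);
		if (err)
			goto err_irq;

		return 0;

		/* Unwind in the reverse order of setup. */
	err_irq:
		free_irq(chip->irq, chip);
	err_clk:
		clk_disable_unprepare(chip->clk);
		return err;
	}

The sketch only illustrates the ordering of setup and teardown; the real probe additionally allocates channels, descriptor pools and a tasklet before registering the dmaengine device.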
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index fec59f1a77bb..39e30c3c7a9d 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -93,19 +93,13 @@ static int dw_pci_resume_early(struct device *dev) | |||
93 | return dw_dma_resume(chip); | 93 | return dw_dma_resume(chip); |
94 | }; | 94 | }; |
95 | 95 | ||
96 | #else /* !CONFIG_PM_SLEEP */ | 96 | #endif /* CONFIG_PM_SLEEP */ |
97 | |||
98 | #define dw_pci_suspend_late NULL | ||
99 | #define dw_pci_resume_early NULL | ||
100 | |||
101 | #endif /* !CONFIG_PM_SLEEP */ | ||
102 | 97 | ||
103 | static const struct dev_pm_ops dw_pci_dev_pm_ops = { | 98 | static const struct dev_pm_ops dw_pci_dev_pm_ops = { |
104 | .suspend_late = dw_pci_suspend_late, | 99 | SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_pci_suspend_late, dw_pci_resume_early) |
105 | .resume_early = dw_pci_resume_early, | ||
106 | }; | 100 | }; |
107 | 101 | ||
108 | static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = { | 102 | static const struct pci_device_id dw_pci_id_table[] = { |
109 | /* Medfield */ | 103 | /* Medfield */ |
110 | { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata }, | 104 | { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata }, |
111 | { PCI_VDEVICE(INTEL, 0x0830), (kernel_ulong_t)&dw_pci_pdata }, | 105 | { PCI_VDEVICE(INTEL, 0x0830), (kernel_ulong_t)&dw_pci_pdata }, |
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 453822cc4f9d..c5b339af6be5 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -256,7 +256,7 @@ MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table); | |||
256 | 256 | ||
257 | #ifdef CONFIG_PM_SLEEP | 257 | #ifdef CONFIG_PM_SLEEP |
258 | 258 | ||
259 | static int dw_suspend_noirq(struct device *dev) | 259 | static int dw_suspend_late(struct device *dev) |
260 | { | 260 | { |
261 | struct platform_device *pdev = to_platform_device(dev); | 261 | struct platform_device *pdev = to_platform_device(dev); |
262 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); | 262 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); |
@@ -264,7 +264,7 @@ static int dw_suspend_noirq(struct device *dev) | |||
264 | return dw_dma_suspend(chip); | 264 | return dw_dma_suspend(chip); |
265 | } | 265 | } |
266 | 266 | ||
267 | static int dw_resume_noirq(struct device *dev) | 267 | static int dw_resume_early(struct device *dev) |
268 | { | 268 | { |
269 | struct platform_device *pdev = to_platform_device(dev); | 269 | struct platform_device *pdev = to_platform_device(dev); |
270 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); | 270 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); |
@@ -272,20 +272,10 @@ static int dw_resume_noirq(struct device *dev) | |||
272 | return dw_dma_resume(chip); | 272 | return dw_dma_resume(chip); |
273 | } | 273 | } |
274 | 274 | ||
275 | #else /* !CONFIG_PM_SLEEP */ | 275 | #endif /* CONFIG_PM_SLEEP */ |
276 | |||
277 | #define dw_suspend_noirq NULL | ||
278 | #define dw_resume_noirq NULL | ||
279 | |||
280 | #endif /* !CONFIG_PM_SLEEP */ | ||
281 | 276 | ||
282 | static const struct dev_pm_ops dw_dev_pm_ops = { | 277 | static const struct dev_pm_ops dw_dev_pm_ops = { |
283 | .suspend_noirq = dw_suspend_noirq, | 278 | SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early) |
284 | .resume_noirq = dw_resume_noirq, | ||
285 | .freeze_noirq = dw_suspend_noirq, | ||
286 | .thaw_noirq = dw_resume_noirq, | ||
287 | .restore_noirq = dw_resume_noirq, | ||
288 | .poweroff_noirq = dw_suspend_noirq, | ||
289 | }; | 279 | }; |
290 | 280 | ||
291 | static struct platform_driver dw_driver = { | 281 | static struct platform_driver dw_driver = { |
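The PCI and platform PM changes above replace the hand-rolled #else NULL stubs with SET_LATE_SYSTEM_SLEEP_PM_OPS(), which fills in the late/early system-sleep callbacks (including the hibernation variants) when CONFIG_PM_SLEEP is set and expands to nothing otherwise. A sketch of the resulting pattern, with hypothetical foo_* names:

	#include <linux/pm.h>

	#ifdef CONFIG_PM_SLEEP
	static int foo_suspend_late(struct device *dev)
	{
		/* Quiesce the controller after regular suspend callbacks have run. */
		return 0;
	}

	static int foo_resume_early(struct device *dev)
	{
		/* Bring the controller back before regular resume callbacks run. */
		return 0;
	}
	#endif /* CONFIG_PM_SLEEP */

	static const struct dev_pm_ops foo_pm_ops = {
		/* No members are emitted when CONFIG_PM_SLEEP is disabled. */
		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
	};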
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index f157c6f76b32..e0fec68aed25 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -61,6 +61,16 @@ static u32 get_sr(struct fsldma_chan *chan) | |||
61 | return DMA_IN(chan, &chan->regs->sr, 32); | 61 | return DMA_IN(chan, &chan->regs->sr, 32); |
62 | } | 62 | } |
63 | 63 | ||
64 | static void set_mr(struct fsldma_chan *chan, u32 val) | ||
65 | { | ||
66 | DMA_OUT(chan, &chan->regs->mr, val, 32); | ||
67 | } | ||
68 | |||
69 | static u32 get_mr(struct fsldma_chan *chan) | ||
70 | { | ||
71 | return DMA_IN(chan, &chan->regs->mr, 32); | ||
72 | } | ||
73 | |||
64 | static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) | 74 | static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) |
65 | { | 75 | { |
66 | DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); | 76 | DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); |
@@ -71,6 +81,11 @@ static dma_addr_t get_cdar(struct fsldma_chan *chan) | |||
71 | return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; | 81 | return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; |
72 | } | 82 | } |
73 | 83 | ||
84 | static void set_bcr(struct fsldma_chan *chan, u32 val) | ||
85 | { | ||
86 | DMA_OUT(chan, &chan->regs->bcr, val, 32); | ||
87 | } | ||
88 | |||
74 | static u32 get_bcr(struct fsldma_chan *chan) | 89 | static u32 get_bcr(struct fsldma_chan *chan) |
75 | { | 90 | { |
76 | return DMA_IN(chan, &chan->regs->bcr, 32); | 91 | return DMA_IN(chan, &chan->regs->bcr, 32); |
@@ -135,7 +150,7 @@ static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc) | |||
135 | static void dma_init(struct fsldma_chan *chan) | 150 | static void dma_init(struct fsldma_chan *chan) |
136 | { | 151 | { |
137 | /* Reset the channel */ | 152 | /* Reset the channel */ |
138 | DMA_OUT(chan, &chan->regs->mr, 0, 32); | 153 | set_mr(chan, 0); |
139 | 154 | ||
140 | switch (chan->feature & FSL_DMA_IP_MASK) { | 155 | switch (chan->feature & FSL_DMA_IP_MASK) { |
141 | case FSL_DMA_IP_85XX: | 156 | case FSL_DMA_IP_85XX: |
@@ -144,16 +159,15 @@ static void dma_init(struct fsldma_chan *chan) | |||
144 | * EOLNIE - End of links interrupt enable | 159 | * EOLNIE - End of links interrupt enable |
145 | * BWC - Bandwidth sharing among channels | 160 | * BWC - Bandwidth sharing among channels |
146 | */ | 161 | */ |
147 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC | 162 | set_mr(chan, FSL_DMA_MR_BWC | FSL_DMA_MR_EIE |
148 | | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32); | 163 | | FSL_DMA_MR_EOLNIE); |
149 | break; | 164 | break; |
150 | case FSL_DMA_IP_83XX: | 165 | case FSL_DMA_IP_83XX: |
151 | /* Set the channel to below modes: | 166 | /* Set the channel to below modes: |
152 | * EOTIE - End-of-transfer interrupt enable | 167 | * EOTIE - End-of-transfer interrupt enable |
153 | * PRC_RM - PCI read multiple | 168 | * PRC_RM - PCI read multiple |
154 | */ | 169 | */ |
155 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE | 170 | set_mr(chan, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM); |
156 | | FSL_DMA_MR_PRC_RM, 32); | ||
157 | break; | 171 | break; |
158 | } | 172 | } |
159 | } | 173 | } |
@@ -175,10 +189,10 @@ static void dma_start(struct fsldma_chan *chan) | |||
175 | { | 189 | { |
176 | u32 mode; | 190 | u32 mode; |
177 | 191 | ||
178 | mode = DMA_IN(chan, &chan->regs->mr, 32); | 192 | mode = get_mr(chan); |
179 | 193 | ||
180 | if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { | 194 | if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { |
181 | DMA_OUT(chan, &chan->regs->bcr, 0, 32); | 195 | set_bcr(chan, 0); |
182 | mode |= FSL_DMA_MR_EMP_EN; | 196 | mode |= FSL_DMA_MR_EMP_EN; |
183 | } else { | 197 | } else { |
184 | mode &= ~FSL_DMA_MR_EMP_EN; | 198 | mode &= ~FSL_DMA_MR_EMP_EN; |
@@ -191,7 +205,7 @@ static void dma_start(struct fsldma_chan *chan) | |||
191 | mode |= FSL_DMA_MR_CS; | 205 | mode |= FSL_DMA_MR_CS; |
192 | } | 206 | } |
193 | 207 | ||
194 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 208 | set_mr(chan, mode); |
195 | } | 209 | } |
196 | 210 | ||
197 | static void dma_halt(struct fsldma_chan *chan) | 211 | static void dma_halt(struct fsldma_chan *chan) |
@@ -200,7 +214,7 @@ static void dma_halt(struct fsldma_chan *chan) | |||
200 | int i; | 214 | int i; |
201 | 215 | ||
202 | /* read the mode register */ | 216 | /* read the mode register */ |
203 | mode = DMA_IN(chan, &chan->regs->mr, 32); | 217 | mode = get_mr(chan); |
204 | 218 | ||
205 | /* | 219 | /* |
206 | * The 85xx controller supports channel abort, which will stop | 220 | * The 85xx controller supports channel abort, which will stop |
@@ -209,14 +223,14 @@ static void dma_halt(struct fsldma_chan *chan) | |||
209 | */ | 223 | */ |
210 | if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { | 224 | if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { |
211 | mode |= FSL_DMA_MR_CA; | 225 | mode |= FSL_DMA_MR_CA; |
212 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 226 | set_mr(chan, mode); |
213 | 227 | ||
214 | mode &= ~FSL_DMA_MR_CA; | 228 | mode &= ~FSL_DMA_MR_CA; |
215 | } | 229 | } |
216 | 230 | ||
217 | /* stop the DMA controller */ | 231 | /* stop the DMA controller */ |
218 | mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN); | 232 | mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN); |
219 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 233 | set_mr(chan, mode); |
220 | 234 | ||
221 | /* wait for the DMA controller to become idle */ | 235 | /* wait for the DMA controller to become idle */ |
222 | for (i = 0; i < 100; i++) { | 236 | for (i = 0; i < 100; i++) { |
@@ -245,7 +259,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size) | |||
245 | { | 259 | { |
246 | u32 mode; | 260 | u32 mode; |
247 | 261 | ||
248 | mode = DMA_IN(chan, &chan->regs->mr, 32); | 262 | mode = get_mr(chan); |
249 | 263 | ||
250 | switch (size) { | 264 | switch (size) { |
251 | case 0: | 265 | case 0: |
@@ -259,7 +273,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size) | |||
259 | break; | 273 | break; |
260 | } | 274 | } |
261 | 275 | ||
262 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 276 | set_mr(chan, mode); |
263 | } | 277 | } |
264 | 278 | ||
265 | /** | 279 | /** |
@@ -277,7 +291,7 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size) | |||
277 | { | 291 | { |
278 | u32 mode; | 292 | u32 mode; |
279 | 293 | ||
280 | mode = DMA_IN(chan, &chan->regs->mr, 32); | 294 | mode = get_mr(chan); |
281 | 295 | ||
282 | switch (size) { | 296 | switch (size) { |
283 | case 0: | 297 | case 0: |
@@ -291,7 +305,7 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size) | |||
291 | break; | 305 | break; |
292 | } | 306 | } |
293 | 307 | ||
294 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 308 | set_mr(chan, mode); |
295 | } | 309 | } |
296 | 310 | ||
297 | /** | 311 | /** |
@@ -312,10 +326,10 @@ static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size) | |||
312 | 326 | ||
313 | BUG_ON(size > 1024); | 327 | BUG_ON(size > 1024); |
314 | 328 | ||
315 | mode = DMA_IN(chan, &chan->regs->mr, 32); | 329 | mode = get_mr(chan); |
316 | mode |= (__ilog2(size) << 24) & 0x0f000000; | 330 | mode |= (__ilog2(size) << 24) & 0x0f000000; |
317 | 331 | ||
318 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 332 | set_mr(chan, mode); |
319 | } | 333 | } |
320 | 334 | ||
321 | /** | 335 | /** |
@@ -404,6 +418,19 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
404 | } | 418 | } |
405 | 419 | ||
406 | /** | 420 | /** |
421 | * fsl_dma_free_descriptor - Free descriptor from channel's DMA pool. | ||
422 | * @chan : Freescale DMA channel | ||
423 | * @desc: descriptor to be freed | ||
424 | */ | ||
425 | static void fsl_dma_free_descriptor(struct fsldma_chan *chan, | ||
426 | struct fsl_desc_sw *desc) | ||
427 | { | ||
428 | list_del(&desc->node); | ||
429 | chan_dbg(chan, "LD %p free\n", desc); | ||
430 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | ||
431 | } | ||
432 | |||
433 | /** | ||
407 | * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. | 434 | * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. |
408 | * @chan : Freescale DMA channel | 435 | * @chan : Freescale DMA channel |
409 | * | 436 | * |
@@ -426,14 +453,107 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan) | |||
426 | desc->async_tx.tx_submit = fsl_dma_tx_submit; | 453 | desc->async_tx.tx_submit = fsl_dma_tx_submit; |
427 | desc->async_tx.phys = pdesc; | 454 | desc->async_tx.phys = pdesc; |
428 | 455 | ||
429 | #ifdef FSL_DMA_LD_DEBUG | ||
430 | chan_dbg(chan, "LD %p allocated\n", desc); | 456 | chan_dbg(chan, "LD %p allocated\n", desc); |
431 | #endif | ||
432 | 457 | ||
433 | return desc; | 458 | return desc; |
434 | } | 459 | } |
435 | 460 | ||
436 | /** | 461 | /** |
462 | * fsl_chan_xfer_ld_queue - transfer any pending transactions | ||
463 | * @chan : Freescale DMA channel | ||
464 | * | ||
465 | * HARDWARE STATE: idle | ||
466 | * LOCKING: must hold chan->desc_lock | ||
467 | */ | ||
468 | static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) | ||
469 | { | ||
470 | struct fsl_desc_sw *desc; | ||
471 | |||
472 | /* | ||
473 | * If the list of pending descriptors is empty, then we | ||
474 | * don't need to do any work at all | ||
475 | */ | ||
476 | if (list_empty(&chan->ld_pending)) { | ||
477 | chan_dbg(chan, "no pending LDs\n"); | ||
478 | return; | ||
479 | } | ||
480 | |||
481 | /* | ||
482 | * The DMA controller is not idle, which means that the interrupt | ||
483 | * handler will start any queued transactions when it runs after | ||
484 | * this transaction finishes | ||
485 | */ | ||
486 | if (!chan->idle) { | ||
487 | chan_dbg(chan, "DMA controller still busy\n"); | ||
488 | return; | ||
489 | } | ||
490 | |||
491 | /* | ||
492 | * If there are some link descriptors which have not been | ||
493 | * transferred, we need to start the controller | ||
494 | */ | ||
495 | |||
496 | /* | ||
497 | * Move all elements from the queue of pending transactions | ||
498 | * onto the list of running transactions | ||
499 | */ | ||
500 | chan_dbg(chan, "idle, starting controller\n"); | ||
501 | desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); | ||
502 | list_splice_tail_init(&chan->ld_pending, &chan->ld_running); | ||
503 | |||
504 | /* | ||
505 | * The 85xx DMA controller doesn't clear the channel start bit | ||
506 | * automatically at the end of a transfer. Therefore we must clear | ||
507 | * it in software before starting the transfer. | ||
508 | */ | ||
509 | if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { | ||
510 | u32 mode; | ||
511 | |||
512 | mode = get_mr(chan); | ||
513 | mode &= ~FSL_DMA_MR_CS; | ||
514 | set_mr(chan, mode); | ||
515 | } | ||
516 | |||
517 | /* | ||
518 | * Program the descriptor's address into the DMA controller, | ||
519 | * then start the DMA transaction | ||
520 | */ | ||
521 | set_cdar(chan, desc->async_tx.phys); | ||
522 | get_cdar(chan); | ||
523 | |||
524 | dma_start(chan); | ||
525 | chan->idle = false; | ||
526 | } | ||
527 | |||
528 | /** | ||
529 | * fsldma_cleanup_descriptor - cleanup and free a single link descriptor | ||
530 | * @chan: Freescale DMA channel | ||
531 | * @desc: descriptor to cleanup and free | ||
532 | * | ||
533 | * This function is used on a descriptor which has been executed by the DMA | ||
534 | * controller. It will run any callbacks, submit any dependencies, and then | ||
535 | * free the descriptor. | ||
536 | */ | ||
537 | static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, | ||
538 | struct fsl_desc_sw *desc) | ||
539 | { | ||
540 | struct dma_async_tx_descriptor *txd = &desc->async_tx; | ||
541 | |||
542 | /* Run the link descriptor callback function */ | ||
543 | if (txd->callback) { | ||
544 | chan_dbg(chan, "LD %p callback\n", desc); | ||
545 | txd->callback(txd->callback_param); | ||
546 | } | ||
547 | |||
548 | /* Run any dependencies */ | ||
549 | dma_run_dependencies(txd); | ||
550 | |||
551 | dma_descriptor_unmap(txd); | ||
552 | chan_dbg(chan, "LD %p free\n", desc); | ||
553 | dma_pool_free(chan->desc_pool, desc, txd->phys); | ||
554 | } | ||
555 | |||
556 | /** | ||
437 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. | 557 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. |
438 | * @chan : Freescale DMA channel | 558 | * @chan : Freescale DMA channel |
439 | * | 559 | * |
@@ -477,13 +597,8 @@ static void fsldma_free_desc_list(struct fsldma_chan *chan, | |||
477 | { | 597 | { |
478 | struct fsl_desc_sw *desc, *_desc; | 598 | struct fsl_desc_sw *desc, *_desc; |
479 | 599 | ||
480 | list_for_each_entry_safe(desc, _desc, list, node) { | 600 | list_for_each_entry_safe(desc, _desc, list, node) |
481 | list_del(&desc->node); | 601 | fsl_dma_free_descriptor(chan, desc); |
482 | #ifdef FSL_DMA_LD_DEBUG | ||
483 | chan_dbg(chan, "LD %p free\n", desc); | ||
484 | #endif | ||
485 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | ||
486 | } | ||
487 | } | 602 | } |
488 | 603 | ||
489 | static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, | 604 | static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, |
@@ -491,13 +606,8 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, | |||
491 | { | 606 | { |
492 | struct fsl_desc_sw *desc, *_desc; | 607 | struct fsl_desc_sw *desc, *_desc; |
493 | 608 | ||
494 | list_for_each_entry_safe_reverse(desc, _desc, list, node) { | 609 | list_for_each_entry_safe_reverse(desc, _desc, list, node) |
495 | list_del(&desc->node); | 610 | fsl_dma_free_descriptor(chan, desc); |
496 | #ifdef FSL_DMA_LD_DEBUG | ||
497 | chan_dbg(chan, "LD %p free\n", desc); | ||
498 | #endif | ||
499 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | ||
500 | } | ||
501 | } | 611 | } |
502 | 612 | ||
503 | /** | 613 | /** |
@@ -520,35 +630,6 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan) | |||
520 | } | 630 | } |
521 | 631 | ||
522 | static struct dma_async_tx_descriptor * | 632 | static struct dma_async_tx_descriptor * |
523 | fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) | ||
524 | { | ||
525 | struct fsldma_chan *chan; | ||
526 | struct fsl_desc_sw *new; | ||
527 | |||
528 | if (!dchan) | ||
529 | return NULL; | ||
530 | |||
531 | chan = to_fsl_chan(dchan); | ||
532 | |||
533 | new = fsl_dma_alloc_descriptor(chan); | ||
534 | if (!new) { | ||
535 | chan_err(chan, "%s\n", msg_ld_oom); | ||
536 | return NULL; | ||
537 | } | ||
538 | |||
539 | new->async_tx.cookie = -EBUSY; | ||
540 | new->async_tx.flags = flags; | ||
541 | |||
542 | /* Insert the link descriptor to the LD ring */ | ||
543 | list_add_tail(&new->node, &new->tx_list); | ||
544 | |||
545 | /* Set End-of-link to the last link descriptor of new list */ | ||
546 | set_ld_eol(chan, new); | ||
547 | |||
548 | return &new->async_tx; | ||
549 | } | ||
550 | |||
551 | static struct dma_async_tx_descriptor * | ||
552 | fsl_dma_prep_memcpy(struct dma_chan *dchan, | 633 | fsl_dma_prep_memcpy(struct dma_chan *dchan, |
553 | dma_addr_t dma_dst, dma_addr_t dma_src, | 634 | dma_addr_t dma_dst, dma_addr_t dma_src, |
554 | size_t len, unsigned long flags) | 635 | size_t len, unsigned long flags) |
@@ -817,105 +898,6 @@ static int fsl_dma_device_control(struct dma_chan *dchan, | |||
817 | } | 898 | } |
818 | 899 | ||
819 | /** | 900 | /** |
820 | * fsldma_cleanup_descriptor - cleanup and free a single link descriptor | ||
821 | * @chan: Freescale DMA channel | ||
822 | * @desc: descriptor to cleanup and free | ||
823 | * | ||
824 | * This function is used on a descriptor which has been executed by the DMA | ||
825 | * controller. It will run any callbacks, submit any dependencies, and then | ||
826 | * free the descriptor. | ||
827 | */ | ||
828 | static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, | ||
829 | struct fsl_desc_sw *desc) | ||
830 | { | ||
831 | struct dma_async_tx_descriptor *txd = &desc->async_tx; | ||
832 | |||
833 | /* Run the link descriptor callback function */ | ||
834 | if (txd->callback) { | ||
835 | #ifdef FSL_DMA_LD_DEBUG | ||
836 | chan_dbg(chan, "LD %p callback\n", desc); | ||
837 | #endif | ||
838 | txd->callback(txd->callback_param); | ||
839 | } | ||
840 | |||
841 | /* Run any dependencies */ | ||
842 | dma_run_dependencies(txd); | ||
843 | |||
844 | dma_descriptor_unmap(txd); | ||
845 | #ifdef FSL_DMA_LD_DEBUG | ||
846 | chan_dbg(chan, "LD %p free\n", desc); | ||
847 | #endif | ||
848 | dma_pool_free(chan->desc_pool, desc, txd->phys); | ||
849 | } | ||
850 | |||
851 | /** | ||
852 | * fsl_chan_xfer_ld_queue - transfer any pending transactions | ||
853 | * @chan : Freescale DMA channel | ||
854 | * | ||
855 | * HARDWARE STATE: idle | ||
856 | * LOCKING: must hold chan->desc_lock | ||
857 | */ | ||
858 | static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) | ||
859 | { | ||
860 | struct fsl_desc_sw *desc; | ||
861 | |||
862 | /* | ||
863 | * If the list of pending descriptors is empty, then we | ||
864 | * don't need to do any work at all | ||
865 | */ | ||
866 | if (list_empty(&chan->ld_pending)) { | ||
867 | chan_dbg(chan, "no pending LDs\n"); | ||
868 | return; | ||
869 | } | ||
870 | |||
871 | /* | ||
872 | * The DMA controller is not idle, which means that the interrupt | ||
873 | * handler will start any queued transactions when it runs after | ||
874 | * this transaction finishes | ||
875 | */ | ||
876 | if (!chan->idle) { | ||
877 | chan_dbg(chan, "DMA controller still busy\n"); | ||
878 | return; | ||
879 | } | ||
880 | |||
881 | /* | ||
882 | * If there are some link descriptors which have not been | ||
883 | * transferred, we need to start the controller | ||
884 | */ | ||
885 | |||
886 | /* | ||
887 | * Move all elements from the queue of pending transactions | ||
888 | * onto the list of running transactions | ||
889 | */ | ||
890 | chan_dbg(chan, "idle, starting controller\n"); | ||
891 | desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); | ||
892 | list_splice_tail_init(&chan->ld_pending, &chan->ld_running); | ||
893 | |||
894 | /* | ||
895 | * The 85xx DMA controller doesn't clear the channel start bit | ||
896 | * automatically at the end of a transfer. Therefore we must clear | ||
897 | * it in software before starting the transfer. | ||
898 | */ | ||
899 | if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { | ||
900 | u32 mode; | ||
901 | |||
902 | mode = DMA_IN(chan, &chan->regs->mr, 32); | ||
903 | mode &= ~FSL_DMA_MR_CS; | ||
904 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | ||
905 | } | ||
906 | |||
907 | /* | ||
908 | * Program the descriptor's address into the DMA controller, | ||
909 | * then start the DMA transaction | ||
910 | */ | ||
911 | set_cdar(chan, desc->async_tx.phys); | ||
912 | get_cdar(chan); | ||
913 | |||
914 | dma_start(chan); | ||
915 | chan->idle = false; | ||
916 | } | ||
917 | |||
918 | /** | ||
919 | * fsl_dma_memcpy_issue_pending - Issue the DMA start command | 901 | * fsl_dma_memcpy_issue_pending - Issue the DMA start command |
920 | * @chan : Freescale DMA channel | 902 | * @chan : Freescale DMA channel |
921 | */ | 903 | */ |
@@ -1304,12 +1286,10 @@ static int fsldma_of_probe(struct platform_device *op) | |||
1304 | fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0); | 1286 | fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0); |
1305 | 1287 | ||
1306 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); | 1288 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); |
1307 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); | ||
1308 | dma_cap_set(DMA_SG, fdev->common.cap_mask); | 1289 | dma_cap_set(DMA_SG, fdev->common.cap_mask); |
1309 | dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); | 1290 | dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); |
1310 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; | 1291 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; |
1311 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; | 1292 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; |
1312 | fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; | ||
1313 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; | 1293 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; |
1314 | fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; | 1294 | fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; |
1315 | fdev->common.device_tx_status = fsl_tx_status; | 1295 | fdev->common.device_tx_status = fsl_tx_status; |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 19041cefabb1..128714622bf5 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -607,8 +607,6 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac) | |||
607 | 607 | ||
608 | if (bd->mode.status & BD_RROR) | 608 | if (bd->mode.status & BD_RROR) |
609 | sdmac->status = DMA_ERROR; | 609 | sdmac->status = DMA_ERROR; |
610 | else | ||
611 | sdmac->status = DMA_IN_PROGRESS; | ||
612 | 610 | ||
613 | bd->mode.status |= BD_DONE; | 611 | bd->mode.status |= BD_DONE; |
614 | sdmac->buf_tail++; | 612 | sdmac->buf_tail++; |
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index bf02e7beb51a..a7b186d536b3 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -29,8 +29,8 @@ | |||
29 | #define DALGN 0x00a0 | 29 | #define DALGN 0x00a0 |
30 | #define DINT 0x00f0 | 30 | #define DINT 0x00f0 |
31 | #define DDADR 0x0200 | 31 | #define DDADR 0x0200 |
32 | #define DSADR 0x0204 | 32 | #define DSADR(n) (0x0204 + ((n) << 4)) |
33 | #define DTADR 0x0208 | 33 | #define DTADR(n) (0x0208 + ((n) << 4)) |
34 | #define DCMD 0x020c | 34 | #define DCMD 0x020c |
35 | 35 | ||
36 | #define DCSR_RUN BIT(31) /* Run Bit (read / write) */ | 36 | #define DCSR_RUN BIT(31) /* Run Bit (read / write) */ |
@@ -277,7 +277,7 @@ static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan) | |||
277 | return; | 277 | return; |
278 | 278 | ||
279 | /* clear the channel mapping in DRCMR */ | 279 | /* clear the channel mapping in DRCMR */ |
280 | reg = DRCMR(pchan->phy->vchan->drcmr); | 280 | reg = DRCMR(pchan->drcmr); |
281 | writel(0, pchan->phy->base + reg); | 281 | writel(0, pchan->phy->base + reg); |
282 | 282 | ||
283 | spin_lock_irqsave(&pdev->phy_lock, flags); | 283 | spin_lock_irqsave(&pdev->phy_lock, flags); |
@@ -748,11 +748,92 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | |||
748 | return 0; | 748 | return 0; |
749 | } | 749 | } |
750 | 750 | ||
751 | static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan, | ||
752 | dma_cookie_t cookie) | ||
753 | { | ||
754 | struct mmp_pdma_desc_sw *sw; | ||
755 | u32 curr, residue = 0; | ||
756 | bool passed = false; | ||
757 | bool cyclic = chan->cyclic_first != NULL; | ||
758 | |||
759 | /* | ||
760 | * If the channel does not have a phy pointer anymore, it has already | ||
761 | * been completed. Therefore, its residue is 0. | ||
762 | */ | ||
763 | if (!chan->phy) | ||
764 | return 0; | ||
765 | |||
766 | if (chan->dir == DMA_DEV_TO_MEM) | ||
767 | curr = readl(chan->phy->base + DTADR(chan->phy->idx)); | ||
768 | else | ||
769 | curr = readl(chan->phy->base + DSADR(chan->phy->idx)); | ||
770 | |||
771 | list_for_each_entry(sw, &chan->chain_running, node) { | ||
772 | u32 start, end, len; | ||
773 | |||
774 | if (chan->dir == DMA_DEV_TO_MEM) | ||
775 | start = sw->desc.dtadr; | ||
776 | else | ||
777 | start = sw->desc.dsadr; | ||
778 | |||
779 | len = sw->desc.dcmd & DCMD_LENGTH; | ||
780 | end = start + len; | ||
781 | |||
782 | /* | ||
783 | * 'passed' will be latched once we found the descriptor which | ||
784 | * lies inside the boundaries of the curr pointer. All | ||
785 | * descriptors that occur in the list _after_ we found that | ||
786 | * partially handled descriptor are still to be processed and | ||
787 | * are hence added to the residual bytes counter. | ||
788 | */ | ||
789 | |||
790 | if (passed) { | ||
791 | residue += len; | ||
792 | } else if (curr >= start && curr <= end) { | ||
793 | residue += end - curr; | ||
794 | passed = true; | ||
795 | } | ||
796 | |||
797 | /* | ||
798 | * Descriptors that have the ENDIRQEN bit set mark the end of a | ||
799 | * transaction chain, and the cookie assigned with it has been | ||
800 | * returned previously from mmp_pdma_tx_submit(). | ||
801 | * | ||
802 | * In case we have multiple transactions in the running chain, | ||
803 | * and the cookie does not match the one the user asked us | ||
804 | * about, reset the state variables and start over. | ||
805 | * | ||
806 | * This logic does not apply to cyclic transactions, where all | ||
807 | * descriptors have the ENDIRQEN bit set, and for which we | ||
808 | * can't have multiple transactions on one channel anyway. | ||
809 | */ | ||
810 | if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN)) | ||
811 | continue; | ||
812 | |||
813 | if (sw->async_tx.cookie == cookie) { | ||
814 | return residue; | ||
815 | } else { | ||
816 | residue = 0; | ||
817 | passed = false; | ||
818 | } | ||
819 | } | ||
820 | |||
821 | /* We should only get here in case of cyclic transactions */ | ||
822 | return residue; | ||
823 | } | ||
824 | |||
751 | static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, | 825 | static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, |
752 | dma_cookie_t cookie, | 826 | dma_cookie_t cookie, |
753 | struct dma_tx_state *txstate) | 827 | struct dma_tx_state *txstate) |
754 | { | 828 | { |
755 | return dma_cookie_status(dchan, cookie, txstate); | 829 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); |
830 | enum dma_status ret; | ||
831 | |||
832 | ret = dma_cookie_status(dchan, cookie, txstate); | ||
833 | if (likely(ret != DMA_ERROR)) | ||
834 | dma_set_residue(txstate, mmp_pdma_residue(chan, cookie)); | ||
835 | |||
836 | return ret; | ||
756 | } | 837 | } |
757 | 838 | ||
758 | /** | 839 | /** |
@@ -858,8 +939,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq) | |||
858 | struct mmp_pdma_chan *chan; | 939 | struct mmp_pdma_chan *chan; |
859 | int ret; | 940 | int ret; |
860 | 941 | ||
861 | chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan), | 942 | chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL); |
862 | GFP_KERNEL); | ||
863 | if (chan == NULL) | 943 | if (chan == NULL) |
864 | return -ENOMEM; | 944 | return -ENOMEM; |
865 | 945 | ||
@@ -946,8 +1026,7 @@ static int mmp_pdma_probe(struct platform_device *op) | |||
946 | irq_num++; | 1026 | irq_num++; |
947 | } | 1027 | } |
948 | 1028 | ||
949 | pdev->phy = devm_kcalloc(pdev->dev, | 1029 | pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy), |
950 | dma_channels, sizeof(struct mmp_pdma_chan), | ||
951 | GFP_KERNEL); | 1030 | GFP_KERNEL); |
952 | if (pdev->phy == NULL) | 1031 | if (pdev->phy == NULL) |
953 | return -ENOMEM; | 1032 | return -ENOMEM; |
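With mmp_pdma_tx_status() now reporting a residue via dma_set_residue(), a dmaengine client can ask how many bytes of a submitted transaction are still outstanding through the standard status call. A minimal client-side sketch; the channel and cookie are assumed to come from an earlier dmaengine_prep_*()/dmaengine_submit() sequence:

	#include <linux/dmaengine.h>

	/* Hypothetical helper: bytes not yet transferred for a given cookie. */
	static unsigned int foo_bytes_pending(struct dma_chan *chan, dma_cookie_t cookie)
	{
		struct dma_tx_state state;
		enum dma_status status;

		status = dmaengine_tx_status(chan, cookie, &state);
		if (status == DMA_COMPLETE)
			return 0;

		/* residue is the number of bytes still to be transferred */
		return state.residue;
	}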
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 448750da4402..2ad43738ac8b 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -2,6 +2,7 @@ | |||
2 | * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008. | 2 | * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008. |
3 | * Copyright (C) Semihalf 2009 | 3 | * Copyright (C) Semihalf 2009 |
4 | * Copyright (C) Ilya Yanok, Emcraft Systems 2010 | 4 | * Copyright (C) Ilya Yanok, Emcraft Systems 2010 |
5 | * Copyright (C) Alexander Popov, Promcontroller 2014 | ||
5 | * | 6 | * |
6 | * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description | 7 | * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description |
7 | * (defines, structures and comments) was taken from MPC5121 DMA driver | 8 | * (defines, structures and comments) was taken from MPC5121 DMA driver |
@@ -29,8 +30,18 @@ | |||
29 | */ | 30 | */ |
30 | 31 | ||
31 | /* | 32 | /* |
32 | * This is initial version of MPC5121 DMA driver. Only memory to memory | 33 | * MPC512x and MPC8308 DMA driver. It supports |
33 | * transfers are supported (tested using dmatest module). | 34 | * memory to memory data transfers (tested using dmatest module) and |
35 | * data transfers between memory and peripheral I/O memory | ||
36 | * by means of slave scatter/gather with these limitations: | ||
37 | * - chunked transfers (described by s/g lists with more than one item) | ||
38 | * are refused as long as proper support for scatter/gather is missing; | ||
39 | * - transfers on MPC8308 always start from software as this SoC appears | ||
40 | * not to have external request lines for peripheral flow control; | ||
41 | * - only peripheral devices with 4-byte FIFO access register are supported; | ||
42 | * - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently | ||
43 | * source and destination addresses must be 4-byte aligned | ||
44 | * and transfer size must be aligned on (4 * maxburst) boundary; | ||
34 | */ | 45 | */ |
35 | 46 | ||
36 | #include <linux/module.h> | 47 | #include <linux/module.h> |
@@ -52,9 +63,17 @@ | |||
52 | #define MPC_DMA_DESCRIPTORS 64 | 63 | #define MPC_DMA_DESCRIPTORS 64 |
53 | 64 | ||
54 | /* Macro definitions */ | 65 | /* Macro definitions */ |
55 | #define MPC_DMA_CHANNELS 64 | ||
56 | #define MPC_DMA_TCD_OFFSET 0x1000 | 66 | #define MPC_DMA_TCD_OFFSET 0x1000 |
57 | 67 | ||
68 | /* | ||
69 | * Maximum channel counts for individual hardware variants | ||
70 | * and the maximum channel count over all supported controllers, | ||
71 | * used for data structure size | ||
72 | */ | ||
73 | #define MPC8308_DMACHAN_MAX 16 | ||
74 | #define MPC512x_DMACHAN_MAX 64 | ||
75 | #define MPC_DMA_CHANNELS 64 | ||
76 | |||
58 | /* Arbitration mode of group and channel */ | 77 | /* Arbitration mode of group and channel */ |
59 | #define MPC_DMA_DMACR_EDCG (1 << 31) | 78 | #define MPC_DMA_DMACR_EDCG (1 << 31) |
60 | #define MPC_DMA_DMACR_ERGA (1 << 3) | 79 | #define MPC_DMA_DMACR_ERGA (1 << 3) |
@@ -181,6 +200,7 @@ struct mpc_dma_desc { | |||
181 | dma_addr_t tcd_paddr; | 200 | dma_addr_t tcd_paddr; |
182 | int error; | 201 | int error; |
183 | struct list_head node; | 202 | struct list_head node; |
203 | int will_access_peripheral; | ||
184 | }; | 204 | }; |
185 | 205 | ||
186 | struct mpc_dma_chan { | 206 | struct mpc_dma_chan { |
@@ -193,6 +213,12 @@ struct mpc_dma_chan { | |||
193 | struct mpc_dma_tcd *tcd; | 213 | struct mpc_dma_tcd *tcd; |
194 | dma_addr_t tcd_paddr; | 214 | dma_addr_t tcd_paddr; |
195 | 215 | ||
216 | /* Settings for access to peripheral FIFO */ | ||
217 | dma_addr_t src_per_paddr; | ||
218 | u32 src_tcd_nunits; | ||
219 | dma_addr_t dst_per_paddr; | ||
220 | u32 dst_tcd_nunits; | ||
221 | |||
196 | /* Lock for this structure */ | 222 | /* Lock for this structure */ |
197 | spinlock_t lock; | 223 | spinlock_t lock; |
198 | }; | 224 | }; |
@@ -243,8 +269,23 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan) | |||
243 | struct mpc_dma_desc *mdesc; | 269 | struct mpc_dma_desc *mdesc; |
244 | int cid = mchan->chan.chan_id; | 270 | int cid = mchan->chan.chan_id; |
245 | 271 | ||
246 | /* Move all queued descriptors to active list */ | 272 | while (!list_empty(&mchan->queued)) { |
247 | list_splice_tail_init(&mchan->queued, &mchan->active); | 273 | mdesc = list_first_entry(&mchan->queued, |
274 | struct mpc_dma_desc, node); | ||
275 | /* | ||
276 | * Grab either several mem-to-mem transfer descriptors | ||
277 | * or one peripheral transfer descriptor, | ||
278 | * don't mix mem-to-mem and peripheral transfer descriptors | ||
279 | * within the same 'active' list. | ||
280 | */ | ||
281 | if (mdesc->will_access_peripheral) { | ||
282 | if (list_empty(&mchan->active)) | ||
283 | list_move_tail(&mdesc->node, &mchan->active); | ||
284 | break; | ||
285 | } else { | ||
286 | list_move_tail(&mdesc->node, &mchan->active); | ||
287 | } | ||
288 | } | ||
248 | 289 | ||
249 | /* Chain descriptors into one transaction */ | 290 | /* Chain descriptors into one transaction */ |
250 | list_for_each_entry(mdesc, &mchan->active, node) { | 291 | list_for_each_entry(mdesc, &mchan->active, node) { |
@@ -270,7 +311,17 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan) | |||
270 | 311 | ||
271 | if (first != prev) | 312 | if (first != prev) |
272 | mdma->tcd[cid].e_sg = 1; | 313 | mdma->tcd[cid].e_sg = 1; |
273 | out_8(&mdma->regs->dmassrt, cid); | 314 | |
315 | if (mdma->is_mpc8308) { | ||
316 | /* MPC8308, no request lines, software initiated start */ | ||
317 | out_8(&mdma->regs->dmassrt, cid); | ||
318 | } else if (first->will_access_peripheral) { | ||
319 | /* Peripherals involved, start by external request signal */ | ||
320 | out_8(&mdma->regs->dmaserq, cid); | ||
321 | } else { | ||
322 | /* Memory to memory transfer, software initiated start */ | ||
323 | out_8(&mdma->regs->dmassrt, cid); | ||
324 | } | ||
274 | } | 325 | } |
275 | 326 | ||
276 | /* Handle interrupt on one half of DMA controller (32 channels) */ | 327 | /* Handle interrupt on one half of DMA controller (32 channels) */ |
@@ -588,6 +639,7 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, | |||
588 | } | 639 | } |
589 | 640 | ||
590 | mdesc->error = 0; | 641 | mdesc->error = 0; |
642 | mdesc->will_access_peripheral = 0; | ||
591 | tcd = mdesc->tcd; | 643 | tcd = mdesc->tcd; |
592 | 644 | ||
593 | /* Prepare Transfer Control Descriptor for this transaction */ | 645 | /* Prepare Transfer Control Descriptor for this transaction */ |
@@ -635,6 +687,193 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, | |||
635 | return &mdesc->desc; | 687 | return &mdesc->desc; |
636 | } | 688 | } |
637 | 689 | ||
690 | static struct dma_async_tx_descriptor * | ||
691 | mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | ||
692 | unsigned int sg_len, enum dma_transfer_direction direction, | ||
693 | unsigned long flags, void *context) | ||
694 | { | ||
695 | struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); | ||
696 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); | ||
697 | struct mpc_dma_desc *mdesc = NULL; | ||
698 | dma_addr_t per_paddr; | ||
699 | u32 tcd_nunits; | ||
700 | struct mpc_dma_tcd *tcd; | ||
701 | unsigned long iflags; | ||
702 | struct scatterlist *sg; | ||
703 | size_t len; | ||
704 | int iter, i; | ||
705 | |||
706 | /* Currently there is no proper support for scatter/gather */ | ||
707 | if (sg_len != 1) | ||
708 | return NULL; | ||
709 | |||
710 | if (!is_slave_direction(direction)) | ||
711 | return NULL; | ||
712 | |||
713 | for_each_sg(sgl, sg, sg_len, i) { | ||
714 | spin_lock_irqsave(&mchan->lock, iflags); | ||
715 | |||
716 | mdesc = list_first_entry(&mchan->free, | ||
717 | struct mpc_dma_desc, node); | ||
718 | if (!mdesc) { | ||
719 | spin_unlock_irqrestore(&mchan->lock, iflags); | ||
720 | /* Try to free completed descriptors */ | ||
721 | mpc_dma_process_completed(mdma); | ||
722 | return NULL; | ||
723 | } | ||
724 | |||
725 | list_del(&mdesc->node); | ||
726 | |||
727 | if (direction == DMA_DEV_TO_MEM) { | ||
728 | per_paddr = mchan->src_per_paddr; | ||
729 | tcd_nunits = mchan->src_tcd_nunits; | ||
730 | } else { | ||
731 | per_paddr = mchan->dst_per_paddr; | ||
732 | tcd_nunits = mchan->dst_tcd_nunits; | ||
733 | } | ||
734 | |||
735 | spin_unlock_irqrestore(&mchan->lock, iflags); | ||
736 | |||
737 | if (per_paddr == 0 || tcd_nunits == 0) | ||
738 | goto err_prep; | ||
739 | |||
740 | mdesc->error = 0; | ||
741 | mdesc->will_access_peripheral = 1; | ||
742 | |||
743 | /* Prepare Transfer Control Descriptor for this transaction */ | ||
744 | tcd = mdesc->tcd; | ||
745 | |||
746 | memset(tcd, 0, sizeof(struct mpc_dma_tcd)); | ||
747 | |||
748 | if (!IS_ALIGNED(sg_dma_address(sg), 4)) | ||
749 | goto err_prep; | ||
750 | |||
751 | if (direction == DMA_DEV_TO_MEM) { | ||
752 | tcd->saddr = per_paddr; | ||
753 | tcd->daddr = sg_dma_address(sg); | ||
754 | tcd->soff = 0; | ||
755 | tcd->doff = 4; | ||
756 | } else { | ||
757 | tcd->saddr = sg_dma_address(sg); | ||
758 | tcd->daddr = per_paddr; | ||
759 | tcd->soff = 4; | ||
760 | tcd->doff = 0; | ||
761 | } | ||
762 | |||
763 | tcd->ssize = MPC_DMA_TSIZE_4; | ||
764 | tcd->dsize = MPC_DMA_TSIZE_4; | ||
765 | |||
766 | len = sg_dma_len(sg); | ||
767 | tcd->nbytes = tcd_nunits * 4; | ||
768 | if (!IS_ALIGNED(len, tcd->nbytes)) | ||
769 | goto err_prep; | ||
770 | |||
771 | iter = len / tcd->nbytes; | ||
772 | if (iter >= 1 << 15) { | ||
773 | /* len is too big */ | ||
774 | goto err_prep; | ||
775 | } | ||
776 | /* citer_linkch contains the high bits of iter */ | ||
777 | tcd->biter = iter & 0x1ff; | ||
778 | tcd->biter_linkch = iter >> 9; | ||
779 | tcd->citer = tcd->biter; | ||
780 | tcd->citer_linkch = tcd->biter_linkch; | ||
781 | |||
782 | tcd->e_sg = 0; | ||
783 | tcd->d_req = 1; | ||
784 | |||
785 | /* Place descriptor in prepared list */ | ||
786 | spin_lock_irqsave(&mchan->lock, iflags); | ||
787 | list_add_tail(&mdesc->node, &mchan->prepared); | ||
788 | spin_unlock_irqrestore(&mchan->lock, iflags); | ||
789 | } | ||
790 | |||
791 | return &mdesc->desc; | ||
792 | |||
793 | err_prep: | ||
794 | /* Put the descriptor back */ | ||
795 | spin_lock_irqsave(&mchan->lock, iflags); | ||
796 | list_add_tail(&mdesc->node, &mchan->free); | ||
797 | spin_unlock_irqrestore(&mchan->lock, iflags); | ||
798 | |||
799 | return NULL; | ||
800 | } | ||
801 | |||
802 | static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
803 | unsigned long arg) | ||
804 | { | ||
805 | struct mpc_dma_chan *mchan; | ||
806 | struct mpc_dma *mdma; | ||
807 | struct dma_slave_config *cfg; | ||
808 | unsigned long flags; | ||
809 | |||
810 | mchan = dma_chan_to_mpc_dma_chan(chan); | ||
811 | switch (cmd) { | ||
812 | case DMA_TERMINATE_ALL: | ||
813 | /* Disable channel requests */ | ||
814 | mdma = dma_chan_to_mpc_dma(chan); | ||
815 | |||
816 | spin_lock_irqsave(&mchan->lock, flags); | ||
817 | |||
818 | out_8(&mdma->regs->dmacerq, chan->chan_id); | ||
819 | list_splice_tail_init(&mchan->prepared, &mchan->free); | ||
820 | list_splice_tail_init(&mchan->queued, &mchan->free); | ||
821 | list_splice_tail_init(&mchan->active, &mchan->free); | ||
822 | |||
823 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
824 | |||
825 | return 0; | ||
826 | |||
827 | case DMA_SLAVE_CONFIG: | ||
828 | /* | ||
829 | * Software constraints: | ||
830 | * - only transfers between a peripheral device and | ||
831 | * memory are supported; | ||
832 | * - only peripheral devices with 4-byte FIFO access register | ||
833 | * are supported; | ||
834 | * - minimal transfer chunk is 4 bytes and consequently | ||
835 | * source and destination addresses must be 4-byte aligned | ||
836 | * and transfer size must be aligned on (4 * maxburst) | ||
837 | * boundary; | ||
838 | * - during the transfer RAM address is being incremented by | ||
839 | * the size of minimal transfer chunk; | ||
840 | * - peripheral port's address is constant during the transfer. | ||
841 | */ | ||
842 | |||
843 | cfg = (void *)arg; | ||
844 | |||
845 | if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || | ||
846 | cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || | ||
847 | !IS_ALIGNED(cfg->src_addr, 4) || | ||
848 | !IS_ALIGNED(cfg->dst_addr, 4)) { | ||
849 | return -EINVAL; | ||
850 | } | ||
851 | |||
852 | spin_lock_irqsave(&mchan->lock, flags); | ||
853 | |||
854 | mchan->src_per_paddr = cfg->src_addr; | ||
855 | mchan->src_tcd_nunits = cfg->src_maxburst; | ||
856 | mchan->dst_per_paddr = cfg->dst_addr; | ||
857 | mchan->dst_tcd_nunits = cfg->dst_maxburst; | ||
858 | |||
859 | /* Apply defaults */ | ||
860 | if (mchan->src_tcd_nunits == 0) | ||
861 | mchan->src_tcd_nunits = 1; | ||
862 | if (mchan->dst_tcd_nunits == 0) | ||
863 | mchan->dst_tcd_nunits = 1; | ||
864 | |||
865 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
866 | |||
867 | return 0; | ||
868 | |||
869 | default: | ||
870 | /* Unknown command */ | ||
871 | break; | ||
872 | } | ||
873 | |||
874 | return -ENXIO; | ||
875 | } | ||
876 | |||
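Given the constraints spelled out in the DMA_SLAVE_CONFIG comment above (4-byte FIFO access register, 4-byte aligned addresses, maxburst counted in 4-byte units), a peripheral driver using this controller would configure its channel roughly as in the following sketch; chan and fifo_phys are hypothetical placeholders supplied by the client driver:

	#include <linux/dmaengine.h>

	/* Hypothetical RX setup for a peripheral with a 4-byte wide FIFO register. */
	static int foo_cfg_rx(struct dma_chan *chan, dma_addr_t fifo_phys)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_DEV_TO_MEM,
			.src_addr	= fifo_phys,	/* must be 4-byte aligned */
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst	= 16,	/* transfer size must align on 4 * 16 bytes */
		};

		return dmaengine_slave_config(chan, &cfg);
	}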
638 | static int mpc_dma_probe(struct platform_device *op) | 877 | static int mpc_dma_probe(struct platform_device *op) |
639 | { | 878 | { |
640 | struct device_node *dn = op->dev.of_node; | 879 | struct device_node *dn = op->dev.of_node; |
@@ -649,13 +888,15 @@ static int mpc_dma_probe(struct platform_device *op) | |||
649 | mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL); | 888 | mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL); |
650 | if (!mdma) { | 889 | if (!mdma) { |
651 | dev_err(dev, "Memory exhausted!\n"); | 890 | dev_err(dev, "Memory exhausted!\n"); |
652 | return -ENOMEM; | 891 | retval = -ENOMEM; |
892 | goto err; | ||
653 | } | 893 | } |
654 | 894 | ||
655 | mdma->irq = irq_of_parse_and_map(dn, 0); | 895 | mdma->irq = irq_of_parse_and_map(dn, 0); |
656 | if (mdma->irq == NO_IRQ) { | 896 | if (mdma->irq == NO_IRQ) { |
657 | dev_err(dev, "Error mapping IRQ!\n"); | 897 | dev_err(dev, "Error mapping IRQ!\n"); |
658 | return -EINVAL; | 898 | retval = -EINVAL; |
899 | goto err; | ||
659 | } | 900 | } |
660 | 901 | ||
661 | if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) { | 902 | if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) { |
@@ -663,14 +904,15 @@ static int mpc_dma_probe(struct platform_device *op) | |||
663 | mdma->irq2 = irq_of_parse_and_map(dn, 1); | 904 | mdma->irq2 = irq_of_parse_and_map(dn, 1); |
664 | if (mdma->irq2 == NO_IRQ) { | 905 | if (mdma->irq2 == NO_IRQ) { |
665 | dev_err(dev, "Error mapping IRQ!\n"); | 906 | dev_err(dev, "Error mapping IRQ!\n"); |
666 | return -EINVAL; | 907 | retval = -EINVAL; |
908 | goto err_dispose1; | ||
667 | } | 909 | } |
668 | } | 910 | } |
669 | 911 | ||
670 | retval = of_address_to_resource(dn, 0, &res); | 912 | retval = of_address_to_resource(dn, 0, &res); |
671 | if (retval) { | 913 | if (retval) { |
672 | dev_err(dev, "Error parsing memory region!\n"); | 914 | dev_err(dev, "Error parsing memory region!\n"); |
673 | return retval; | 915 | goto err_dispose2; |
674 | } | 916 | } |
675 | 917 | ||
676 | regs_start = res.start; | 918 | regs_start = res.start; |
@@ -678,31 +920,34 @@ static int mpc_dma_probe(struct platform_device *op) | |||
678 | 920 | ||
679 | if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) { | 921 | if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) { |
680 | dev_err(dev, "Error requesting memory region!\n"); | 922 | dev_err(dev, "Error requesting memory region!\n"); |
681 | return -EBUSY; | 923 | retval = -EBUSY; |
924 | goto err_dispose2; | ||
682 | } | 925 | } |
683 | 926 | ||
684 | mdma->regs = devm_ioremap(dev, regs_start, regs_size); | 927 | mdma->regs = devm_ioremap(dev, regs_start, regs_size); |
685 | if (!mdma->regs) { | 928 | if (!mdma->regs) { |
686 | dev_err(dev, "Error mapping memory region!\n"); | 929 | dev_err(dev, "Error mapping memory region!\n"); |
687 | return -ENOMEM; | 930 | retval = -ENOMEM; |
931 | goto err_dispose2; | ||
688 | } | 932 | } |
689 | 933 | ||
690 | mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs) | 934 | mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs) |
691 | + MPC_DMA_TCD_OFFSET); | 935 | + MPC_DMA_TCD_OFFSET); |
692 | 936 | ||
693 | retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME, | 937 | retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma); |
694 | mdma); | ||
695 | if (retval) { | 938 | if (retval) { |
696 | dev_err(dev, "Error requesting IRQ!\n"); | 939 | dev_err(dev, "Error requesting IRQ!\n"); |
697 | return -EINVAL; | 940 | retval = -EINVAL; |
941 | goto err_dispose2; | ||
698 | } | 942 | } |
699 | 943 | ||
700 | if (mdma->is_mpc8308) { | 944 | if (mdma->is_mpc8308) { |
701 | retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0, | 945 | retval = request_irq(mdma->irq2, &mpc_dma_irq, 0, |
702 | DRV_NAME, mdma); | 946 | DRV_NAME, mdma); |
703 | if (retval) { | 947 | if (retval) { |
704 | dev_err(dev, "Error requesting IRQ2!\n"); | 948 | dev_err(dev, "Error requesting IRQ2!\n"); |
705 | return -EINVAL; | 949 | retval = -EINVAL; |
950 | goto err_free1; | ||
706 | } | 951 | } |
707 | } | 952 | } |
708 | 953 | ||
@@ -710,18 +955,21 @@ static int mpc_dma_probe(struct platform_device *op) | |||
710 | 955 | ||
711 | dma = &mdma->dma; | 956 | dma = &mdma->dma; |
712 | dma->dev = dev; | 957 | dma->dev = dev; |
713 | if (!mdma->is_mpc8308) | 958 | if (mdma->is_mpc8308) |
714 | dma->chancnt = MPC_DMA_CHANNELS; | 959 | dma->chancnt = MPC8308_DMACHAN_MAX; |
715 | else | 960 | else |
716 | dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */ | 961 | dma->chancnt = MPC512x_DMACHAN_MAX; |
717 | dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; | 962 | dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; |
718 | dma->device_free_chan_resources = mpc_dma_free_chan_resources; | 963 | dma->device_free_chan_resources = mpc_dma_free_chan_resources; |
719 | dma->device_issue_pending = mpc_dma_issue_pending; | 964 | dma->device_issue_pending = mpc_dma_issue_pending; |
720 | dma->device_tx_status = mpc_dma_tx_status; | 965 | dma->device_tx_status = mpc_dma_tx_status; |
721 | dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; | 966 | dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; |
967 | dma->device_prep_slave_sg = mpc_dma_prep_slave_sg; | ||
968 | dma->device_control = mpc_dma_device_control; | ||
722 | 969 | ||
723 | INIT_LIST_HEAD(&dma->channels); | 970 | INIT_LIST_HEAD(&dma->channels); |
724 | dma_cap_set(DMA_MEMCPY, dma->cap_mask); | 971 | dma_cap_set(DMA_MEMCPY, dma->cap_mask); |
972 | dma_cap_set(DMA_SLAVE, dma->cap_mask); | ||
725 | 973 | ||
726 | for (i = 0; i < dma->chancnt; i++) { | 974 | for (i = 0; i < dma->chancnt; i++) { |
727 | mchan = &mdma->channels[i]; | 975 | mchan = &mdma->channels[i]; |
@@ -747,7 +995,19 @@ static int mpc_dma_probe(struct platform_device *op) | |||
747 | * - Round-robin group arbitration, | 995 | * - Round-robin group arbitration, |
748 | * - Round-robin channel arbitration. | 996 | * - Round-robin channel arbitration. |
749 | */ | 997 | */ |
750 | if (!mdma->is_mpc8308) { | 998 | if (mdma->is_mpc8308) { |
999 | /* MPC8308 has 16 channels and lacks some registers */ | ||
1000 | out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA); | ||
1001 | |||
1002 | /* enable snooping */ | ||
1003 | out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE); | ||
1004 | /* Disable error interrupts */ | ||
1005 | out_be32(&mdma->regs->dmaeeil, 0); | ||
1006 | |||
1007 | /* Clear interrupt status */ | ||
1008 | out_be32(&mdma->regs->dmaintl, 0xFFFF); | ||
1009 | out_be32(&mdma->regs->dmaerrl, 0xFFFF); | ||
1010 | } else { | ||
751 | out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | | 1011 | out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | |
752 | MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA); | 1012 | MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA); |
753 | 1013 | ||
@@ -768,29 +1028,28 @@ static int mpc_dma_probe(struct platform_device *op) | |||
768 | /* Route interrupts to IPIC */ | 1028 | /* Route interrupts to IPIC */ |
769 | out_be32(&mdma->regs->dmaihsa, 0); | 1029 | out_be32(&mdma->regs->dmaihsa, 0); |
770 | out_be32(&mdma->regs->dmailsa, 0); | 1030 | out_be32(&mdma->regs->dmailsa, 0); |
771 | } else { | ||
772 | /* MPC8308 has 16 channels and lacks some registers */ | ||
773 | out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA); | ||
774 | |||
775 | /* enable snooping */ | ||
776 | out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE); | ||
777 | /* Disable error interrupts */ | ||
778 | out_be32(&mdma->regs->dmaeeil, 0); | ||
779 | |||
780 | /* Clear interrupts status */ | ||
781 | out_be32(&mdma->regs->dmaintl, 0xFFFF); | ||
782 | out_be32(&mdma->regs->dmaerrl, 0xFFFF); | ||
783 | } | 1031 | } |
784 | 1032 | ||
785 | /* Register DMA engine */ | 1033 | /* Register DMA engine */ |
786 | dev_set_drvdata(dev, mdma); | 1034 | dev_set_drvdata(dev, mdma); |
787 | retval = dma_async_device_register(dma); | 1035 | retval = dma_async_device_register(dma); |
788 | if (retval) { | 1036 | if (retval) |
789 | devm_free_irq(dev, mdma->irq, mdma); | 1037 | goto err_free2; |
790 | irq_dispose_mapping(mdma->irq); | ||
791 | } | ||
792 | 1038 | ||
793 | return retval; | 1039 | return retval; |
1040 | |||
1041 | err_free2: | ||
1042 | if (mdma->is_mpc8308) | ||
1043 | free_irq(mdma->irq2, mdma); | ||
1044 | err_free1: | ||
1045 | free_irq(mdma->irq, mdma); | ||
1046 | err_dispose2: | ||
1047 | if (mdma->is_mpc8308) | ||
1048 | irq_dispose_mapping(mdma->irq2); | ||
1049 | err_dispose1: | ||
1050 | irq_dispose_mapping(mdma->irq); | ||
1051 | err: | ||
1052 | return retval; | ||
794 | } | 1053 | } |
795 | 1054 | ||
796 | static int mpc_dma_remove(struct platform_device *op) | 1055 | static int mpc_dma_remove(struct platform_device *op) |
@@ -799,7 +1058,11 @@ static int mpc_dma_remove(struct platform_device *op) | |||
799 | struct mpc_dma *mdma = dev_get_drvdata(dev); | 1058 | struct mpc_dma *mdma = dev_get_drvdata(dev); |
800 | 1059 | ||
801 | dma_async_device_unregister(&mdma->dma); | 1060 | dma_async_device_unregister(&mdma->dma); |
802 | devm_free_irq(dev, mdma->irq, mdma); | 1061 | if (mdma->is_mpc8308) { |
1062 | free_irq(mdma->irq2, mdma); | ||
1063 | irq_dispose_mapping(mdma->irq2); | ||
1064 | } | ||
1065 | free_irq(mdma->irq, mdma); | ||
803 | irq_dispose_mapping(mdma->irq); | 1066 | irq_dispose_mapping(mdma->irq); |
804 | 1067 | ||
805 | return 0; | 1068 | return 0; |
@@ -807,6 +1070,7 @@ static int mpc_dma_remove(struct platform_device *op) | |||
807 | 1070 | ||
808 | static struct of_device_id mpc_dma_match[] = { | 1071 | static struct of_device_id mpc_dma_match[] = { |
809 | { .compatible = "fsl,mpc5121-dma", }, | 1072 | { .compatible = "fsl,mpc5121-dma", }, |
1073 | { .compatible = "fsl,mpc8308-dma", }, | ||
810 | {}, | 1074 | {}, |
811 | }; | 1075 | }; |
812 | 1076 | ||
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 05fa548bd659..9f9ca9fe5ce6 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/dma-mapping.h> | 21 | #include <linux/dma-mapping.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
24 | #include <linux/slab.h> | ||
24 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
25 | #include <linux/module.h> | 26 | #include <linux/module.h> |
26 | #include <linux/pch_dma.h> | 27 | #include <linux/pch_dma.h> |
@@ -996,7 +997,7 @@ static void pch_dma_remove(struct pci_dev *pdev) | |||
996 | #define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810 | 997 | #define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810 |
997 | #define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815 | 998 | #define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815 |
998 | 999 | ||
999 | DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { | 1000 | const struct pci_device_id pch_dma_id_table[] = { |
1000 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, | 1001 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, |
1001 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 }, | 1002 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 }, |
1002 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */ | 1003 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */ |
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index b209a0f17344..012520c9fd79 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c | |||
@@ -164,6 +164,7 @@ struct s3c24xx_sg { | |||
164 | * @disrcc: value for source control register | 164 | * @disrcc: value for source control register |
165 | * @didstc: value for destination control register | 165 | * @didstc: value for destination control register |
166 | * @dcon: base value for dcon register | 166 | * @dcon: base value for dcon register |
167 | * @cyclic: indicate cyclic transfer | ||
167 | */ | 168 | */ |
168 | struct s3c24xx_txd { | 169 | struct s3c24xx_txd { |
169 | struct virt_dma_desc vd; | 170 | struct virt_dma_desc vd; |
@@ -173,6 +174,7 @@ struct s3c24xx_txd { | |||
173 | u32 disrcc; | 174 | u32 disrcc; |
174 | u32 didstc; | 175 | u32 didstc; |
175 | u32 dcon; | 176 | u32 dcon; |
177 | bool cyclic; | ||
176 | }; | 178 | }; |
177 | 179 | ||
178 | struct s3c24xx_dma_chan; | 180 | struct s3c24xx_dma_chan; |
@@ -669,8 +671,10 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data) | |||
669 | /* when more sg's are in this txd, start the next one */ | 671 | /* when more sg's are in this txd, start the next one */ |
670 | if (!list_is_last(txd->at, &txd->dsg_list)) { | 672 | if (!list_is_last(txd->at, &txd->dsg_list)) { |
671 | txd->at = txd->at->next; | 673 | txd->at = txd->at->next; |
674 | if (txd->cyclic) | ||
675 | vchan_cyclic_callback(&txd->vd); | ||
672 | s3c24xx_dma_start_next_sg(s3cchan, txd); | 676 | s3c24xx_dma_start_next_sg(s3cchan, txd); |
673 | } else { | 677 | } else if (!txd->cyclic) { |
674 | s3cchan->at = NULL; | 678 | s3cchan->at = NULL; |
675 | vchan_cookie_complete(&txd->vd); | 679 | vchan_cookie_complete(&txd->vd); |
676 | 680 | ||
@@ -682,6 +686,12 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data) | |||
682 | s3c24xx_dma_start_next_txd(s3cchan); | 686 | s3c24xx_dma_start_next_txd(s3cchan); |
683 | else | 687 | else |
684 | s3c24xx_dma_phy_free(s3cchan); | 688 | s3c24xx_dma_phy_free(s3cchan); |
689 | } else { | ||
690 | vchan_cyclic_callback(&txd->vd); | ||
691 | |||
692 | /* Cyclic: reset at beginning */ | ||
693 | txd->at = txd->dsg_list.next; | ||
694 | s3c24xx_dma_start_next_sg(s3cchan, txd); | ||
685 | } | 695 | } |
686 | } | 696 | } |
687 | spin_unlock(&s3cchan->vc.lock); | 697 | spin_unlock(&s3cchan->vc.lock); |
@@ -877,6 +887,104 @@ static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy( | |||
877 | return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); | 887 | return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); |
878 | } | 888 | } |
879 | 889 | ||
890 | static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic( | ||
891 | struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, | ||
892 | enum dma_transfer_direction direction, unsigned long flags, | ||
893 | void *context) | ||
894 | { | ||
895 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); | ||
896 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
897 | const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; | ||
898 | struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id]; | ||
899 | struct s3c24xx_txd *txd; | ||
900 | struct s3c24xx_sg *dsg; | ||
901 | unsigned sg_len; | ||
902 | dma_addr_t slave_addr; | ||
903 | u32 hwcfg = 0; | ||
904 | int i; | ||
905 | |||
906 | dev_dbg(&s3cdma->pdev->dev, | ||
907 | "prepare cyclic transaction of %zu bytes with period %zu from %s\n", | ||
908 | size, period, s3cchan->name); | ||
909 | |||
910 | if (!is_slave_direction(direction)) { | ||
911 | dev_err(&s3cdma->pdev->dev, | ||
912 | "direction %d unsupported\n", direction); | ||
913 | return NULL; | ||
914 | } | ||
915 | |||
916 | txd = s3c24xx_dma_get_txd(); | ||
917 | if (!txd) | ||
918 | return NULL; | ||
919 | |||
920 | txd->cyclic = 1; | ||
921 | |||
922 | if (cdata->handshake) | ||
923 | txd->dcon |= S3C24XX_DCON_HANDSHAKE; | ||
924 | |||
925 | switch (cdata->bus) { | ||
926 | case S3C24XX_DMA_APB: | ||
927 | txd->dcon |= S3C24XX_DCON_SYNC_PCLK; | ||
928 | hwcfg |= S3C24XX_DISRCC_LOC_APB; | ||
929 | break; | ||
930 | case S3C24XX_DMA_AHB: | ||
931 | txd->dcon |= S3C24XX_DCON_SYNC_HCLK; | ||
932 | hwcfg |= S3C24XX_DISRCC_LOC_AHB; | ||
933 | break; | ||
934 | } | ||
935 | |||
936 | /* | ||
937 | * Always assume our peripheral destination is a fixed | ||
938 | * address in memory. | ||
939 | */ | ||
940 | hwcfg |= S3C24XX_DISRCC_INC_FIXED; | ||
941 | |||
942 | /* | ||
943 | * Individual dma operations are requested by the slave, | ||
944 | * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE). | ||
945 | */ | ||
946 | txd->dcon |= S3C24XX_DCON_SERV_SINGLE; | ||
947 | |||
948 | if (direction == DMA_MEM_TO_DEV) { | ||
949 | txd->disrcc = S3C24XX_DISRCC_LOC_AHB | | ||
950 | S3C24XX_DISRCC_INC_INCREMENT; | ||
951 | txd->didstc = hwcfg; | ||
952 | slave_addr = s3cchan->cfg.dst_addr; | ||
953 | txd->width = s3cchan->cfg.dst_addr_width; | ||
954 | } else { | ||
955 | txd->disrcc = hwcfg; | ||
956 | txd->didstc = S3C24XX_DIDSTC_LOC_AHB | | ||
957 | S3C24XX_DIDSTC_INC_INCREMENT; | ||
958 | slave_addr = s3cchan->cfg.src_addr; | ||
959 | txd->width = s3cchan->cfg.src_addr_width; | ||
960 | } | ||
961 | |||
962 | sg_len = size / period; | ||
963 | |||
964 | for (i = 0; i < sg_len; i++) { | ||
965 | dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT); | ||
966 | if (!dsg) { | ||
967 | s3c24xx_dma_free_txd(txd); | ||
968 | return NULL; | ||
969 | } | ||
970 | list_add_tail(&dsg->node, &txd->dsg_list); | ||
971 | |||
972 | dsg->len = period; | ||
973 | /* Check last period length */ | ||
974 | if (i == sg_len - 1) | ||
975 | dsg->len = size - period * i; | ||
976 | if (direction == DMA_MEM_TO_DEV) { | ||
977 | dsg->src_addr = addr + period * i; | ||
978 | dsg->dst_addr = slave_addr; | ||
979 | } else { /* DMA_DEV_TO_MEM */ | ||
980 | dsg->src_addr = slave_addr; | ||
981 | dsg->dst_addr = addr + period * i; | ||
982 | } | ||
983 | } | ||
984 | |||
985 | return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); | ||
986 | } | ||
987 | |||
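As a usage note (not part of the patch), a client of the new cyclic support would typically prepare a ring buffer, attach a per-period callback and kick the channel as sketched below; the function name, callback and buffer parameters are placeholders:

#include <linux/dmaengine.h>

/* Hypothetical caller sketch: start a cyclic (e.g. audio-style) transfer. */
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len,
				void (*period_elapsed)(void *), void *arg)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	/* Invoked once per elapsed period via vchan_cyclic_callback() above */
	desc->callback = period_elapsed;
	desc->callback_param = arg;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}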
880 | static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg( | 988 | static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg( |
881 | struct dma_chan *chan, struct scatterlist *sgl, | 989 | struct dma_chan *chan, struct scatterlist *sgl, |
882 | unsigned int sg_len, enum dma_transfer_direction direction, | 990 | unsigned int sg_len, enum dma_transfer_direction direction, |
@@ -961,7 +1069,6 @@ static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg( | |||
961 | dsg->src_addr = slave_addr; | 1069 | dsg->src_addr = slave_addr; |
962 | dsg->dst_addr = sg_dma_address(sg); | 1070 | dsg->dst_addr = sg_dma_address(sg); |
963 | } | 1071 | } |
964 | break; | ||
965 | } | 1072 | } |
966 | 1073 | ||
967 | return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); | 1074 | return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); |
@@ -1198,6 +1305,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) | |||
1198 | 1305 | ||
1199 | /* Initialize slave engine for SoC internal dedicated peripherals */ | 1306 | /* Initialize slave engine for SoC internal dedicated peripherals */ |
1200 | dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); | 1307 | dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); |
1308 | dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask); | ||
1201 | dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask); | 1309 | dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask); |
1202 | s3cdma->slave.dev = &pdev->dev; | 1310 | s3cdma->slave.dev = &pdev->dev; |
1203 | s3cdma->slave.device_alloc_chan_resources = | 1311 | s3cdma->slave.device_alloc_chan_resources = |
@@ -1207,6 +1315,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) | |||
1207 | s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status; | 1315 | s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status; |
1208 | s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending; | 1316 | s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending; |
1209 | s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg; | 1317 | s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg; |
1318 | s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic; | ||
1210 | s3cdma->slave.device_control = s3c24xx_dma_control; | 1319 | s3cdma->slave.device_control = s3c24xx_dma_control; |
1211 | 1320 | ||
1212 | /* Register as many memcpy channels as there are physical channels */ | 1321 | /* Register as many memcpy channels as there are physical channels */ |
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index b4c813831006..0f719816c91b 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | config SH_DMAE_BASE | 5 | config SH_DMAE_BASE |
6 | bool "Renesas SuperH DMA Engine support" | 6 | bool "Renesas SuperH DMA Engine support" |
7 | depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) | 7 | depends on (SUPERH && SH_DMA) || ARCH_SHMOBILE || COMPILE_TEST |
8 | depends on !SH_DMA_API | 8 | depends on !SH_DMA_API |
9 | default y | 9 | default y |
10 | select DMA_ENGINE | 10 | select DMA_ENGINE |
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c index 3083d901a414..b212d9471ab5 100644 --- a/drivers/dma/sh/rcar-hpbdma.c +++ b/drivers/dma/sh/rcar-hpbdma.c | |||
@@ -18,6 +18,7 @@ | |||
18 | 18 | ||
19 | #include <linux/dmaengine.h> | 19 | #include <linux/dmaengine.h> |
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/err.h> | ||
21 | #include <linux/init.h> | 22 | #include <linux/init.h> |
22 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
23 | #include <linux/module.h> | 24 | #include <linux/module.h> |
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 52396771acbe..b35007e21e6b 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c | |||
@@ -73,8 +73,7 @@ static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan) | |||
73 | static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) | 73 | static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) |
74 | { | 74 | { |
75 | struct shdma_desc *chunk, *c, *desc = | 75 | struct shdma_desc *chunk, *c, *desc = |
76 | container_of(tx, struct shdma_desc, async_tx), | 76 | container_of(tx, struct shdma_desc, async_tx); |
77 | *last = desc; | ||
78 | struct shdma_chan *schan = to_shdma_chan(tx->chan); | 77 | struct shdma_chan *schan = to_shdma_chan(tx->chan); |
79 | dma_async_tx_callback callback = tx->callback; | 78 | dma_async_tx_callback callback = tx->callback; |
80 | dma_cookie_t cookie; | 79 | dma_cookie_t cookie; |
@@ -98,19 +97,20 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
98 | &chunk->node == &schan->ld_free)) | 97 | &chunk->node == &schan->ld_free)) |
99 | break; | 98 | break; |
100 | chunk->mark = DESC_SUBMITTED; | 99 | chunk->mark = DESC_SUBMITTED; |
101 | /* Callback goes to the last chunk */ | 100 | if (chunk->chunks == 1) { |
102 | chunk->async_tx.callback = NULL; | 101 | chunk->async_tx.callback = callback; |
102 | chunk->async_tx.callback_param = tx->callback_param; | ||
103 | } else { | ||
104 | /* Callback goes to the last chunk */ | ||
105 | chunk->async_tx.callback = NULL; | ||
106 | } | ||
103 | chunk->cookie = cookie; | 107 | chunk->cookie = cookie; |
104 | list_move_tail(&chunk->node, &schan->ld_queue); | 108 | list_move_tail(&chunk->node, &schan->ld_queue); |
105 | last = chunk; | ||
106 | 109 | ||
107 | dev_dbg(schan->dev, "submit #%d@%p on %d\n", | 110 | dev_dbg(schan->dev, "submit #%d@%p on %d\n", |
108 | tx->cookie, &last->async_tx, schan->id); | 111 | tx->cookie, &chunk->async_tx, schan->id); |
109 | } | 112 | } |
110 | 113 | ||
111 | last->async_tx.callback = callback; | ||
112 | last->async_tx.callback_param = tx->callback_param; | ||
113 | |||
114 | if (power_up) { | 114 | if (power_up) { |
115 | int ret; | 115 | int ret; |
116 | schan->pm_state = SHDMA_PM_BUSY; | 116 | schan->pm_state = SHDMA_PM_BUSY; |
@@ -304,6 +304,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) | |||
304 | dma_async_tx_callback callback = NULL; | 304 | dma_async_tx_callback callback = NULL; |
305 | void *param = NULL; | 305 | void *param = NULL; |
306 | unsigned long flags; | 306 | unsigned long flags; |
307 | LIST_HEAD(cyclic_list); | ||
307 | 308 | ||
308 | spin_lock_irqsave(&schan->chan_lock, flags); | 309 | spin_lock_irqsave(&schan->chan_lock, flags); |
309 | list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) { | 310 | list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) { |
@@ -369,10 +370,16 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) | |||
369 | if (((desc->mark == DESC_COMPLETED || | 370 | if (((desc->mark == DESC_COMPLETED || |
370 | desc->mark == DESC_WAITING) && | 371 | desc->mark == DESC_WAITING) && |
371 | async_tx_test_ack(&desc->async_tx)) || all) { | 372 | async_tx_test_ack(&desc->async_tx)) || all) { |
372 | /* Remove from ld_queue list */ | ||
373 | desc->mark = DESC_IDLE; | ||
374 | 373 | ||
375 | list_move(&desc->node, &schan->ld_free); | 374 | if (all || !desc->cyclic) { |
375 | /* Remove from ld_queue list */ | ||
376 | desc->mark = DESC_IDLE; | ||
377 | list_move(&desc->node, &schan->ld_free); | ||
378 | } else { | ||
379 | /* reuse as cyclic */ | ||
380 | desc->mark = DESC_SUBMITTED; | ||
381 | list_move_tail(&desc->node, &cyclic_list); | ||
382 | } | ||
376 | 383 | ||
377 | if (list_empty(&schan->ld_queue)) { | 384 | if (list_empty(&schan->ld_queue)) { |
378 | dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); | 385 | dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); |
@@ -389,6 +396,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) | |||
389 | */ | 396 | */ |
390 | schan->dma_chan.completed_cookie = schan->dma_chan.cookie; | 397 | schan->dma_chan.completed_cookie = schan->dma_chan.cookie; |
391 | 398 | ||
399 | list_splice_tail(&cyclic_list, &schan->ld_queue); | ||
400 | |||
392 | spin_unlock_irqrestore(&schan->chan_lock, flags); | 401 | spin_unlock_irqrestore(&schan->chan_lock, flags); |
393 | 402 | ||
394 | if (callback) | 403 | if (callback) |
@@ -521,7 +530,7 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan, | |||
521 | */ | 530 | */ |
522 | static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan, | 531 | static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan, |
523 | struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, | 532 | struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, |
524 | enum dma_transfer_direction direction, unsigned long flags) | 533 | enum dma_transfer_direction direction, unsigned long flags, bool cyclic) |
525 | { | 534 | { |
526 | struct scatterlist *sg; | 535 | struct scatterlist *sg; |
527 | struct shdma_desc *first = NULL, *new = NULL /* compiler... */; | 536 | struct shdma_desc *first = NULL, *new = NULL /* compiler... */; |
@@ -569,7 +578,11 @@ static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan, | |||
569 | if (!new) | 578 | if (!new) |
570 | goto err_get_desc; | 579 | goto err_get_desc; |
571 | 580 | ||
572 | new->chunks = chunks--; | 581 | new->cyclic = cyclic; |
582 | if (cyclic) | ||
583 | new->chunks = 1; | ||
584 | else | ||
585 | new->chunks = chunks--; | ||
573 | list_add_tail(&new->node, &tx_list); | 586 | list_add_tail(&new->node, &tx_list); |
574 | } while (len); | 587 | } while (len); |
575 | } | 588 | } |
@@ -612,7 +625,8 @@ static struct dma_async_tx_descriptor *shdma_prep_memcpy( | |||
612 | sg_dma_address(&sg) = dma_src; | 625 | sg_dma_address(&sg) = dma_src; |
613 | sg_dma_len(&sg) = len; | 626 | sg_dma_len(&sg) = len; |
614 | 627 | ||
615 | return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags); | 628 | return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, |
629 | flags, false); | ||
616 | } | 630 | } |
617 | 631 | ||
618 | static struct dma_async_tx_descriptor *shdma_prep_slave_sg( | 632 | static struct dma_async_tx_descriptor *shdma_prep_slave_sg( |
@@ -640,7 +654,58 @@ static struct dma_async_tx_descriptor *shdma_prep_slave_sg( | |||
640 | slave_addr = ops->slave_addr(schan); | 654 | slave_addr = ops->slave_addr(schan); |
641 | 655 | ||
642 | return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, | 656 | return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, |
643 | direction, flags); | 657 | direction, flags, false); |
658 | } | ||
659 | |||
660 | #define SHDMA_MAX_SG_LEN 32 | ||
661 | |||
662 | static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( | ||
663 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | ||
664 | size_t period_len, enum dma_transfer_direction direction, | ||
665 | unsigned long flags, void *context) | ||
666 | { | ||
667 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
668 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | ||
669 | const struct shdma_ops *ops = sdev->ops; | ||
670 | unsigned int sg_len = buf_len / period_len; | ||
671 | int slave_id = schan->slave_id; | ||
672 | dma_addr_t slave_addr; | ||
673 | struct scatterlist sgl[SHDMA_MAX_SG_LEN]; | ||
674 | int i; | ||
675 | |||
676 | if (!chan) | ||
677 | return NULL; | ||
678 | |||
679 | BUG_ON(!schan->desc_num); | ||
680 | |||
681 | if (sg_len > SHDMA_MAX_SG_LEN) { | ||
682 | dev_err(schan->dev, "sg length %d exceeds limit %d\n", | ||
683 | sg_len, SHDMA_MAX_SG_LEN); | ||
684 | return NULL; | ||
685 | } | ||
686 | |||
687 | /* Someone calling slave DMA on a generic channel? */ | ||
688 | if (slave_id < 0 || (buf_len < period_len)) { | ||
689 | dev_warn(schan->dev, | ||
690 | "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n", | ||
691 | __func__, buf_len, period_len, slave_id); | ||
692 | return NULL; | ||
693 | } | ||
694 | |||
695 | slave_addr = ops->slave_addr(schan); | ||
696 | |||
697 | sg_init_table(sgl, sg_len); | ||
698 | for (i = 0; i < sg_len; i++) { | ||
699 | dma_addr_t src = buf_addr + (period_len * i); | ||
700 | |||
701 | sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len, | ||
702 | offset_in_page(src)); | ||
703 | sg_dma_address(&sgl[i]) = src; | ||
704 | sg_dma_len(&sgl[i]) = period_len; | ||
705 | } | ||
706 | |||
707 | return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, | ||
708 | direction, flags, true); | ||
644 | } | 709 | } |
645 | 710 | ||
646 | static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 711 | static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
@@ -915,6 +980,7 @@ int shdma_init(struct device *dev, struct shdma_dev *sdev, | |||
915 | 980 | ||
916 | /* Compulsory for DMA_SLAVE fields */ | 981 | /* Compulsory for DMA_SLAVE fields */ |
917 | dma_dev->device_prep_slave_sg = shdma_prep_slave_sg; | 982 | dma_dev->device_prep_slave_sg = shdma_prep_slave_sg; |
983 | dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic; | ||
918 | dma_dev->device_control = shdma_control; | 984 | dma_dev->device_control = shdma_control; |
919 | 985 | ||
920 | dma_dev->dev = dev; | 986 | dma_dev->dev = dev; |
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index dda7e7563f5d..146d5df926db 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c | |||
@@ -18,21 +18,22 @@ | |||
18 | * | 18 | * |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/dmaengine.h> | ||
23 | #include <linux/err.h> | ||
21 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/kdebug.h> | ||
22 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/notifier.h> | ||
23 | #include <linux/of.h> | 29 | #include <linux/of.h> |
24 | #include <linux/of_device.h> | 30 | #include <linux/of_device.h> |
25 | #include <linux/slab.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/dmaengine.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/platform_device.h> | 31 | #include <linux/platform_device.h> |
30 | #include <linux/pm_runtime.h> | 32 | #include <linux/pm_runtime.h> |
33 | #include <linux/rculist.h> | ||
31 | #include <linux/sh_dma.h> | 34 | #include <linux/sh_dma.h> |
32 | #include <linux/notifier.h> | 35 | #include <linux/slab.h> |
33 | #include <linux/kdebug.h> | ||
34 | #include <linux/spinlock.h> | 36 | #include <linux/spinlock.h> |
35 | #include <linux/rculist.h> | ||
36 | 37 | ||
37 | #include "../dmaengine.h" | 38 | #include "../dmaengine.h" |
38 | #include "shdma.h" | 39 | #include "shdma.h" |
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c index 4e7df43b50d6..3ce103909896 100644 --- a/drivers/dma/sh/sudmac.c +++ b/drivers/dma/sh/sudmac.c | |||
@@ -14,12 +14,13 @@ | |||
14 | * published by the Free Software Foundation. | 14 | * published by the Free Software Foundation. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/dmaengine.h> | ||
18 | #include <linux/err.h> | ||
17 | #include <linux/init.h> | 19 | #include <linux/init.h> |
18 | #include <linux/module.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
21 | #include <linux/dmaengine.h> | 21 | #include <linux/module.h> |
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/slab.h> | ||
23 | #include <linux/sudmac.h> | 24 | #include <linux/sudmac.h> |
24 | 25 | ||
25 | struct sudmac_chan { | 26 | struct sudmac_chan { |
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index bf18c786ed40..c7984459ede7 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -556,7 +556,6 @@ struct d40_gen_dmac { | |||
556 | * later | 556 | * later |
557 | * @reg_val_backup_chan: Backup data for standard channel parameter registers. | 557 | * @reg_val_backup_chan: Backup data for standard channel parameter registers. |
558 | * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off. | 558 | * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off. |
559 | * @initialized: true if the dma has been initialized | ||
560 | * @gen_dmac: the struct for generic registers values to represent u8500/8540 | 559 | * @gen_dmac: the struct for generic registers values to represent u8500/8540 |
561 | * DMA controller | 560 | * DMA controller |
562 | */ | 561 | */ |
@@ -594,7 +593,6 @@ struct d40_base { | |||
594 | u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX]; | 593 | u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX]; |
595 | u32 *reg_val_backup_chan; | 594 | u32 *reg_val_backup_chan; |
596 | u16 gcc_pwr_off_mask; | 595 | u16 gcc_pwr_off_mask; |
597 | bool initialized; | ||
598 | struct d40_gen_dmac gen_dmac; | 596 | struct d40_gen_dmac gen_dmac; |
599 | }; | 597 | }; |
600 | 598 | ||
@@ -1056,62 +1054,6 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len, | |||
1056 | return len; | 1054 | return len; |
1057 | } | 1055 | } |
1058 | 1056 | ||
1059 | |||
1060 | #ifdef CONFIG_PM | ||
1061 | static void dma40_backup(void __iomem *baseaddr, u32 *backup, | ||
1062 | u32 *regaddr, int num, bool save) | ||
1063 | { | ||
1064 | int i; | ||
1065 | |||
1066 | for (i = 0; i < num; i++) { | ||
1067 | void __iomem *addr = baseaddr + regaddr[i]; | ||
1068 | |||
1069 | if (save) | ||
1070 | backup[i] = readl_relaxed(addr); | ||
1071 | else | ||
1072 | writel_relaxed(backup[i], addr); | ||
1073 | } | ||
1074 | } | ||
1075 | |||
1076 | static void d40_save_restore_registers(struct d40_base *base, bool save) | ||
1077 | { | ||
1078 | int i; | ||
1079 | |||
1080 | /* Save/Restore channel specific registers */ | ||
1081 | for (i = 0; i < base->num_phy_chans; i++) { | ||
1082 | void __iomem *addr; | ||
1083 | int idx; | ||
1084 | |||
1085 | if (base->phy_res[i].reserved) | ||
1086 | continue; | ||
1087 | |||
1088 | addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA; | ||
1089 | idx = i * ARRAY_SIZE(d40_backup_regs_chan); | ||
1090 | |||
1091 | dma40_backup(addr, &base->reg_val_backup_chan[idx], | ||
1092 | d40_backup_regs_chan, | ||
1093 | ARRAY_SIZE(d40_backup_regs_chan), | ||
1094 | save); | ||
1095 | } | ||
1096 | |||
1097 | /* Save/Restore global registers */ | ||
1098 | dma40_backup(base->virtbase, base->reg_val_backup, | ||
1099 | d40_backup_regs, ARRAY_SIZE(d40_backup_regs), | ||
1100 | save); | ||
1101 | |||
1102 | /* Save/Restore registers only existing on dma40 v3 and later */ | ||
1103 | if (base->gen_dmac.backup) | ||
1104 | dma40_backup(base->virtbase, base->reg_val_backup_v4, | ||
1105 | base->gen_dmac.backup, | ||
1106 | base->gen_dmac.backup_size, | ||
1107 | save); | ||
1108 | } | ||
1109 | #else | ||
1110 | static void d40_save_restore_registers(struct d40_base *base, bool save) | ||
1111 | { | ||
1112 | } | ||
1113 | #endif | ||
1114 | |||
1115 | static int __d40_execute_command_phy(struct d40_chan *d40c, | 1057 | static int __d40_execute_command_phy(struct d40_chan *d40c, |
1116 | enum d40_command command) | 1058 | enum d40_command command) |
1117 | { | 1059 | { |
@@ -1495,8 +1437,8 @@ static int d40_pause(struct d40_chan *d40c) | |||
1495 | if (!d40c->busy) | 1437 | if (!d40c->busy) |
1496 | return 0; | 1438 | return 0; |
1497 | 1439 | ||
1498 | pm_runtime_get_sync(d40c->base->dev); | ||
1499 | spin_lock_irqsave(&d40c->lock, flags); | 1440 | spin_lock_irqsave(&d40c->lock, flags); |
1441 | pm_runtime_get_sync(d40c->base->dev); | ||
1500 | 1442 | ||
1501 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 1443 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
1502 | 1444 | ||
@@ -2998,18 +2940,88 @@ failure1: | |||
2998 | } | 2940 | } |
2999 | 2941 | ||
3000 | /* Suspend resume functionality */ | 2942 | /* Suspend resume functionality */ |
3001 | #ifdef CONFIG_PM | 2943 | #ifdef CONFIG_PM_SLEEP |
3002 | static int dma40_pm_suspend(struct device *dev) | 2944 | static int dma40_suspend(struct device *dev) |
3003 | { | 2945 | { |
3004 | struct platform_device *pdev = to_platform_device(dev); | 2946 | struct platform_device *pdev = to_platform_device(dev); |
3005 | struct d40_base *base = platform_get_drvdata(pdev); | 2947 | struct d40_base *base = platform_get_drvdata(pdev); |
3006 | int ret = 0; | 2948 | int ret; |
2949 | |||
2950 | ret = pm_runtime_force_suspend(dev); | ||
2951 | if (ret) | ||
2952 | return ret; | ||
3007 | 2953 | ||
3008 | if (base->lcpa_regulator) | 2954 | if (base->lcpa_regulator) |
3009 | ret = regulator_disable(base->lcpa_regulator); | 2955 | ret = regulator_disable(base->lcpa_regulator); |
3010 | return ret; | 2956 | return ret; |
3011 | } | 2957 | } |
3012 | 2958 | ||
2959 | static int dma40_resume(struct device *dev) | ||
2960 | { | ||
2961 | struct platform_device *pdev = to_platform_device(dev); | ||
2962 | struct d40_base *base = platform_get_drvdata(pdev); | ||
2963 | int ret = 0; | ||
2964 | |||
2965 | if (base->lcpa_regulator) { | ||
2966 | ret = regulator_enable(base->lcpa_regulator); | ||
2967 | if (ret) | ||
2968 | return ret; | ||
2969 | } | ||
2970 | |||
2971 | return pm_runtime_force_resume(dev); | ||
2972 | } | ||
2973 | #endif | ||
2974 | |||
2975 | #ifdef CONFIG_PM | ||
2976 | static void dma40_backup(void __iomem *baseaddr, u32 *backup, | ||
2977 | u32 *regaddr, int num, bool save) | ||
2978 | { | ||
2979 | int i; | ||
2980 | |||
2981 | for (i = 0; i < num; i++) { | ||
2982 | void __iomem *addr = baseaddr + regaddr[i]; | ||
2983 | |||
2984 | if (save) | ||
2985 | backup[i] = readl_relaxed(addr); | ||
2986 | else | ||
2987 | writel_relaxed(backup[i], addr); | ||
2988 | } | ||
2989 | } | ||
2990 | |||
2991 | static void d40_save_restore_registers(struct d40_base *base, bool save) | ||
2992 | { | ||
2993 | int i; | ||
2994 | |||
2995 | /* Save/Restore channel specific registers */ | ||
2996 | for (i = 0; i < base->num_phy_chans; i++) { | ||
2997 | void __iomem *addr; | ||
2998 | int idx; | ||
2999 | |||
3000 | if (base->phy_res[i].reserved) | ||
3001 | continue; | ||
3002 | |||
3003 | addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA; | ||
3004 | idx = i * ARRAY_SIZE(d40_backup_regs_chan); | ||
3005 | |||
3006 | dma40_backup(addr, &base->reg_val_backup_chan[idx], | ||
3007 | d40_backup_regs_chan, | ||
3008 | ARRAY_SIZE(d40_backup_regs_chan), | ||
3009 | save); | ||
3010 | } | ||
3011 | |||
3012 | /* Save/Restore global registers */ | ||
3013 | dma40_backup(base->virtbase, base->reg_val_backup, | ||
3014 | d40_backup_regs, ARRAY_SIZE(d40_backup_regs), | ||
3015 | save); | ||
3016 | |||
3017 | /* Save/Restore registers only existing on dma40 v3 and later */ | ||
3018 | if (base->gen_dmac.backup) | ||
3019 | dma40_backup(base->virtbase, base->reg_val_backup_v4, | ||
3020 | base->gen_dmac.backup, | ||
3021 | base->gen_dmac.backup_size, | ||
3022 | save); | ||
3023 | } | ||
3024 | |||
3013 | static int dma40_runtime_suspend(struct device *dev) | 3025 | static int dma40_runtime_suspend(struct device *dev) |
3014 | { | 3026 | { |
3015 | struct platform_device *pdev = to_platform_device(dev); | 3027 | struct platform_device *pdev = to_platform_device(dev); |
@@ -3030,36 +3042,20 @@ static int dma40_runtime_resume(struct device *dev) | |||
3030 | struct platform_device *pdev = to_platform_device(dev); | 3042 | struct platform_device *pdev = to_platform_device(dev); |
3031 | struct d40_base *base = platform_get_drvdata(pdev); | 3043 | struct d40_base *base = platform_get_drvdata(pdev); |
3032 | 3044 | ||
3033 | if (base->initialized) | 3045 | d40_save_restore_registers(base, false); |
3034 | d40_save_restore_registers(base, false); | ||
3035 | 3046 | ||
3036 | writel_relaxed(D40_DREG_GCC_ENABLE_ALL, | 3047 | writel_relaxed(D40_DREG_GCC_ENABLE_ALL, |
3037 | base->virtbase + D40_DREG_GCC); | 3048 | base->virtbase + D40_DREG_GCC); |
3038 | return 0; | 3049 | return 0; |
3039 | } | 3050 | } |
3040 | 3051 | #endif | |
3041 | static int dma40_resume(struct device *dev) | ||
3042 | { | ||
3043 | struct platform_device *pdev = to_platform_device(dev); | ||
3044 | struct d40_base *base = platform_get_drvdata(pdev); | ||
3045 | int ret = 0; | ||
3046 | |||
3047 | if (base->lcpa_regulator) | ||
3048 | ret = regulator_enable(base->lcpa_regulator); | ||
3049 | |||
3050 | return ret; | ||
3051 | } | ||
3052 | 3052 | ||
3053 | static const struct dev_pm_ops dma40_pm_ops = { | 3053 | static const struct dev_pm_ops dma40_pm_ops = { |
3054 | .suspend = dma40_pm_suspend, | 3054 | SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume) |
3055 | .runtime_suspend = dma40_runtime_suspend, | 3055 | SET_PM_RUNTIME_PM_OPS(dma40_runtime_suspend, |
3056 | .runtime_resume = dma40_runtime_resume, | 3056 | dma40_runtime_resume, |
3057 | .resume = dma40_resume, | 3057 | NULL) |
3058 | }; | 3058 | }; |
3059 | #define DMA40_PM_OPS (&dma40_pm_ops) | ||
3060 | #else | ||
3061 | #define DMA40_PM_OPS NULL | ||
3062 | #endif | ||
3063 | 3059 | ||
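The reworked hooks above follow the common pattern of layering system sleep on top of the runtime PM callbacks through pm_runtime_force_suspend()/pm_runtime_force_resume(). A stripped-down sketch of that shape, with hypothetical names and the register/regulator handling elided:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
	/* save controller state, gate clocks, ... */
	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	/* restore controller state */
	return 0;
}

/* System sleep simply forces the runtime PM path */
static int example_suspend(struct device *dev)
{
	return pm_runtime_force_suspend(dev);
}

static int example_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}

static const struct dev_pm_ops example_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
	SET_PM_RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
};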
3064 | /* Initialization functions. */ | 3060 | /* Initialization functions. */ |
3065 | 3061 | ||
@@ -3645,12 +3641,6 @@ static int __init d40_probe(struct platform_device *pdev) | |||
3645 | goto failure; | 3641 | goto failure; |
3646 | } | 3642 | } |
3647 | 3643 | ||
3648 | pm_runtime_irq_safe(base->dev); | ||
3649 | pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY); | ||
3650 | pm_runtime_use_autosuspend(base->dev); | ||
3651 | pm_runtime_enable(base->dev); | ||
3652 | pm_runtime_resume(base->dev); | ||
3653 | |||
3654 | if (base->plat_data->use_esram_lcla) { | 3644 | if (base->plat_data->use_esram_lcla) { |
3655 | 3645 | ||
3656 | base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); | 3646 | base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); |
@@ -3671,7 +3661,15 @@ static int __init d40_probe(struct platform_device *pdev) | |||
3671 | } | 3661 | } |
3672 | } | 3662 | } |
3673 | 3663 | ||
3674 | base->initialized = true; | 3664 | writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); |
3665 | |||
3666 | pm_runtime_irq_safe(base->dev); | ||
3667 | pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY); | ||
3668 | pm_runtime_use_autosuspend(base->dev); | ||
3669 | pm_runtime_mark_last_busy(base->dev); | ||
3670 | pm_runtime_set_active(base->dev); | ||
3671 | pm_runtime_enable(base->dev); | ||
3672 | |||
3675 | ret = d40_dmaengine_init(base, num_reserved_chans); | 3673 | ret = d40_dmaengine_init(base, num_reserved_chans); |
3676 | if (ret) | 3674 | if (ret) |
3677 | goto failure; | 3675 | goto failure; |
@@ -3754,7 +3752,7 @@ static struct platform_driver d40_driver = { | |||
3754 | .driver = { | 3752 | .driver = { |
3755 | .owner = THIS_MODULE, | 3753 | .owner = THIS_MODULE, |
3756 | .name = D40_NAME, | 3754 | .name = D40_NAME, |
3757 | .pm = DMA40_PM_OPS, | 3755 | .pm = &dma40_pm_ops, |
3758 | .of_match_table = d40_match, | 3756 | .of_match_table = d40_match, |
3759 | }, | 3757 | }, |
3760 | }; | 3758 | }; |
diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile new file mode 100644 index 000000000000..3c4e9f2fea28 --- /dev/null +++ b/drivers/dma/xilinx/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_XILINX_VDMA) += xilinx_vdma.o | |||
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c new file mode 100644 index 000000000000..42a13e8d4607 --- /dev/null +++ b/drivers/dma/xilinx/xilinx_vdma.c | |||
@@ -0,0 +1,1379 @@ | |||
1 | /* | ||
2 | * DMA driver for Xilinx Video DMA Engine | ||
3 | * | ||
4 | * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved. | ||
5 | * | ||
6 | * Based on the Freescale DMA driver. | ||
7 | * | ||
8 | * Description: | ||
9 | * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP | ||
10 | * core that provides high-bandwidth direct memory access between memory | ||
11 | * and AXI4-Stream type video target peripherals. The core provides efficient | ||
12 | * two-dimensional DMA operations with independent asynchronous read (S2MM) | ||
13 | * and write (MM2S) channel operation. It can be configured to have either | ||
14 | * one channel or two channels. If configured as two channels, one is to | ||
15 | * transmit to the video device (MM2S) and another is to receive from the | ||
16 | * video device (S2MM). Initialization, status, interrupt and management | ||
17 | * registers are accessed through an AXI4-Lite slave interface. | ||
18 | * | ||
19 | * This program is free software: you can redistribute it and/or modify | ||
20 | * it under the terms of the GNU General Public License as published by | ||
21 | * the Free Software Foundation, either version 2 of the License, or | ||
22 | * (at your option) any later version. | ||
23 | */ | ||
24 | |||
25 | #include <linux/amba/xilinx_dma.h> | ||
26 | #include <linux/bitops.h> | ||
27 | #include <linux/dmapool.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/interrupt.h> | ||
30 | #include <linux/io.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/of_address.h> | ||
33 | #include <linux/of_dma.h> | ||
34 | #include <linux/of_platform.h> | ||
35 | #include <linux/of_irq.h> | ||
36 | #include <linux/slab.h> | ||
37 | |||
38 | #include "../dmaengine.h" | ||
39 | |||
40 | /* Register/Descriptor Offsets */ | ||
41 | #define XILINX_VDMA_MM2S_CTRL_OFFSET 0x0000 | ||
42 | #define XILINX_VDMA_S2MM_CTRL_OFFSET 0x0030 | ||
43 | #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050 | ||
44 | #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0 | ||
45 | |||
46 | /* Control Registers */ | ||
47 | #define XILINX_VDMA_REG_DMACR 0x0000 | ||
48 | #define XILINX_VDMA_DMACR_DELAY_MAX 0xff | ||
49 | #define XILINX_VDMA_DMACR_DELAY_SHIFT 24 | ||
50 | #define XILINX_VDMA_DMACR_FRAME_COUNT_MAX 0xff | ||
51 | #define XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT 16 | ||
52 | #define XILINX_VDMA_DMACR_ERR_IRQ BIT(14) | ||
53 | #define XILINX_VDMA_DMACR_DLY_CNT_IRQ BIT(13) | ||
54 | #define XILINX_VDMA_DMACR_FRM_CNT_IRQ BIT(12) | ||
55 | #define XILINX_VDMA_DMACR_MASTER_SHIFT 8 | ||
56 | #define XILINX_VDMA_DMACR_FSYNCSRC_SHIFT 5 | ||
57 | #define XILINX_VDMA_DMACR_FRAMECNT_EN BIT(4) | ||
58 | #define XILINX_VDMA_DMACR_GENLOCK_EN BIT(3) | ||
59 | #define XILINX_VDMA_DMACR_RESET BIT(2) | ||
60 | #define XILINX_VDMA_DMACR_CIRC_EN BIT(1) | ||
61 | #define XILINX_VDMA_DMACR_RUNSTOP BIT(0) | ||
62 | #define XILINX_VDMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) | ||
63 | |||
64 | #define XILINX_VDMA_REG_DMASR 0x0004 | ||
65 | #define XILINX_VDMA_DMASR_EOL_LATE_ERR BIT(15) | ||
66 | #define XILINX_VDMA_DMASR_ERR_IRQ BIT(14) | ||
67 | #define XILINX_VDMA_DMASR_DLY_CNT_IRQ BIT(13) | ||
68 | #define XILINX_VDMA_DMASR_FRM_CNT_IRQ BIT(12) | ||
69 | #define XILINX_VDMA_DMASR_SOF_LATE_ERR BIT(11) | ||
70 | #define XILINX_VDMA_DMASR_SG_DEC_ERR BIT(10) | ||
71 | #define XILINX_VDMA_DMASR_SG_SLV_ERR BIT(9) | ||
72 | #define XILINX_VDMA_DMASR_EOF_EARLY_ERR BIT(8) | ||
73 | #define XILINX_VDMA_DMASR_SOF_EARLY_ERR BIT(7) | ||
74 | #define XILINX_VDMA_DMASR_DMA_DEC_ERR BIT(6) | ||
75 | #define XILINX_VDMA_DMASR_DMA_SLAVE_ERR BIT(5) | ||
76 | #define XILINX_VDMA_DMASR_DMA_INT_ERR BIT(4) | ||
77 | #define XILINX_VDMA_DMASR_IDLE BIT(1) | ||
78 | #define XILINX_VDMA_DMASR_HALTED BIT(0) | ||
79 | #define XILINX_VDMA_DMASR_DELAY_MASK GENMASK(31, 24) | ||
80 | #define XILINX_VDMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16) | ||
81 | |||
82 | #define XILINX_VDMA_REG_CURDESC 0x0008 | ||
83 | #define XILINX_VDMA_REG_TAILDESC 0x0010 | ||
84 | #define XILINX_VDMA_REG_REG_INDEX 0x0014 | ||
85 | #define XILINX_VDMA_REG_FRMSTORE 0x0018 | ||
86 | #define XILINX_VDMA_REG_THRESHOLD 0x001c | ||
87 | #define XILINX_VDMA_REG_FRMPTR_STS 0x0024 | ||
88 | #define XILINX_VDMA_REG_PARK_PTR 0x0028 | ||
89 | #define XILINX_VDMA_PARK_PTR_WR_REF_SHIFT 8 | ||
90 | #define XILINX_VDMA_PARK_PTR_RD_REF_SHIFT 0 | ||
91 | #define XILINX_VDMA_REG_VDMA_VERSION 0x002c | ||
92 | |||
93 | /* Register Direct Mode Registers */ | ||
94 | #define XILINX_VDMA_REG_VSIZE 0x0000 | ||
95 | #define XILINX_VDMA_REG_HSIZE 0x0004 | ||
96 | |||
97 | #define XILINX_VDMA_REG_FRMDLY_STRIDE 0x0008 | ||
98 | #define XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 | ||
99 | #define XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT 0 | ||
100 | |||
101 | #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) | ||
102 | |||
103 | /* HW specific definitions */ | ||
104 | #define XILINX_VDMA_MAX_CHANS_PER_DEVICE 0x2 | ||
105 | |||
106 | #define XILINX_VDMA_DMAXR_ALL_IRQ_MASK \ | ||
107 | (XILINX_VDMA_DMASR_FRM_CNT_IRQ | \ | ||
108 | XILINX_VDMA_DMASR_DLY_CNT_IRQ | \ | ||
109 | XILINX_VDMA_DMASR_ERR_IRQ) | ||
110 | |||
111 | #define XILINX_VDMA_DMASR_ALL_ERR_MASK \ | ||
112 | (XILINX_VDMA_DMASR_EOL_LATE_ERR | \ | ||
113 | XILINX_VDMA_DMASR_SOF_LATE_ERR | \ | ||
114 | XILINX_VDMA_DMASR_SG_DEC_ERR | \ | ||
115 | XILINX_VDMA_DMASR_SG_SLV_ERR | \ | ||
116 | XILINX_VDMA_DMASR_EOF_EARLY_ERR | \ | ||
117 | XILINX_VDMA_DMASR_SOF_EARLY_ERR | \ | ||
118 | XILINX_VDMA_DMASR_DMA_DEC_ERR | \ | ||
119 | XILINX_VDMA_DMASR_DMA_SLAVE_ERR | \ | ||
120 | XILINX_VDMA_DMASR_DMA_INT_ERR) | ||
121 | |||
122 | /* | ||
123 | * Recoverable errors are DMA Internal error, SOF Early, EOF Early | ||
124 | * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC | ||
125 | * is enabled in the h/w system. | ||
126 | */ | ||
127 | #define XILINX_VDMA_DMASR_ERR_RECOVER_MASK \ | ||
128 | (XILINX_VDMA_DMASR_SOF_LATE_ERR | \ | ||
129 | XILINX_VDMA_DMASR_EOF_EARLY_ERR | \ | ||
130 | XILINX_VDMA_DMASR_SOF_EARLY_ERR | \ | ||
131 | XILINX_VDMA_DMASR_DMA_INT_ERR) | ||
132 | |||
133 | /* Axi VDMA Flush on Fsync bits */ | ||
134 | #define XILINX_VDMA_FLUSH_S2MM 3 | ||
135 | #define XILINX_VDMA_FLUSH_MM2S 2 | ||
136 | #define XILINX_VDMA_FLUSH_BOTH 1 | ||
137 | |||
138 | /* Delay loop counter to prevent hardware failure */ | ||
139 | #define XILINX_VDMA_LOOP_COUNT 1000000 | ||
140 | |||
141 | /** | ||
142 | * struct xilinx_vdma_desc_hw - Hardware Descriptor | ||
143 | * @next_desc: Next Descriptor Pointer @0x00 | ||
144 | * @pad1: Reserved @0x04 | ||
145 | * @buf_addr: Buffer address @0x08 | ||
146 | * @pad2: Reserved @0x0C | ||
147 | * @vsize: Vertical Size @0x10 | ||
148 | * @hsize: Horizontal Size @0x14 | ||
149 | * @stride: Number of bytes between the first | ||
150 | * pixels of each horizontal line @0x18 | ||
151 | */ | ||
152 | struct xilinx_vdma_desc_hw { | ||
153 | u32 next_desc; | ||
154 | u32 pad1; | ||
155 | u32 buf_addr; | ||
156 | u32 pad2; | ||
157 | u32 vsize; | ||
158 | u32 hsize; | ||
159 | u32 stride; | ||
160 | } __aligned(64); | ||
161 | |||
162 | /** | ||
163 | * struct xilinx_vdma_tx_segment - Descriptor segment | ||
164 | * @hw: Hardware descriptor | ||
165 | * @node: Node in the descriptor segments list | ||
166 | * @phys: Physical address of segment | ||
167 | */ | ||
168 | struct xilinx_vdma_tx_segment { | ||
169 | struct xilinx_vdma_desc_hw hw; | ||
170 | struct list_head node; | ||
171 | dma_addr_t phys; | ||
172 | } __aligned(64); | ||
173 | |||
174 | /** | ||
175 | * struct xilinx_vdma_tx_descriptor - Per Transaction structure | ||
176 | * @async_tx: Async transaction descriptor | ||
177 | * @segments: TX segments list | ||
178 | * @node: Node in the channel descriptors list | ||
179 | */ | ||
180 | struct xilinx_vdma_tx_descriptor { | ||
181 | struct dma_async_tx_descriptor async_tx; | ||
182 | struct list_head segments; | ||
183 | struct list_head node; | ||
184 | }; | ||
185 | |||
186 | /** | ||
187 | * struct xilinx_vdma_chan - Driver specific VDMA channel structure | ||
188 | * @xdev: Driver specific device structure | ||
189 | * @ctrl_offset: Control registers offset | ||
190 | * @desc_offset: TX descriptor registers offset | ||
191 | * @lock: Descriptor operation lock | ||
192 | * @pending_list: Descriptors waiting | ||
193 | * @active_desc: Active descriptor | ||
194 | * @allocated_desc: Allocated descriptor | ||
195 | * @done_list: Complete descriptors | ||
196 | * @common: DMA common channel | ||
197 | * @desc_pool: Descriptors pool | ||
198 | * @dev: The dma device | ||
199 | * @irq: Channel IRQ | ||
200 | * @id: Channel ID | ||
201 | * @direction: Transfer direction | ||
202 | * @num_frms: Number of frames | ||
203 | * @has_sg: Support scatter transfers | ||
204 | * @genlock: Support genlock mode | ||
205 | * @err: Channel has errors | ||
206 | * @tasklet: Cleanup work after irq | ||
207 | * @config: Device configuration info | ||
208 | * @flush_on_fsync: Flush on Frame sync | ||
209 | */ | ||
210 | struct xilinx_vdma_chan { | ||
211 | struct xilinx_vdma_device *xdev; | ||
212 | u32 ctrl_offset; | ||
213 | u32 desc_offset; | ||
214 | spinlock_t lock; | ||
215 | struct list_head pending_list; | ||
216 | struct xilinx_vdma_tx_descriptor *active_desc; | ||
217 | struct xilinx_vdma_tx_descriptor *allocated_desc; | ||
218 | struct list_head done_list; | ||
219 | struct dma_chan common; | ||
220 | struct dma_pool *desc_pool; | ||
221 | struct device *dev; | ||
222 | int irq; | ||
223 | int id; | ||
224 | enum dma_transfer_direction direction; | ||
225 | int num_frms; | ||
226 | bool has_sg; | ||
227 | bool genlock; | ||
228 | bool err; | ||
229 | struct tasklet_struct tasklet; | ||
230 | struct xilinx_vdma_config config; | ||
231 | bool flush_on_fsync; | ||
232 | }; | ||
233 | |||
234 | /** | ||
235 | * struct xilinx_vdma_device - VDMA device structure | ||
236 | * @regs: I/O mapped base address | ||
237 | * @dev: Device Structure | ||
238 | * @common: DMA device structure | ||
239 | * @chan: Driver specific VDMA channel | ||
240 | * @has_sg: Specifies whether Scatter-Gather is present or not | ||
241 | * @flush_on_fsync: Flush on frame sync | ||
242 | */ | ||
243 | struct xilinx_vdma_device { | ||
244 | void __iomem *regs; | ||
245 | struct device *dev; | ||
246 | struct dma_device common; | ||
247 | struct xilinx_vdma_chan *chan[XILINX_VDMA_MAX_CHANS_PER_DEVICE]; | ||
248 | bool has_sg; | ||
249 | u32 flush_on_fsync; | ||
250 | }; | ||
251 | |||
252 | /* Macros */ | ||
253 | #define to_xilinx_chan(chan) \ | ||
254 | container_of(chan, struct xilinx_vdma_chan, common) | ||
255 | #define to_vdma_tx_descriptor(tx) \ | ||
256 | container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx) | ||
257 | |||
258 | /* IO accessors */ | ||
259 | static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg) | ||
260 | { | ||
261 | return ioread32(chan->xdev->regs + reg); | ||
262 | } | ||
263 | |||
264 | static inline void vdma_write(struct xilinx_vdma_chan *chan, u32 reg, u32 value) | ||
265 | { | ||
266 | iowrite32(value, chan->xdev->regs + reg); | ||
267 | } | ||
268 | |||
269 | static inline void vdma_desc_write(struct xilinx_vdma_chan *chan, u32 reg, | ||
270 | u32 value) | ||
271 | { | ||
272 | vdma_write(chan, chan->desc_offset + reg, value); | ||
273 | } | ||
274 | |||
275 | static inline u32 vdma_ctrl_read(struct xilinx_vdma_chan *chan, u32 reg) | ||
276 | { | ||
277 | return vdma_read(chan, chan->ctrl_offset + reg); | ||
278 | } | ||
279 | |||
280 | static inline void vdma_ctrl_write(struct xilinx_vdma_chan *chan, u32 reg, | ||
281 | u32 value) | ||
282 | { | ||
283 | vdma_write(chan, chan->ctrl_offset + reg, value); | ||
284 | } | ||
285 | |||
286 | static inline void vdma_ctrl_clr(struct xilinx_vdma_chan *chan, u32 reg, | ||
287 | u32 clr) | ||
288 | { | ||
289 | vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) & ~clr); | ||
290 | } | ||
291 | |||
292 | static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg, | ||
293 | u32 set) | ||
294 | { | ||
295 | vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) | set); | ||
296 | } | ||
297 | |||
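To show how these accessors compose (an illustration, not a function quoted from the file), a soft reset of a channel could be issued and polled roughly like this, reusing the register and loop-count definitions above:

/* Illustrative only; assumes the file's existing includes. */
static int example_vdma_chan_reset(struct xilinx_vdma_chan *chan)
{
	int loop = XILINX_VDMA_LOOP_COUNT;
	u32 tmp;

	/* Request a soft reset through the read-modify-write helper */
	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);

	/* Wait for the core to clear the bit, bounded by the loop counter */
	do {
		tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
		      XILINX_VDMA_DMACR_RESET;
	} while (loop-- && tmp);

	return tmp ? -EBUSY : 0;
}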
298 | /* ----------------------------------------------------------------------------- | ||
299 | * Descriptors and segments alloc and free | ||
300 | */ | ||
301 | |||
302 | /** | ||
303 | * xilinx_vdma_alloc_tx_segment - Allocate transaction segment | ||
304 | * @chan: Driver specific VDMA channel | ||
305 | * | ||
306 | * Return: The allocated segment on success and NULL on failure. | ||
307 | */ | ||
308 | static struct xilinx_vdma_tx_segment * | ||
309 | xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan) | ||
310 | { | ||
311 | struct xilinx_vdma_tx_segment *segment; | ||
312 | dma_addr_t phys; | ||
313 | |||
314 | segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys); | ||
315 | if (!segment) | ||
316 | return NULL; | ||
317 | |||
318 | memset(segment, 0, sizeof(*segment)); | ||
319 | segment->phys = phys; | ||
320 | |||
321 | return segment; | ||
322 | } | ||
323 | |||
324 | /** | ||
325 | * xilinx_vdma_free_tx_segment - Free transaction segment | ||
326 | * @chan: Driver specific VDMA channel | ||
327 | * @segment: VDMA transaction segment | ||
328 | */ | ||
329 | static void xilinx_vdma_free_tx_segment(struct xilinx_vdma_chan *chan, | ||
330 | struct xilinx_vdma_tx_segment *segment) | ||
331 | { | ||
332 | dma_pool_free(chan->desc_pool, segment, segment->phys); | ||
333 | } | ||
334 | |||
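The allocator above relies on chan->desc_pool handing out blocks with the 64-byte alignment that struct xilinx_vdma_tx_segment declares. A pool with those properties, normally created from the channel's alloc_chan_resources callback, could be set up roughly as in this sketch (illustrative, not quoted from the driver):

	/* Sketch: pool of hardware segments honouring the __aligned(64) layout */
	chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool", chan->dev,
					  sizeof(struct xilinx_vdma_tx_segment),
					  __alignof__(struct xilinx_vdma_tx_segment),
					  0);
	if (!chan->desc_pool)
		return -ENOMEM;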
335 | /** | ||
336 | * xilinx_vdma_alloc_tx_descriptor - Allocate transaction descriptor | ||
337 | * @chan: Driver specific VDMA channel | ||
338 | * | ||
339 | * Return: The allocated descriptor on success and NULL on failure. | ||
340 | */ | ||
341 | static struct xilinx_vdma_tx_descriptor * | ||
342 | xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan) | ||
343 | { | ||
344 | struct xilinx_vdma_tx_descriptor *desc; | ||
345 | unsigned long flags; | ||
346 | |||
347 | if (chan->allocated_desc) | ||
348 | return chan->allocated_desc; | ||
349 | |||
350 | desc = kzalloc(sizeof(*desc), GFP_KERNEL); | ||
351 | if (!desc) | ||
352 | return NULL; | ||
353 | |||
354 | spin_lock_irqsave(&chan->lock, flags); | ||
355 | chan->allocated_desc = desc; | ||
356 | spin_unlock_irqrestore(&chan->lock, flags); | ||
357 | |||
358 | INIT_LIST_HEAD(&desc->segments); | ||
359 | |||
360 | return desc; | ||
361 | } | ||
362 | |||
363 | /** | ||
364 | * xilinx_vdma_free_tx_descriptor - Free transaction descriptor | ||
365 | * @chan: Driver specific VDMA channel | ||
366 | * @desc: VDMA transaction descriptor | ||
367 | */ | ||
368 | static void | ||
369 | xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan, | ||
370 | struct xilinx_vdma_tx_descriptor *desc) | ||
371 | { | ||
372 | struct xilinx_vdma_tx_segment *segment, *next; | ||
373 | |||
374 | if (!desc) | ||
375 | return; | ||
376 | |||
377 | list_for_each_entry_safe(segment, next, &desc->segments, node) { | ||
378 | list_del(&segment->node); | ||
379 | xilinx_vdma_free_tx_segment(chan, segment); | ||
380 | } | ||
381 | |||
382 | kfree(desc); | ||
383 | } | ||
384 | |||
385 | /* Required functions */ | ||
386 | |||
387 | /** | ||
388 | * xilinx_vdma_free_desc_list - Free descriptors list | ||
389 | * @chan: Driver specific VDMA channel | ||
390 | * @list: List to parse and delete the descriptor | ||
391 | */ | ||
392 | static void xilinx_vdma_free_desc_list(struct xilinx_vdma_chan *chan, | ||
393 | struct list_head *list) | ||
394 | { | ||
395 | struct xilinx_vdma_tx_descriptor *desc, *next; | ||
396 | |||
397 | list_for_each_entry_safe(desc, next, list, node) { | ||
398 | list_del(&desc->node); | ||
399 | xilinx_vdma_free_tx_descriptor(chan, desc); | ||
400 | } | ||
401 | } | ||
402 | |||
403 | /** | ||
404 | * xilinx_vdma_free_descriptors - Free channel descriptors | ||
405 | * @chan: Driver specific VDMA channel | ||
406 | */ | ||
407 | static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan) | ||
408 | { | ||
409 | unsigned long flags; | ||
410 | |||
411 | spin_lock_irqsave(&chan->lock, flags); | ||
412 | |||
413 | xilinx_vdma_free_desc_list(chan, &chan->pending_list); | ||
414 | xilinx_vdma_free_desc_list(chan, &chan->done_list); | ||
415 | |||
416 | xilinx_vdma_free_tx_descriptor(chan, chan->active_desc); | ||
417 | chan->active_desc = NULL; | ||
418 | |||
419 | spin_unlock_irqrestore(&chan->lock, flags); | ||
420 | } | ||
421 | |||
422 | /** | ||
423 | * xilinx_vdma_free_chan_resources - Free channel resources | ||
424 | * @dchan: DMA channel | ||
425 | */ | ||
426 | static void xilinx_vdma_free_chan_resources(struct dma_chan *dchan) | ||
427 | { | ||
428 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | ||
429 | |||
430 | dev_dbg(chan->dev, "Free all channel resources.\n"); | ||
431 | |||
432 | xilinx_vdma_free_descriptors(chan); | ||
433 | dma_pool_destroy(chan->desc_pool); | ||
434 | chan->desc_pool = NULL; | ||
435 | } | ||
436 | |||
437 | /** | ||
438 | * xilinx_vdma_chan_desc_cleanup - Clean channel descriptors | ||
439 | * @chan: Driver specific VDMA channel | ||
440 | */ | ||
441 | static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan) | ||
442 | { | ||
443 | struct xilinx_vdma_tx_descriptor *desc, *next; | ||
444 | unsigned long flags; | ||
445 | |||
446 | spin_lock_irqsave(&chan->lock, flags); | ||
447 | |||
448 | list_for_each_entry_safe(desc, next, &chan->done_list, node) { | ||
449 | dma_async_tx_callback callback; | ||
450 | void *callback_param; | ||
451 | |||
452 | /* Remove from the list of completed transactions */ | ||
453 | list_del(&desc->node); | ||
454 | |||
455 | /* Run the descriptor's completion callback, if any */ | ||
456 | callback = desc->async_tx.callback; | ||
457 | callback_param = desc->async_tx.callback_param; | ||
458 | if (callback) { | ||
459 | spin_unlock_irqrestore(&chan->lock, flags); | ||
460 | callback(callback_param); | ||
461 | spin_lock_irqsave(&chan->lock, flags); | ||
462 | } | ||
463 | |||
464 | /* Run any dependencies, then free the descriptor */ | ||
465 | dma_run_dependencies(&desc->async_tx); | ||
466 | xilinx_vdma_free_tx_descriptor(chan, desc); | ||
467 | } | ||
468 | |||
469 | spin_unlock_irqrestore(&chan->lock, flags); | ||
470 | } | ||
471 | |||
472 | /** | ||
473 | * xilinx_vdma_do_tasklet - Tasklet to clean up completed descriptors | ||
474 | * @data: Pointer to the Xilinx VDMA channel structure | ||
475 | */ | ||
476 | static void xilinx_vdma_do_tasklet(unsigned long data) | ||
477 | { | ||
478 | struct xilinx_vdma_chan *chan = (struct xilinx_vdma_chan *)data; | ||
479 | |||
480 | xilinx_vdma_chan_desc_cleanup(chan); | ||
481 | } | ||
482 | |||
483 | /** | ||
484 | * xilinx_vdma_alloc_chan_resources - Allocate channel resources | ||
485 | * @dchan: DMA channel | ||
486 | * | ||
487 | * Return: '0' on success and failure value on error | ||
488 | */ | ||
489 | static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan) | ||
490 | { | ||
491 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | ||
492 | |||
493 | /* Has this channel already been allocated? */ | ||
494 | if (chan->desc_pool) | ||
495 | return 0; | ||
496 | |||
497 | /* | ||
498 | * We need the descriptor to be aligned to 64 bytes | ||
499 | * to meet the Xilinx VDMA specification requirement. | ||
500 | */ | ||
501 | chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool", | ||
502 | chan->dev, | ||
503 | sizeof(struct xilinx_vdma_tx_segment), | ||
504 | __alignof__(struct xilinx_vdma_tx_segment), 0); | ||
505 | if (!chan->desc_pool) { | ||
506 | dev_err(chan->dev, | ||
507 | "unable to allocate channel %d descriptor pool\n", | ||
508 | chan->id); | ||
509 | return -ENOMEM; | ||
510 | } | ||
511 | |||
512 | dma_cookie_init(dchan); | ||
513 | return 0; | ||
514 | } | ||
515 | |||
516 | /** | ||
517 | * xilinx_vdma_tx_status - Get VDMA transaction status | ||
518 | * @dchan: DMA channel | ||
519 | * @cookie: Transaction identifier | ||
520 | * @txstate: Transaction state | ||
521 | * | ||
522 | * Return: DMA transaction status | ||
523 | */ | ||
524 | static enum dma_status xilinx_vdma_tx_status(struct dma_chan *dchan, | ||
525 | dma_cookie_t cookie, | ||
526 | struct dma_tx_state *txstate) | ||
527 | { | ||
528 | return dma_cookie_status(dchan, cookie, txstate); | ||
529 | } | ||
530 | |||
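/*
 * Minimal usage sketch (hypothetical client code, not part of the driver):
 * polling a submitted transaction through the generic dmaengine helpers,
 * which end up in xilinx_vdma_tx_status() above.  "chan" and "cookie" are
 * assumed to come from dma_request_slave_channel() and dmaengine_submit().
 */
#include <linux/dmaengine.h>

static bool example_vdma_frame_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	/* Delegates to the driver's device_tx_status (dma_cookie_status) */
	return dmaengine_tx_status(chan, cookie, &state) == DMA_COMPLETE;
}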
531 | /** | ||
532 | * xilinx_vdma_is_running - Check if VDMA channel is running | ||
533 | * @chan: Driver specific VDMA channel | ||
534 | * | ||
535 | * Return: 'true' if running, 'false' if not. | ||
536 | */ | ||
537 | static bool xilinx_vdma_is_running(struct xilinx_vdma_chan *chan) | ||
538 | { | ||
539 | return !(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & | ||
540 | XILINX_VDMA_DMASR_HALTED) && | ||
541 | (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) & | ||
542 | XILINX_VDMA_DMACR_RUNSTOP); | ||
543 | } | ||
544 | |||
545 | /** | ||
546 | * xilinx_vdma_is_idle - Check if VDMA channel is idle | ||
547 | * @chan: Driver specific VDMA channel | ||
548 | * | ||
549 | * Return: 'true' if idle, 'false' if not. | ||
550 | */ | ||
551 | static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan) | ||
552 | { | ||
553 | return vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & | ||
554 | XILINX_VDMA_DMASR_IDLE; | ||
555 | } | ||
556 | |||
557 | /** | ||
558 | * xilinx_vdma_halt - Halt VDMA channel | ||
559 | * @chan: Driver specific VDMA channel | ||
560 | */ | ||
561 | static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan) | ||
562 | { | ||
563 | int loop = XILINX_VDMA_LOOP_COUNT; | ||
564 | |||
565 | vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); | ||
566 | |||
567 | /* Wait for the hardware to halt */ | ||
568 | do { | ||
569 | if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & | ||
570 | XILINX_VDMA_DMASR_HALTED) | ||
571 | break; | ||
572 | } while (--loop); | ||
573 | |||
574 | if (!loop) { | ||
575 | dev_err(chan->dev, "Cannot stop channel %p: %x\n", | ||
576 | chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); | ||
577 | chan->err = true; | ||
578 | } | ||
579 | |||
580 | return; | ||
581 | } | ||
582 | |||
583 | /** | ||
584 | * xilinx_vdma_start - Start VDMA channel | ||
585 | * @chan: Driver specific VDMA channel | ||
586 | */ | ||
587 | static void xilinx_vdma_start(struct xilinx_vdma_chan *chan) | ||
588 | { | ||
589 | int loop = XILINX_VDMA_LOOP_COUNT; | ||
590 | |||
591 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); | ||
592 | |||
593 | /* Wait for the hardware to start */ | ||
594 | do { | ||
595 | if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & | ||
596 | XILINX_VDMA_DMASR_HALTED)) | ||
597 | break; | ||
598 | } while (--loop); | ||
599 | |||
600 | if (!loop) { | ||
601 | dev_err(chan->dev, "Cannot start channel %p: %x\n", | ||
602 | chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); | ||
603 | |||
604 | chan->err = true; | ||
605 | } | ||
606 | |||
607 | return; | ||
608 | } | ||
609 | |||
610 | /** | ||
611 | * xilinx_vdma_start_transfer - Starts VDMA transfer | ||
612 | * @chan: Driver specific channel struct pointer | ||
613 | */ | ||
614 | static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan) | ||
615 | { | ||
616 | struct xilinx_vdma_config *config = &chan->config; | ||
617 | struct xilinx_vdma_tx_descriptor *desc; | ||
618 | unsigned long flags; | ||
619 | u32 reg; | ||
620 | struct xilinx_vdma_tx_segment *head, *tail = NULL; | ||
621 | |||
622 | if (chan->err) | ||
623 | return; | ||
624 | |||
625 | spin_lock_irqsave(&chan->lock, flags); | ||
626 | |||
627 | /* There's already an active descriptor, bail out. */ | ||
628 | if (chan->active_desc) | ||
629 | goto out_unlock; | ||
630 | |||
631 | if (list_empty(&chan->pending_list)) | ||
632 | goto out_unlock; | ||
633 | |||
634 | desc = list_first_entry(&chan->pending_list, | ||
635 | struct xilinx_vdma_tx_descriptor, node); | ||
636 | |||
637 | /* If it is SG mode and hardware is busy, cannot submit */ | ||
638 | if (chan->has_sg && xilinx_vdma_is_running(chan) && | ||
639 | !xilinx_vdma_is_idle(chan)) { | ||
640 | dev_dbg(chan->dev, "DMA controller still busy\n"); | ||
641 | goto out_unlock; | ||
642 | } | ||
643 | |||
644 | /* | ||
645 | * If hardware is idle, then all descriptors on the running lists are | ||
646 | * done, start new transfers | ||
647 | */ | ||
648 | if (chan->has_sg) { | ||
649 | head = list_first_entry(&desc->segments, | ||
650 | struct xilinx_vdma_tx_segment, node); | ||
651 | tail = list_entry(desc->segments.prev, | ||
652 | struct xilinx_vdma_tx_segment, node); | ||
653 | |||
654 | vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys); | ||
655 | } | ||
656 | |||
657 | /* Configure the hardware using info in the config structure */ | ||
658 | reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR); | ||
659 | |||
660 | if (config->frm_cnt_en) | ||
661 | reg |= XILINX_VDMA_DMACR_FRAMECNT_EN; | ||
662 | else | ||
663 | reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN; | ||
664 | |||
665 | /* | ||
666 | * With SG, start with circular mode, so that BDs can be fetched. | ||
667 | * In direct register mode, if not parking, enable circular mode | ||
668 | */ | ||
669 | if (chan->has_sg || !config->park) | ||
670 | reg |= XILINX_VDMA_DMACR_CIRC_EN; | ||
671 | |||
672 | if (config->park) | ||
673 | reg &= ~XILINX_VDMA_DMACR_CIRC_EN; | ||
674 | |||
675 | vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, reg); | ||
676 | |||
677 | if (config->park && (config->park_frm >= 0) && | ||
678 | (config->park_frm < chan->num_frms)) { | ||
679 | if (chan->direction == DMA_MEM_TO_DEV) | ||
680 | vdma_write(chan, XILINX_VDMA_REG_PARK_PTR, | ||
681 | config->park_frm << | ||
682 | XILINX_VDMA_PARK_PTR_RD_REF_SHIFT); | ||
683 | else | ||
684 | vdma_write(chan, XILINX_VDMA_REG_PARK_PTR, | ||
685 | config->park_frm << | ||
686 | XILINX_VDMA_PARK_PTR_WR_REF_SHIFT); | ||
687 | } | ||
688 | |||
689 | /* Start the hardware */ | ||
690 | xilinx_vdma_start(chan); | ||
691 | |||
692 | if (chan->err) | ||
693 | goto out_unlock; | ||
694 | |||
695 | /* Start the transfer */ | ||
696 | if (chan->has_sg) { | ||
697 | vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys); | ||
698 | } else { | ||
699 | struct xilinx_vdma_tx_segment *segment, *last = NULL; | ||
700 | int i = 0; | ||
701 | |||
702 | list_for_each_entry(segment, &desc->segments, node) { | ||
703 | vdma_desc_write(chan, | ||
704 | XILINX_VDMA_REG_START_ADDRESS(i++), | ||
705 | segment->hw.buf_addr); | ||
706 | last = segment; | ||
707 | } | ||
708 | |||
709 | if (!last) | ||
710 | goto out_unlock; | ||
711 | |||
712 | /* HW expects these parameters to be the same for one transaction */ | ||
713 | vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize); | ||
714 | vdma_desc_write(chan, XILINX_VDMA_REG_FRMDLY_STRIDE, | ||
715 | last->hw.stride); | ||
716 | vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize); | ||
717 | } | ||
718 | |||
719 | list_del(&desc->node); | ||
720 | chan->active_desc = desc; | ||
721 | |||
722 | out_unlock: | ||
723 | spin_unlock_irqrestore(&chan->lock, flags); | ||
724 | } | ||
725 | |||
726 | /** | ||
727 | * xilinx_vdma_issue_pending - Issue pending transactions | ||
728 | * @dchan: DMA channel | ||
729 | */ | ||
730 | static void xilinx_vdma_issue_pending(struct dma_chan *dchan) | ||
731 | { | ||
732 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | ||
733 | |||
734 | xilinx_vdma_start_transfer(chan); | ||
735 | } | ||
736 | |||
737 | /** | ||
738 | * xilinx_vdma_complete_descriptor - Mark the active descriptor as complete | ||
739 | * @chan: Driver specific VDMA channel | ||
740 | * | ||
741 | * CONTEXT: hardirq | ||
742 | */ | ||
743 | static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan) | ||
744 | { | ||
745 | struct xilinx_vdma_tx_descriptor *desc; | ||
746 | unsigned long flags; | ||
747 | |||
748 | spin_lock_irqsave(&chan->lock, flags); | ||
749 | |||
750 | desc = chan->active_desc; | ||
751 | if (!desc) { | ||
752 | dev_dbg(chan->dev, "no running descriptors\n"); | ||
753 | goto out_unlock; | ||
754 | } | ||
755 | |||
756 | dma_cookie_complete(&desc->async_tx); | ||
757 | list_add_tail(&desc->node, &chan->done_list); | ||
758 | |||
759 | chan->active_desc = NULL; | ||
760 | |||
761 | out_unlock: | ||
762 | spin_unlock_irqrestore(&chan->lock, flags); | ||
763 | } | ||
764 | |||
765 | /** | ||
766 | * xilinx_vdma_reset - Reset VDMA channel | ||
767 | * @chan: Driver specific VDMA channel | ||
768 | * | ||
769 | * Return: '0' on success and failure value on error | ||
770 | */ | ||
771 | static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan) | ||
772 | { | ||
773 | int loop = XILINX_VDMA_LOOP_COUNT; | ||
774 | u32 tmp; | ||
775 | |||
776 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET); | ||
777 | |||
778 | tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) & | ||
779 | XILINX_VDMA_DMACR_RESET; | ||
780 | |||
781 | /* Wait for the hardware to finish reset */ | ||
782 | do { | ||
783 | tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) & | ||
784 | XILINX_VDMA_DMACR_RESET; | ||
785 | } while (--loop && tmp); | ||
786 | |||
787 | if (!loop) { | ||
788 | dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", | ||
789 | vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR), | ||
790 | vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); | ||
791 | return -ETIMEDOUT; | ||
792 | } | ||
793 | |||
794 | chan->err = false; | ||
795 | |||
796 | return 0; | ||
797 | } | ||
798 | |||
799 | /** | ||
800 | * xilinx_vdma_chan_reset - Reset VDMA channel and enable interrupts | ||
801 | * @chan: Driver specific VDMA channel | ||
802 | * | ||
803 | * Return: '0' on success and failure value on error | ||
804 | */ | ||
805 | static int xilinx_vdma_chan_reset(struct xilinx_vdma_chan *chan) | ||
806 | { | ||
807 | int err; | ||
808 | |||
809 | /* Reset VDMA */ | ||
810 | err = xilinx_vdma_reset(chan); | ||
811 | if (err) | ||
812 | return err; | ||
813 | |||
814 | /* Enable interrupts */ | ||
815 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, | ||
816 | XILINX_VDMA_DMAXR_ALL_IRQ_MASK); | ||
817 | |||
818 | return 0; | ||
819 | } | ||
820 | |||
821 | /** | ||
822 | * xilinx_vdma_irq_handler - VDMA Interrupt handler | ||
823 | * @irq: IRQ number | ||
824 | * @data: Pointer to the Xilinx VDMA channel structure | ||
825 | * | ||
826 | * Return: IRQ_HANDLED/IRQ_NONE | ||
827 | */ | ||
828 | static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data) | ||
829 | { | ||
830 | struct xilinx_vdma_chan *chan = data; | ||
831 | u32 status; | ||
832 | |||
833 | /* Read the status and ack the interrupts. */ | ||
834 | status = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR); | ||
835 | if (!(status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK)) | ||
836 | return IRQ_NONE; | ||
837 | |||
838 | vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR, | ||
839 | status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK); | ||
840 | |||
841 | if (status & XILINX_VDMA_DMASR_ERR_IRQ) { | ||
842 | /* | ||
843 | * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the | ||
844 | * error is recoverable, ignore it. Otherwise flag the error. | ||
845 | * | ||
846 | * Only recoverable errors can be cleared in the DMASR register, | ||
847 | * so make sure not to write 1 to any other error bit. | ||
848 | */ | ||
849 | u32 errors = status & XILINX_VDMA_DMASR_ALL_ERR_MASK; | ||
850 | vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR, | ||
851 | errors & XILINX_VDMA_DMASR_ERR_RECOVER_MASK); | ||
852 | |||
853 | if (!chan->flush_on_fsync || | ||
854 | (errors & ~XILINX_VDMA_DMASR_ERR_RECOVER_MASK)) { | ||
855 | dev_err(chan->dev, | ||
856 | "Channel %p has errors %x, cdr %x tdr %x\n", | ||
857 | chan, errors, | ||
858 | vdma_ctrl_read(chan, XILINX_VDMA_REG_CURDESC), | ||
859 | vdma_ctrl_read(chan, XILINX_VDMA_REG_TAILDESC)); | ||
860 | chan->err = true; | ||
861 | } | ||
862 | } | ||
863 | |||
864 | if (status & XILINX_VDMA_DMASR_DLY_CNT_IRQ) { | ||
865 | /* | ||
866 | * The device is taking longer than the delay threshold the | ||
867 | * user configured for responsiveness (delay count expired). | ||
868 | */ | ||
869 | dev_dbg(chan->dev, "Inter-packet latency too long\n"); | ||
870 | } | ||
871 | |||
872 | if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) { | ||
873 | xilinx_vdma_complete_descriptor(chan); | ||
874 | xilinx_vdma_start_transfer(chan); | ||
875 | } | ||
876 | |||
877 | tasklet_schedule(&chan->tasklet); | ||
878 | return IRQ_HANDLED; | ||
879 | } | ||
880 | |||
881 | /** | ||
882 | * xilinx_vdma_tx_submit - Submit DMA transaction | ||
883 | * @tx: Async transaction descriptor | ||
884 | * | ||
885 | * Return: cookie value on success and failure value on error | ||
886 | */ | ||
887 | static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
888 | { | ||
889 | struct xilinx_vdma_tx_descriptor *desc = to_vdma_tx_descriptor(tx); | ||
890 | struct xilinx_vdma_chan *chan = to_xilinx_chan(tx->chan); | ||
891 | dma_cookie_t cookie; | ||
892 | unsigned long flags; | ||
893 | int err; | ||
894 | |||
895 | if (chan->err) { | ||
896 | /* | ||
897 | * The channel is in an error state; try to recover it with a reset. | ||
898 | * If the reset fails, only a hard system reset can recover the channel. | ||
899 | */ | ||
900 | err = xilinx_vdma_chan_reset(chan); | ||
901 | if (err < 0) | ||
902 | return err; | ||
903 | } | ||
904 | |||
905 | spin_lock_irqsave(&chan->lock, flags); | ||
906 | |||
907 | cookie = dma_cookie_assign(tx); | ||
908 | |||
909 | /* Append the transaction to the pending transactions queue. */ | ||
910 | list_add_tail(&desc->node, &chan->pending_list); | ||
911 | |||
912 | /* Clear the allocated descriptor; it now sits on the pending list */ | ||
913 | chan->allocated_desc = NULL; | ||
914 | |||
915 | spin_unlock_irqrestore(&chan->lock, flags); | ||
916 | |||
917 | return cookie; | ||
918 | } | ||
919 | |||
920 | /** | ||
921 | * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for an | ||
922 | * interleaved DMA transaction | ||
923 | * @dchan: DMA channel | ||
924 | * @xt: Interleaved template pointer | ||
925 | * @flags: transfer ack flags | ||
926 | * | ||
927 | * Return: Async transaction descriptor on success and NULL on failure | ||
928 | */ | ||
929 | static struct dma_async_tx_descriptor * | ||
930 | xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, | ||
931 | struct dma_interleaved_template *xt, | ||
932 | unsigned long flags) | ||
933 | { | ||
934 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | ||
935 | struct xilinx_vdma_tx_descriptor *desc; | ||
936 | struct xilinx_vdma_tx_segment *segment, *prev = NULL; | ||
937 | struct xilinx_vdma_desc_hw *hw; | ||
938 | |||
939 | if (!is_slave_direction(xt->dir)) | ||
940 | return NULL; | ||
941 | |||
942 | if (!xt->numf || !xt->sgl[0].size) | ||
943 | return NULL; | ||
944 | |||
945 | /* Allocate a transaction descriptor. */ | ||
946 | desc = xilinx_vdma_alloc_tx_descriptor(chan); | ||
947 | if (!desc) | ||
948 | return NULL; | ||
949 | |||
950 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); | ||
951 | desc->async_tx.tx_submit = xilinx_vdma_tx_submit; | ||
952 | async_tx_ack(&desc->async_tx); | ||
953 | |||
954 | /* Allocate a transaction segment from the DMA pool */ | ||
955 | segment = xilinx_vdma_alloc_tx_segment(chan); | ||
956 | if (!segment) | ||
957 | goto error; | ||
958 | |||
959 | /* Fill in the hardware descriptor */ | ||
960 | hw = &segment->hw; | ||
961 | hw->vsize = xt->numf; | ||
962 | hw->hsize = xt->sgl[0].size; | ||
963 | hw->stride = xt->sgl[0].icg << | ||
964 | XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT; | ||
965 | hw->stride |= chan->config.frm_dly << | ||
966 | XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT; | ||
967 | |||
968 | if (xt->dir != DMA_MEM_TO_DEV) | ||
969 | hw->buf_addr = xt->dst_start; | ||
970 | else | ||
971 | hw->buf_addr = xt->src_start; | ||
972 | |||
973 | /* Link the previous segment's next-descriptor pointer to this segment */ | ||
974 | prev = list_last_entry(&desc->segments, | ||
975 | struct xilinx_vdma_tx_segment, node); | ||
976 | prev->hw.next_desc = segment->phys; | ||
977 | |||
978 | /* Insert the segment into the descriptor segments list. */ | ||
979 | list_add_tail(&segment->node, &desc->segments); | ||
980 | |||
981 | prev = segment; | ||
982 | |||
983 | /* Link the last hardware descriptor with the first. */ | ||
984 | segment = list_first_entry(&desc->segments, | ||
985 | struct xilinx_vdma_tx_segment, node); | ||
986 | prev->hw.next_desc = segment->phys; | ||
987 | |||
988 | return &desc->async_tx; | ||
989 | |||
990 | error: | ||
991 | xilinx_vdma_free_tx_descriptor(chan, desc); | ||
992 | return NULL; | ||
993 | } | ||
994 | |||
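/*
 * Minimal usage sketch (hypothetical client code, not part of the driver):
 * queueing one video frame through xilinx_vdma_dma_prep_interleaved() via
 * the generic interleaved dmaengine API.  The channel is assumed to come
 * from dma_request_slave_channel(); the frame geometry values are
 * placeholders.
 */
#include <linux/dmaengine.h>
#include <linux/slab.h>

static int example_vdma_queue_frame(struct dma_chan *chan, dma_addr_t buf,
				    size_t width_bytes, size_t height,
				    size_t stride_bytes)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* One data_chunk describes a single line of the frame */
	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->dir = DMA_DEV_TO_MEM;	/* S2MM: capture into memory */
	xt->dst_start = buf;
	xt->numf = height;		/* number of lines (vsize) */
	xt->frame_size = 1;
	xt->sgl[0].size = width_bytes;	/* active line length (hsize) */
	xt->sgl[0].icg = stride_bytes - width_bytes; /* gap between lines */

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	/* The prep above copies the geometry into its own descriptor */
	kfree(xt);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}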
995 | /** | ||
996 | * xilinx_vdma_terminate_all - Halt the channel and free descriptors | ||
997 | * @chan: Driver specific VDMA Channel pointer | ||
998 | */ | ||
999 | static void xilinx_vdma_terminate_all(struct xilinx_vdma_chan *chan) | ||
1000 | { | ||
1001 | /* Halt the DMA engine */ | ||
1002 | xilinx_vdma_halt(chan); | ||
1003 | |||
1004 | /* Remove and free all of the descriptors in the lists */ | ||
1005 | xilinx_vdma_free_descriptors(chan); | ||
1006 | } | ||
1007 | |||
1008 | /** | ||
1009 | * xilinx_vdma_channel_set_config - Configure VDMA channel | ||
1010 | * Run-time configuration for the AXI VDMA; it supports: | ||
1011 | * . halt the channel | ||
1012 | * . configure interrupt coalescing and inter-packet delay threshold | ||
1013 | * . start/stop parking | ||
1014 | * . enable genlock | ||
1015 | * | ||
1016 | * @dchan: DMA channel | ||
1017 | * @cfg: VDMA device configuration pointer | ||
1018 | * | ||
1019 | * Return: '0' on success and failure value on error | ||
1020 | */ | ||
1021 | int xilinx_vdma_channel_set_config(struct dma_chan *dchan, | ||
1022 | struct xilinx_vdma_config *cfg) | ||
1023 | { | ||
1024 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | ||
1025 | u32 dmacr; | ||
1026 | |||
1027 | if (cfg->reset) | ||
1028 | return xilinx_vdma_chan_reset(chan); | ||
1029 | |||
1030 | dmacr = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR); | ||
1031 | |||
1032 | chan->config.frm_dly = cfg->frm_dly; | ||
1033 | chan->config.park = cfg->park; | ||
1034 | |||
1035 | /* genlock settings */ | ||
1036 | chan->config.gen_lock = cfg->gen_lock; | ||
1037 | chan->config.master = cfg->master; | ||
1038 | |||
1039 | if (cfg->gen_lock && chan->genlock) { | ||
1040 | dmacr |= XILINX_VDMA_DMACR_GENLOCK_EN; | ||
1041 | dmacr |= cfg->master << XILINX_VDMA_DMACR_MASTER_SHIFT; | ||
1042 | } | ||
1043 | |||
1044 | chan->config.frm_cnt_en = cfg->frm_cnt_en; | ||
1045 | if (cfg->park) | ||
1046 | chan->config.park_frm = cfg->park_frm; | ||
1047 | else | ||
1048 | chan->config.park_frm = -1; | ||
1049 | |||
1050 | chan->config.coalesc = cfg->coalesc; | ||
1051 | chan->config.delay = cfg->delay; | ||
1052 | |||
1053 | if (cfg->coalesc <= XILINX_VDMA_DMACR_FRAME_COUNT_MAX) { | ||
1054 | dmacr |= cfg->coalesc << XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT; | ||
1055 | chan->config.coalesc = cfg->coalesc; | ||
1056 | } | ||
1057 | |||
1058 | if (cfg->delay <= XILINX_VDMA_DMACR_DELAY_MAX) { | ||
1059 | dmacr |= cfg->delay << XILINX_VDMA_DMACR_DELAY_SHIFT; | ||
1060 | chan->config.delay = cfg->delay; | ||
1061 | } | ||
1062 | |||
1063 | /* FSync Source selection */ | ||
1064 | dmacr &= ~XILINX_VDMA_DMACR_FSYNCSRC_MASK; | ||
1065 | dmacr |= cfg->ext_fsync << XILINX_VDMA_DMACR_FSYNCSRC_SHIFT; | ||
1066 | |||
1067 | vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, dmacr); | ||
1068 | |||
1069 | return 0; | ||
1070 | } | ||
1071 | EXPORT_SYMBOL(xilinx_vdma_channel_set_config); | ||
1072 | |||
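/*
 * Minimal usage sketch (hypothetical client code): run-time channel setup
 * through the exported xilinx_vdma_channel_set_config().  Only fields the
 * function above actually consumes are shown; the values are placeholders
 * and the client is assumed to see the header declaring
 * struct xilinx_vdma_config.
 */
static int example_vdma_configure(struct dma_chan *chan)
{
	struct xilinx_vdma_config cfg = {
		.frm_cnt_en	= 1,	/* interrupt on frame-count completion */
		.coalesc	= 1,	/* raise it after every frame */
		.delay		= 0,	/* no inter-packet delay interrupt */
		.park		= 0,	/* circular (non-parked) operation */
		.gen_lock	= 0,	/* free running, no genlock */
	};

	return xilinx_vdma_channel_set_config(chan, &cfg);
}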
1073 | /** | ||
1074 | * xilinx_vdma_device_control - Handle DMA control commands for the channel | ||
1075 | * @dchan: DMA Channel pointer | ||
1076 | * @cmd: DMA control command | ||
1077 | * @arg: Channel configuration | ||
1078 | * | ||
1079 | * Return: '0' on success and failure value on error | ||
1080 | */ | ||
1081 | static int xilinx_vdma_device_control(struct dma_chan *dchan, | ||
1082 | enum dma_ctrl_cmd cmd, unsigned long arg) | ||
1083 | { | ||
1084 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | ||
1085 | |||
1086 | if (cmd != DMA_TERMINATE_ALL) | ||
1087 | return -ENXIO; | ||
1088 | |||
1089 | xilinx_vdma_terminate_all(chan); | ||
1090 | |||
1091 | return 0; | ||
1092 | } | ||
1093 | |||
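/*
 * Minimal usage sketch (hypothetical client code): stopping a channel.  The
 * generic dmaengine_terminate_all() helper issues DMA_TERMINATE_ALL, which
 * xilinx_vdma_device_control() above maps to xilinx_vdma_terminate_all().
 */
#include <linux/dmaengine.h>

static void example_vdma_stop(struct dma_chan *chan)
{
	dmaengine_terminate_all(chan);
}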
1094 | /* ----------------------------------------------------------------------------- | ||
1095 | * Probe and remove | ||
1096 | */ | ||
1097 | |||
1098 | /** | ||
1099 | * xilinx_vdma_chan_remove - Per Channel remove function | ||
1100 | * @chan: Driver specific VDMA channel | ||
1101 | */ | ||
1102 | static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan) | ||
1103 | { | ||
1104 | /* Disable all interrupts */ | ||
1105 | vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, | ||
1106 | XILINX_VDMA_DMAXR_ALL_IRQ_MASK); | ||
1107 | |||
1108 | if (chan->irq > 0) | ||
1109 | free_irq(chan->irq, chan); | ||
1110 | |||
1111 | tasklet_kill(&chan->tasklet); | ||
1112 | |||
1113 | list_del(&chan->common.device_node); | ||
1114 | } | ||
1115 | |||
1116 | /** | ||
1117 | * xilinx_vdma_chan_probe - Per Channel Probing | ||
1118 | * It gets the channel features from the device tree entry and | ||
1119 | * initializes the special channel handling routines. | ||
1120 | * | ||
1121 | * @xdev: Driver specific device structure | ||
1122 | * @node: Device node | ||
1123 | * | ||
1124 | * Return: '0' on success and failure value on error | ||
1125 | */ | ||
1126 | static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev, | ||
1127 | struct device_node *node) | ||
1128 | { | ||
1129 | struct xilinx_vdma_chan *chan; | ||
1130 | bool has_dre = false; | ||
1131 | u32 value, width; | ||
1132 | int err; | ||
1133 | |||
1134 | /* Allocate and initialize the channel structure */ | ||
1135 | chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL); | ||
1136 | if (!chan) | ||
1137 | return -ENOMEM; | ||
1138 | |||
1139 | chan->dev = xdev->dev; | ||
1140 | chan->xdev = xdev; | ||
1141 | chan->has_sg = xdev->has_sg; | ||
1142 | |||
1143 | spin_lock_init(&chan->lock); | ||
1144 | INIT_LIST_HEAD(&chan->pending_list); | ||
1145 | INIT_LIST_HEAD(&chan->done_list); | ||
1146 | |||
1147 | /* Retrieve the channel properties from the device tree */ | ||
1148 | has_dre = of_property_read_bool(node, "xlnx,include-dre"); | ||
1149 | |||
1150 | chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode"); | ||
1151 | |||
1152 | err = of_property_read_u32(node, "xlnx,datawidth", &value); | ||
1153 | if (err) { | ||
1154 | dev_err(xdev->dev, "missing xlnx,datawidth property\n"); | ||
1155 | return err; | ||
1156 | } | ||
1157 | width = value >> 3; /* Convert bits to bytes */ | ||
1158 | |||
1159 | /* If data width is greater than 8 bytes, DRE is not in hw */ | ||
1160 | if (width > 8) | ||
1161 | has_dre = false; | ||
1162 | |||
1163 | if (!has_dre) | ||
1164 | xdev->common.copy_align = fls(width - 1); | ||
1165 | |||
1166 | if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) { | ||
1167 | chan->direction = DMA_MEM_TO_DEV; | ||
1168 | chan->id = 0; | ||
1169 | |||
1170 | chan->ctrl_offset = XILINX_VDMA_MM2S_CTRL_OFFSET; | ||
1171 | chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; | ||
1172 | |||
1173 | if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH || | ||
1174 | xdev->flush_on_fsync == XILINX_VDMA_FLUSH_MM2S) | ||
1175 | chan->flush_on_fsync = true; | ||
1176 | } else if (of_device_is_compatible(node, | ||
1177 | "xlnx,axi-vdma-s2mm-channel")) { | ||
1178 | chan->direction = DMA_DEV_TO_MEM; | ||
1179 | chan->id = 1; | ||
1180 | |||
1181 | chan->ctrl_offset = XILINX_VDMA_S2MM_CTRL_OFFSET; | ||
1182 | chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; | ||
1183 | |||
1184 | if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH || | ||
1185 | xdev->flush_on_fsync == XILINX_VDMA_FLUSH_S2MM) | ||
1186 | chan->flush_on_fsync = true; | ||
1187 | } else { | ||
1188 | dev_err(xdev->dev, "Invalid channel compatible node\n"); | ||
1189 | return -EINVAL; | ||
1190 | } | ||
1191 | |||
1192 | /* Request the interrupt */ | ||
1193 | chan->irq = irq_of_parse_and_map(node, 0); | ||
1194 | err = request_irq(chan->irq, xilinx_vdma_irq_handler, IRQF_SHARED, | ||
1195 | "xilinx-vdma-controller", chan); | ||
1196 | if (err) { | ||
1197 | dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); | ||
1198 | return err; | ||
1199 | } | ||
1200 | |||
1201 | /* Initialize the tasklet */ | ||
1202 | tasklet_init(&chan->tasklet, xilinx_vdma_do_tasklet, | ||
1203 | (unsigned long)chan); | ||
1204 | |||
1205 | /* | ||
1206 | * Initialize the DMA channel and add it to the DMA engine channels | ||
1207 | * list. | ||
1208 | */ | ||
1209 | chan->common.device = &xdev->common; | ||
1210 | |||
1211 | list_add_tail(&chan->common.device_node, &xdev->common.channels); | ||
1212 | xdev->chan[chan->id] = chan; | ||
1213 | |||
1214 | /* Reset the channel */ | ||
1215 | err = xilinx_vdma_chan_reset(chan); | ||
1216 | if (err < 0) { | ||
1217 | dev_err(xdev->dev, "Reset channel failed\n"); | ||
1218 | return err; | ||
1219 | } | ||
1220 | |||
1221 | return 0; | ||
1222 | } | ||
1223 | |||
1224 | /** | ||
1225 | * of_dma_xilinx_xlate - Translation function | ||
1226 | * @dma_spec: Pointer to DMA specifier as found in the device tree | ||
1227 | * @ofdma: Pointer to DMA controller data | ||
1228 | * | ||
1229 | * Return: DMA channel pointer on success and NULL on error | ||
1230 | */ | ||
1231 | static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, | ||
1232 | struct of_dma *ofdma) | ||
1233 | { | ||
1234 | struct xilinx_vdma_device *xdev = ofdma->of_dma_data; | ||
1235 | int chan_id = dma_spec->args[0]; | ||
1236 | |||
1237 | if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE) | ||
1238 | return NULL; | ||
1239 | |||
1240 | return dma_get_slave_channel(&xdev->chan[chan_id]->common); | ||
1241 | } | ||
1242 | |||
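/*
 * Minimal usage sketch (hypothetical client code): looking up a channel via
 * the device tree.  The request resolves through of_dma_xilinx_xlate()
 * above; "vdma0" is a placeholder for an entry in the client node's
 * dma-names property.
 */
#include <linux/dmaengine.h>

static struct dma_chan *example_vdma_get_channel(struct device *dev)
{
	return dma_request_slave_channel(dev, "vdma0");
}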
1243 | /** | ||
1244 | * xilinx_vdma_probe - Driver probe function | ||
1245 | * @pdev: Pointer to the platform_device structure | ||
1246 | * | ||
1247 | * Return: '0' on success and failure value on error | ||
1248 | */ | ||
1249 | static int xilinx_vdma_probe(struct platform_device *pdev) | ||
1250 | { | ||
1251 | struct device_node *node = pdev->dev.of_node; | ||
1252 | struct xilinx_vdma_device *xdev; | ||
1253 | struct device_node *child; | ||
1254 | struct resource *io; | ||
1255 | u32 num_frames; | ||
1256 | int i, err; | ||
1257 | |||
1258 | /* Allocate and initialize the DMA engine structure */ | ||
1259 | xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); | ||
1260 | if (!xdev) | ||
1261 | return -ENOMEM; | ||
1262 | |||
1263 | xdev->dev = &pdev->dev; | ||
1264 | |||
1265 | /* Request and map I/O memory */ | ||
1266 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1267 | xdev->regs = devm_ioremap_resource(&pdev->dev, io); | ||
1268 | if (IS_ERR(xdev->regs)) | ||
1269 | return PTR_ERR(xdev->regs); | ||
1270 | |||
1271 | /* Retrieve the DMA engine properties from the device tree */ | ||
1272 | xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); | ||
1273 | |||
1274 | err = of_property_read_u32(node, "xlnx,num-fstores", &num_frames); | ||
1275 | if (err < 0) { | ||
1276 | dev_err(xdev->dev, "missing xlnx,num-fstores property\n"); | ||
1277 | return err; | ||
1278 | } | ||
1279 | |||
1280 | err = of_property_read_u32(node, "xlnx,flush-fsync", | ||
1281 | &xdev->flush_on_fsync); | ||
1282 | if (err < 0) | ||
1283 | dev_warn(xdev->dev, "missing xlnx,flush-fsync property\n"); | ||
1284 | |||
1285 | /* Initialize the DMA engine */ | ||
1286 | xdev->common.dev = &pdev->dev; | ||
1287 | |||
1288 | INIT_LIST_HEAD(&xdev->common.channels); | ||
1289 | dma_cap_set(DMA_SLAVE, xdev->common.cap_mask); | ||
1290 | dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); | ||
1291 | |||
1292 | xdev->common.device_alloc_chan_resources = | ||
1293 | xilinx_vdma_alloc_chan_resources; | ||
1294 | xdev->common.device_free_chan_resources = | ||
1295 | xilinx_vdma_free_chan_resources; | ||
1296 | xdev->common.device_prep_interleaved_dma = | ||
1297 | xilinx_vdma_dma_prep_interleaved; | ||
1298 | xdev->common.device_control = xilinx_vdma_device_control; | ||
1299 | xdev->common.device_tx_status = xilinx_vdma_tx_status; | ||
1300 | xdev->common.device_issue_pending = xilinx_vdma_issue_pending; | ||
1301 | |||
1302 | platform_set_drvdata(pdev, xdev); | ||
1303 | |||
1304 | /* Initialize the channels */ | ||
1305 | for_each_child_of_node(node, child) { | ||
1306 | err = xilinx_vdma_chan_probe(xdev, child); | ||
1307 | if (err < 0) | ||
1308 | goto error; | ||
1309 | } | ||
1310 | |||
1311 | for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) | ||
1312 | if (xdev->chan[i]) | ||
1313 | xdev->chan[i]->num_frms = num_frames; | ||
1314 | |||
1315 | /* Register the DMA engine with the core */ | ||
1316 | dma_async_device_register(&xdev->common); | ||
1317 | |||
1318 | err = of_dma_controller_register(node, of_dma_xilinx_xlate, | ||
1319 | xdev); | ||
1320 | if (err < 0) { | ||
1321 | dev_err(&pdev->dev, "Unable to register DMA to DT\n"); | ||
1322 | dma_async_device_unregister(&xdev->common); | ||
1323 | goto error; | ||
1324 | } | ||
1325 | |||
1326 | dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); | ||
1327 | |||
1328 | return 0; | ||
1329 | |||
1330 | error: | ||
1331 | for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) | ||
1332 | if (xdev->chan[i]) | ||
1333 | xilinx_vdma_chan_remove(xdev->chan[i]); | ||
1334 | |||
1335 | return err; | ||
1336 | } | ||
1337 | |||
1338 | /** | ||
1339 | * xilinx_vdma_remove - Driver remove function | ||
1340 | * @pdev: Pointer to the platform_device structure | ||
1341 | * | ||
1342 | * Return: Always '0' | ||
1343 | */ | ||
1344 | static int xilinx_vdma_remove(struct platform_device *pdev) | ||
1345 | { | ||
1346 | struct xilinx_vdma_device *xdev = platform_get_drvdata(pdev); | ||
1347 | int i; | ||
1348 | |||
1349 | of_dma_controller_free(pdev->dev.of_node); | ||
1350 | |||
1351 | dma_async_device_unregister(&xdev->common); | ||
1352 | |||
1353 | for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) | ||
1354 | if (xdev->chan[i]) | ||
1355 | xilinx_vdma_chan_remove(xdev->chan[i]); | ||
1356 | |||
1357 | return 0; | ||
1358 | } | ||
1359 | |||
1360 | static const struct of_device_id xilinx_vdma_of_ids[] = { | ||
1361 | { .compatible = "xlnx,axi-vdma-1.00.a",}, | ||
1362 | {} | ||
1363 | }; | ||
1364 | |||
1365 | static struct platform_driver xilinx_vdma_driver = { | ||
1366 | .driver = { | ||
1367 | .name = "xilinx-vdma", | ||
1368 | .owner = THIS_MODULE, | ||
1369 | .of_match_table = xilinx_vdma_of_ids, | ||
1370 | }, | ||
1371 | .probe = xilinx_vdma_probe, | ||
1372 | .remove = xilinx_vdma_remove, | ||
1373 | }; | ||
1374 | |||
1375 | module_platform_driver(xilinx_vdma_driver); | ||
1376 | |||
1377 | MODULE_AUTHOR("Xilinx, Inc."); | ||
1378 | MODULE_DESCRIPTION("Xilinx VDMA driver"); | ||
1379 | MODULE_LICENSE("GPL v2"); | ||