author		Linus Torvalds <torvalds@linux-foundation.org>	2018-08-18 18:55:59 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-08-18 18:55:59 -0400
commit		13bf2cf9e2d1e0e56088ec6342c2726704100647 (patch)
tree		b75f76b2376244e64471dd5c6867aaaf3cb0298c
parent		bbd60bffaf780464298cb7a39852f7f1065f1726 (diff)
parent		3257d86182cc27eda83d6854787256641f7c574b (diff)
Merge tag 'dmaengine-4.19-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull DMAengine updates from Vinod Koul:
 "This round brings a couple of framework changes, a new driver and the
  usual driver updates:

   - new managed helper for dmaengine framework registration

   - split dmaengine pause capability to pause and resume and allow
     drivers to report that individually

   - update dma_request_chan_by_mask() to handle deferred probing

   - move imx-sdma to use virt-dma

   - new driver for Actions Semi Owl family S900 controller

   - minor updates to intel, renesas, mv_xor, pl330 etc"

* tag 'dmaengine-4.19-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (46 commits)
  dmaengine: Add Actions Semi Owl family S900 DMA driver
  dt-bindings: dmaengine: Add binding for Actions Semi Owl SoCs
  dmaengine: sh: rcar-dmac: Should not stop the DMAC by rcar_dmac_sync_tcr()
  dmaengine: mic_x100_dma: use the new helper to simplify the code
  dmaengine: add a new helper dmaenginem_async_device_register
  dmaengine: imx-sdma: add memcpy interface
  dmaengine: imx-sdma: add SDMA_BD_MAX_CNT to replace '0xffff'
  dmaengine: dma_request_chan_by_mask() to handle deferred probing
  dmaengine: pl330: fix irq race with terminate_all
  dmaengine: Revert "dmaengine: mv_xor_v2: enable COMPILE_TEST"
  dmaengine: mv_xor_v2: use {lower,upper}_32_bits to configure HW descriptor address
  dmaengine: mv_xor_v2: enable COMPILE_TEST
  dmaengine: mv_xor_v2: move unmap to before callback
  dmaengine: mv_xor_v2: convert callback to helper function
  dmaengine: mv_xor_v2: kill the tasklets upon exit
  dmaengine: mv_xor_v2: explicitly freeup irq
  dmaengine: sh: rcar-dmac: Add dma_pause operation
  dmaengine: sh: rcar-dmac: add a new function to clear CHCR.DE with barrier
  dmaengine: idma64: Support dmaengine_terminate_sync()
  dmaengine: hsu: Support dmaengine_terminate_sync()
  ...
-rw-r--r--  Documentation/devicetree/bindings/dma/owl-dma.txt | 47
-rw-r--r--  Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt | 2
-rw-r--r--  Documentation/driver-model/devres.txt | 1
-rw-r--r--  crypto/async_tx/async_pq.c | 10
-rw-r--r--  crypto/async_tx/raid6test.c | 4
-rw-r--r--  drivers/dma/Kconfig | 9
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/dmaengine.c | 53
-rw-r--r--  drivers/dma/hsu/hsu.c | 8
-rw-r--r--  drivers/dma/idma64.c | 8
-rw-r--r--  drivers/dma/imx-sdma.c | 578
-rw-r--r--  drivers/dma/ioat/dma.c | 6
-rw-r--r--  drivers/dma/mic_x100_dma.c | 8
-rw-r--r--  drivers/dma/mv_xor_v2.c | 16
-rw-r--r--  drivers/dma/nbpfaxi.c | 1
-rw-r--r--  drivers/dma/owl-dma.c | 971
-rw-r--r--  drivers/dma/pl330.c | 12
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 112
-rw-r--r--  drivers/dma/ste_dma40.c | 15
-rw-r--r--  drivers/dma/stm32-dma.c | 4
-rw-r--r--  drivers/dma/stm32-mdma.c | 8
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 22
-rw-r--r--  include/linux/dma/xilinx_dma.h | 2
-rw-r--r--  include/linux/dmaengine.h | 6
-rw-r--r--  sound/soc/soc-generic-dmaengine-pcm.c | 2
26 files changed, 1600 insertions, 307 deletions
diff --git a/Documentation/devicetree/bindings/dma/owl-dma.txt b/Documentation/devicetree/bindings/dma/owl-dma.txt
new file mode 100644
index 000000000000..03e9bb12b75f
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/owl-dma.txt
@@ -0,0 +1,47 @@
+* Actions Semi Owl SoCs DMA controller
+
+This binding follows the generic DMA bindings defined in dma.txt.
+
+Required properties:
+- compatible: Should be "actions,s900-dma".
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain 4 interrupts shared by all channels.
+- #dma-cells: Must be <1>. Used to represent the number of integer
+  cells in the dmas property of client device.
+- dma-channels: Physical channels supported.
+- dma-requests: Number of DMA request signals supported by the controller.
+  Refer to Documentation/devicetree/bindings/dma/dma.txt
+- clocks: Phandle and Specifier of the clock feeding the DMA controller.
+
+Example:
+
+Controller:
+	dma: dma-controller@e0260000 {
+		compatible = "actions,s900-dma";
+		reg = <0x0 0xe0260000 0x0 0x1000>;
+		interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+		#dma-cells = <1>;
+		dma-channels = <12>;
+		dma-requests = <46>;
+		clocks = <&clock CLK_DMAC>;
+	};
+
+Client:
+
+DMA clients connected to the Actions Semi Owl SoCs DMA controller must
+use the format described in the dma.txt file, using a two-cell specifier
+for each channel.
+
+The two cells in order are:
+1. A phandle pointing to the DMA controller.
+2. The channel id.
+
+uart5: serial@e012a000 {
+	...
+	dma-names = "tx", "rx";
+	dmas = <&dma 26>, <&dma 27>;
+	...
+};
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index b1ba639554c0..946229c48657 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -29,6 +29,7 @@ Required Properties:
29 - "renesas,dmac-r8a77965" (R-Car M3-N) 29 - "renesas,dmac-r8a77965" (R-Car M3-N)
30 - "renesas,dmac-r8a77970" (R-Car V3M) 30 - "renesas,dmac-r8a77970" (R-Car V3M)
31 - "renesas,dmac-r8a77980" (R-Car V3H) 31 - "renesas,dmac-r8a77980" (R-Car V3H)
32 - "renesas,dmac-r8a77990" (R-Car E3)
32 - "renesas,dmac-r8a77995" (R-Car D3) 33 - "renesas,dmac-r8a77995" (R-Car D3)
33 34
34- reg: base address and length of the registers block for the DMAC 35- reg: base address and length of the registers block for the DMAC
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
index a2b8bfaec43c..174af2c45e77 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
@@ -66,6 +66,8 @@ Optional child node properties:
 Optional child node properties for VDMA:
 - xlnx,genlock-mode: Tells Genlock synchronization is
 	enabled/disabled in hardware.
+- xlnx,enable-vert-flip: Tells vertical flip is
+	enabled/disabled in hardware (S2MM path).
 Optional child node properties for AXI DMA:
 -dma-channels: Number of dma channels in child node.
 
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 7c1bb3d0c222..43681ca0837f 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -240,6 +240,7 @@ CLOCK
   devm_of_clk_add_hw_provider()
 
 DMA
+  dmaenginem_async_device_register()
   dmam_alloc_coherent()
   dmam_alloc_attrs()
   dmam_declare_coherent_memory()
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 56bd612927ab..80dc567801ec 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -42,6 +42,8 @@ static struct page *pq_scribble_page;
 #define P(b, d) (b[d-2])
 #define Q(b, d) (b[d-1])
 
+#define MAX_DISKS 255
+
 /**
  * do_async_gen_syndrome - asynchronously calculate P and/or Q
  */
@@ -184,7 +186,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dmaengine_unmap_data *unmap = NULL;
 
-	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
+	BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));
 
 	if (device)
 		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
@@ -196,7 +198,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	    is_dma_pq_aligned(device, offset, 0, len)) {
 		struct dma_async_tx_descriptor *tx;
 		enum dma_ctrl_flags dma_flags = 0;
-		unsigned char coefs[src_cnt];
+		unsigned char coefs[MAX_DISKS];
 		int i, j;
 
 		/* run the p+q asynchronously */
@@ -299,11 +301,11 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx;
-	unsigned char coefs[disks-2];
+	unsigned char coefs[MAX_DISKS];
 	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 	struct dmaengine_unmap_data *unmap = NULL;
 
-	BUG_ON(disks < 4);
+	BUG_ON(disks < 4 || disks > MAX_DISKS);
 
 	if (device)
 		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index dad95f45b88f..a5edaabae12a 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -81,11 +81,13 @@ static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, stru
 		init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
 		tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
 	} else {
-		struct page *blocks[disks];
+		struct page *blocks[NDISKS];
 		struct page *dest;
 		int count = 0;
 		int i;
 
+		BUG_ON(disks > NDISKS);
+
 		/* data+Q failure.  Reconstruct data from P,
 		 * then rebuild syndrome
 		 */
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ca1680afa20a..dacf3f42426d 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -250,6 +250,7 @@ config IMX_SDMA
 	tristate "i.MX SDMA support"
 	depends on ARCH_MXC
 	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
 	help
 	  Support the i.MX SDMA engine. This engine is integrated into
 	  Freescale i.MX25/31/35/51/53/6 chips.
@@ -413,6 +414,14 @@ config NBPFAXI_DMA
 	help
 	  Support for "Type-AXI" NBPF DMA IPs from Renesas
 
+config OWL_DMA
+	tristate "Actions Semi Owl SoCs DMA support"
+	depends on ARCH_ACTIONS
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the Actions Semi Owl SoCs DMA controller.
+
 config PCH_DMA
 	tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
 	depends on PCI && (X86_32 || COMPILE_TEST)
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 203a99d68315..c91702d88b95 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_MV_XOR_V2) += mv_xor_v2.o
 obj-$(CONFIG_MXS_DMA) += mxs-dma.o
 obj-$(CONFIG_MX3_IPU) += ipu/
 obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
+obj-$(CONFIG_OWL_DMA) += owl-dma.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 08ba8473a284..272bed6c8ba7 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -500,12 +500,8 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 	caps->max_burst = device->max_burst;
 	caps->residue_granularity = device->residue_granularity;
 	caps->descriptor_reuse = device->descriptor_reuse;
-
-	/*
-	 * Some devices implement only pause (e.g. to get residuum) but no
-	 * resume. However cmd_pause is advertised as pause AND resume.
-	 */
-	caps->cmd_pause = !!(device->device_pause && device->device_resume);
+	caps->cmd_pause = !!device->device_pause;
+	caps->cmd_resume = !!device->device_resume;
 	caps->cmd_terminate = !!device->device_terminate_all;
 
 	return 0;
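
With this split, a dmaengine client can probe pause and resume support
independently instead of seeing them folded into one flag. A minimal sketch of
a client-side check (the surrounding driver context is hypothetical):

	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;

	/* pause-only hardware can still report residue while halted */
	if (caps.cmd_pause)
		dmaengine_pause(chan);

	/* only restart if the controller really implements resume */
	if (caps.cmd_resume)
		dmaengine_resume(chan);
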
@@ -774,8 +770,14 @@ struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
 		return ERR_PTR(-ENODEV);
 
 	chan = __dma_request_channel(mask, NULL, NULL);
-	if (!chan)
-		chan = ERR_PTR(-ENODEV);
+	if (!chan) {
+		mutex_lock(&dma_list_mutex);
+		if (list_empty(&dma_device_list))
+			chan = ERR_PTR(-EPROBE_DEFER);
+		else
+			chan = ERR_PTR(-ENODEV);
+		mutex_unlock(&dma_list_mutex);
+	}
 
 	return chan;
 }
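
A caller can now distinguish "no DMA controller registered yet" from "no
matching channel". A minimal sketch of the intended use in a client probe path
(hypothetical driver context):

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan)) {
		/* no controller probed yet: retry this probe later */
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		return PTR_ERR(chan);	/* controllers exist, none match */
	}
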
@@ -1139,6 +1141,41 @@ void dma_async_device_unregister(struct dma_device *device)
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
 
+static void dmam_device_release(struct device *dev, void *res)
+{
+	struct dma_device *device;
+
+	device = *(struct dma_device **)res;
+	dma_async_device_unregister(device);
+}
+
+/**
+ * dmaenginem_async_device_register - registers DMA devices found
+ * @device:	&dma_device
+ *
+ * The operation is managed and will be undone on driver detach.
+ */
+int dmaenginem_async_device_register(struct dma_device *device)
+{
+	void *p;
+	int ret;
+
+	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	ret = dma_async_device_register(device);
+	if (!ret) {
+		*(struct dma_device **)p = device;
+		devres_add(device->dev, p);
+	} else {
+		devres_free(p);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(dmaenginem_async_device_register);
+
 struct dmaengine_unmap_pool {
 	struct kmem_cache *cache;
 	const char *name;
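
With the managed variant, a driver that registers its dma_device in probe() no
longer needs an explicit dma_async_device_unregister() in its remove path; the
unregister runs automatically when the device detaches. A minimal probe sketch
(the foo_dma structure and function names are hypothetical):

	static int foo_dma_probe(struct platform_device *pdev)
	{
		struct foo_dma *fd = devm_kzalloc(&pdev->dev, sizeof(*fd),
						  GFP_KERNEL);
		if (!fd)
			return -ENOMEM;

		fd->dma.dev = &pdev->dev;
		/* ... fill in capabilities and callbacks ... */

		/* managed: undone automatically on driver detach */
		return dmaenginem_async_device_register(&fd->dma);
	}
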
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 29d04ca71d52..202ffa9f7611 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -413,6 +413,13 @@ static void hsu_dma_free_chan_resources(struct dma_chan *chan)
 	vchan_free_chan_resources(to_virt_chan(chan));
 }
 
+static void hsu_dma_synchronize(struct dma_chan *chan)
+{
+	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
+
+	vchan_synchronize(&hsuc->vchan);
+}
+
 int hsu_dma_probe(struct hsu_dma_chip *chip)
 {
 	struct hsu_dma *hsu;
@@ -459,6 +466,7 @@ int hsu_dma_probe(struct hsu_dma_chip *chip)
 	hsu->dma.device_pause = hsu_dma_pause;
 	hsu->dma.device_resume = hsu_dma_resume;
 	hsu->dma.device_terminate_all = hsu_dma_terminate_all;
+	hsu->dma.device_synchronize = hsu_dma_synchronize;
 
 	hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
 	hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index e5c911200bdb..1fbf9cb9b742 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -496,6 +496,13 @@ static int idma64_terminate_all(struct dma_chan *chan)
 	return 0;
 }
 
+static void idma64_synchronize(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+	vchan_synchronize(&idma64c->vchan);
+}
+
 static int idma64_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct idma64_chan *idma64c = to_idma64_chan(chan);
@@ -583,6 +590,7 @@ static int idma64_probe(struct idma64_chip *chip)
 	idma64->dma.device_pause = idma64_pause;
 	idma64->dma.device_resume = idma64_resume;
 	idma64->dma.device_terminate_all = idma64_terminate_all;
+	idma64->dma.device_synchronize = idma64_synchronize;
 
 	idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS;
 	idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS;
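
Providing device_synchronize is what lets clients of these controllers call
dmaengine_terminate_sync(), which terminates the channel and then waits until
no descriptor callbacks can still be running. A sketch of the client-side
teardown pattern this enables (hypothetical driver context):

	/* stop the channel and wait for in-flight callbacks to finish */
	dmaengine_terminate_sync(chan);

	/* now it is safe to free memory the callbacks might have touched */
	kfree(drv->xfer_buf);
	dma_release_channel(chan);
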
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f077992635c2..b4ec2d20e661 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,6 +24,7 @@
 #include <linux/spinlock.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
 #include <linux/firmware.h>
 #include <linux/slab.h>
 #include <linux/platform_device.h>
@@ -41,6 +42,7 @@
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 
 #include "dmaengine.h"
+#include "virt-dma.h"
 
 /* SDMA registers */
 #define SDMA_H_C0PTR	0x000
@@ -183,6 +185,7 @@
  * Mode/Count of data node descriptors - IPCv2
  */
 struct sdma_mode_count {
+#define SDMA_BD_MAX_CNT	0xffff
 	u32 count   : 16; /* size of the buffer pointed by this BD */
 	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
 	u32 command :  8; /* command mostly used for channel 0 */
@@ -200,9 +203,9 @@ struct sdma_buffer_descriptor {
 /**
  * struct sdma_channel_control - Channel control Block
  *
- * @current_bd_ptr	current buffer descriptor processed
- * @base_bd_ptr		first element of buffer descriptor array
- * @unused		padding. The SDMA engine expects an array of 128 byte
+ * @current_bd_ptr:	current buffer descriptor processed
+ * @base_bd_ptr:	first element of buffer descriptor array
+ * @unused:		padding. The SDMA engine expects an array of 128 byte
  *			control blocks
  */
 struct sdma_channel_control {
@@ -215,10 +218,13 @@ struct sdma_channel_control {
  * struct sdma_state_registers - SDMA context for a channel
  *
  * @pc:		program counter
+ * @unused1:	unused
  * @t:		test bit: status of arithmetic & test instruction
  * @rpc:	return program counter
+ * @unused0:	unused
  * @sf:		source fault while loading data
  * @spc:	loop start program counter
+ * @unused2:	unused
  * @df:	destination fault while storing data
  * @epc:	loop end program counter
  * @lm:	loop mode
256 * @dsa: dedicated core source address register 262 * @dsa: dedicated core source address register
257 * @ds: dedicated core status register 263 * @ds: dedicated core status register
258 * @dd: dedicated core data register 264 * @dd: dedicated core data register
265 * @scratch0: 1st word of dedicated ram for context switch
266 * @scratch1: 2nd word of dedicated ram for context switch
267 * @scratch2: 3rd word of dedicated ram for context switch
268 * @scratch3: 4th word of dedicated ram for context switch
269 * @scratch4: 5th word of dedicated ram for context switch
270 * @scratch5: 6th word of dedicated ram for context switch
271 * @scratch6: 7th word of dedicated ram for context switch
272 * @scratch7: 8th word of dedicated ram for context switch
259 */ 273 */
260struct sdma_context_data { 274struct sdma_context_data {
261 struct sdma_state_registers channel_state; 275 struct sdma_state_registers channel_state;
@@ -284,25 +298,67 @@ struct sdma_context_data {
 	u32  scratch7;
 } __attribute__ ((packed));
 
-#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
 
 struct sdma_engine;
 
 /**
+ * struct sdma_desc - descriptor structure for one transfer
+ * @vd:			descriptor for virt dma
+ * @num_bd:		number of descriptors currently handling
+ * @bd_phys:		physical address of bd
+ * @buf_tail:		ID of the buffer that was processed
+ * @buf_ptail:		ID of the previous buffer that was processed
+ * @period_len:		period length, used in cyclic.
+ * @chn_real_count:	the real count updated from bd->mode.count
+ * @chn_count:		the transfer count set
+ * @sdmac:		sdma_channel pointer
+ * @bd:			pointer of allocate bd
+ */
+struct sdma_desc {
+	struct virt_dma_desc	vd;
+	unsigned int		num_bd;
+	dma_addr_t		bd_phys;
+	unsigned int		buf_tail;
+	unsigned int		buf_ptail;
+	unsigned int		period_len;
+	unsigned int		chn_real_count;
+	unsigned int		chn_count;
+	struct sdma_channel	*sdmac;
+	struct sdma_buffer_descriptor *bd;
+};
+
+/**
  * struct sdma_channel - housekeeping for a SDMA channel
  *
- * @sdma		pointer to the SDMA engine for this channel
- * @channel		the channel number, matches dmaengine chan_id + 1
- * @direction		transfer type. Needed for setting SDMA script
- * @peripheral_type	Peripheral type. Needed for setting SDMA script
- * @event_id0		aka dma request line
- * @event_id1		for channels that use 2 events
- * @word_size		peripheral access size
- * @buf_tail		ID of the buffer that was processed
- * @buf_ptail		ID of the previous buffer that was processed
- * @num_bd		max NUM_BD. number of descriptors currently handling
+ * @vc:			virt_dma base structure
+ * @desc:		sdma description including vd and other special member
+ * @sdma:		pointer to the SDMA engine for this channel
+ * @channel:		the channel number, matches dmaengine chan_id + 1
+ * @direction:		transfer type. Needed for setting SDMA script
+ * @peripheral_type:	Peripheral type. Needed for setting SDMA script
+ * @event_id0:		aka dma request line
+ * @event_id1:		for channels that use 2 events
+ * @word_size:		peripheral access size
+ * @pc_from_device:	script address for those device_2_memory
+ * @pc_to_device:	script address for those memory_2_device
+ * @device_to_device:	script address for those device_2_device
+ * @pc_to_pc:		script address for those memory_2_memory
+ * @flags:		loop mode or not
+ * @per_address:	peripheral source or destination address in common case
+ *			destination address in p_2_p case
+ * @per_address2:	peripheral source address in p_2_p case
+ * @event_mask:		event mask used in p_2_p script
+ * @watermark_level:	value for gReg[7], some script will extend it from
+ *			basic watermark such as p_2_p
+ * @shp_addr:		value for gReg[6]
+ * @per_addr:		value for gReg[2]
+ * @status:		status of dma channel
+ * @data:		specific sdma interface structure
+ * @bd_pool:		dma_pool for bd
  */
 struct sdma_channel {
+	struct virt_dma_chan		vc;
+	struct sdma_desc		*desc;
 	struct sdma_engine		*sdma;
 	unsigned int			channel;
 	enum dma_transfer_direction	direction;
@@ -310,28 +366,17 @@ struct sdma_channel {
 	unsigned int			event_id0;
 	unsigned int			event_id1;
 	enum dma_slave_buswidth		word_size;
-	unsigned int			buf_tail;
-	unsigned int			buf_ptail;
-	unsigned int			num_bd;
-	unsigned int			period_len;
-	struct sdma_buffer_descriptor	*bd;
-	dma_addr_t			bd_phys;
 	unsigned int			pc_from_device, pc_to_device;
 	unsigned int			device_to_device;
+	unsigned int			pc_to_pc;
 	unsigned long			flags;
 	dma_addr_t			per_address, per_address2;
 	unsigned long			event_mask[2];
 	unsigned long			watermark_level;
 	u32				shp_addr, per_addr;
-	struct dma_chan			chan;
-	spinlock_t			lock;
-	struct dma_async_tx_descriptor	desc;
 	enum dma_status			status;
-	unsigned int			chn_count;
-	unsigned int			chn_real_count;
-	struct tasklet_struct		tasklet;
 	struct imx_dma_data		data;
-	bool				enabled;
+	struct dma_pool			*bd_pool;
 };
 
 #define IMX_DMA_SG_LOOP		BIT(0)
@@ -346,15 +391,15 @@ struct sdma_channel {
 /**
  * struct sdma_firmware_header - Layout of the firmware image
  *
- * @magic	"SDMA"
- * @version_major	increased whenever layout of struct sdma_script_start_addrs
- *			changes.
- * @version_minor	firmware minor version (for binary compatible changes)
- * @script_addrs_start	offset of struct sdma_script_start_addrs in this image
- * @num_script_addrs	Number of script addresses in this image
- * @ram_code_start	offset of SDMA ram image in this firmware image
- * @ram_code_size	size of SDMA ram image
- * @script_addrs	Stores the start address of the SDMA scripts
+ * @magic:		"SDMA"
+ * @version_major:	increased whenever layout of struct
+ *			sdma_script_start_addrs changes.
+ * @version_minor:	firmware minor version (for binary compatible changes)
+ * @script_addrs_start:	offset of struct sdma_script_start_addrs in this image
+ * @num_script_addrs:	Number of script addresses in this image
+ * @ram_code_start:	offset of SDMA ram image in this firmware image
+ * @ram_code_size:	size of SDMA ram image
+ * @script_addrs:	Stores the start address of the SDMA scripts
  *			(in SDMA memory space)
  */
 struct sdma_firmware_header {
@@ -391,6 +436,8 @@ struct sdma_engine {
 	u32				spba_start_addr;
 	u32				spba_end_addr;
 	unsigned int			irq;
+	dma_addr_t			bd0_phys;
+	struct sdma_buffer_descriptor	*bd0;
 };
 
 static struct sdma_driver_data sdma_imx31 = {
@@ -590,14 +637,7 @@ static int sdma_config_ownership(struct sdma_channel *sdmac,
 
 static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 {
-	unsigned long flags;
-	struct sdma_channel *sdmac = &sdma->channel[channel];
-
 	writel(BIT(channel), sdma->regs + SDMA_H_START);
-
-	spin_lock_irqsave(&sdmac->lock, flags);
-	sdmac->enabled = true;
-	spin_unlock_irqrestore(&sdmac->lock, flags);
 }
 
 /*
@@ -625,7 +665,7 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
 static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 		u32 address)
 {
-	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
 	void *buf_virt;
 	dma_addr_t buf_phys;
 	int ret;
@@ -681,26 +721,49 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
 	writel_relaxed(val, sdma->regs + chnenbl);
 }
 
+static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct sdma_desc, vd.tx);
+}
+
+static void sdma_start_desc(struct sdma_channel *sdmac)
+{
+	struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
+	struct sdma_desc *desc;
+	struct sdma_engine *sdma = sdmac->sdma;
+	int channel = sdmac->channel;
+
+	if (!vd) {
+		sdmac->desc = NULL;
+		return;
+	}
+	sdmac->desc = desc = to_sdma_desc(&vd->tx);
+	/*
+	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
+	 * the desc allocated will never be freed in vchan_dma_desc_free_list
+	 */
+	if (!(sdmac->flags & IMX_DMA_SG_LOOP))
+		list_del(&vd->node);
+
+	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
+	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
+	sdma_enable_channel(sdma, sdmac->channel);
+}
+
 static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 {
 	struct sdma_buffer_descriptor *bd;
 	int error = 0;
 	enum dma_status old_status = sdmac->status;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sdmac->lock, flags);
-	if (!sdmac->enabled) {
-		spin_unlock_irqrestore(&sdmac->lock, flags);
-		return;
-	}
-	spin_unlock_irqrestore(&sdmac->lock, flags);
 
 	/*
 	 * loop mode. Iterate over descriptors, re-setup them and
 	 * call callback function.
 	 */
-	while (1) {
-		bd = &sdmac->bd[sdmac->buf_tail];
+	while (sdmac->desc) {
+		struct sdma_desc *desc = sdmac->desc;
+
+		bd = &desc->bd[desc->buf_tail];
 
 		if (bd->mode.status & BD_DONE)
 			break;
@@ -716,11 +779,11 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 		 * the number of bytes present in the current buffer descriptor.
 		 */
 
-		sdmac->chn_real_count = bd->mode.count;
+		desc->chn_real_count = bd->mode.count;
 		bd->mode.status |= BD_DONE;
-		bd->mode.count = sdmac->period_len;
-		sdmac->buf_ptail = sdmac->buf_tail;
-		sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;
+		bd->mode.count = desc->period_len;
+		desc->buf_ptail = desc->buf_tail;
+		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
 
 		/*
 		 * The callback is called from the interrupt context in order
@@ -728,41 +791,38 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 		 * SDMA transaction status by the time the client tasklet is
 		 * executed.
 		 */
-
-		dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
+		spin_unlock(&sdmac->vc.lock);
+		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
+		spin_lock(&sdmac->vc.lock);
 
 		if (error)
 			sdmac->status = old_status;
 	}
 }
 
-static void mxc_sdma_handle_channel_normal(unsigned long data)
+static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
 {
 	struct sdma_channel *sdmac = (struct sdma_channel *) data;
 	struct sdma_buffer_descriptor *bd;
 	int i, error = 0;
 
-	sdmac->chn_real_count = 0;
+	sdmac->desc->chn_real_count = 0;
 	/*
 	 * non loop mode. Iterate over all descriptors, collect
 	 * errors and call callback function
 	 */
-	for (i = 0; i < sdmac->num_bd; i++) {
-		bd = &sdmac->bd[i];
+	for (i = 0; i < sdmac->desc->num_bd; i++) {
+		bd = &sdmac->desc->bd[i];
 
 		if (bd->mode.status & (BD_DONE | BD_RROR))
 			error = -EIO;
-		sdmac->chn_real_count += bd->mode.count;
+		sdmac->desc->chn_real_count += bd->mode.count;
 	}
 
 	if (error)
 		sdmac->status = DMA_ERROR;
 	else
 		sdmac->status = DMA_COMPLETE;
-
-	dma_cookie_complete(&sdmac->desc);
-
-	dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
 }
 
 static irqreturn_t sdma_int_handler(int irq, void *dev_id)
@@ -778,12 +838,21 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 	while (stat) {
 		int channel = fls(stat) - 1;
 		struct sdma_channel *sdmac = &sdma->channel[channel];
+		struct sdma_desc *desc;
+
+		spin_lock(&sdmac->vc.lock);
+		desc = sdmac->desc;
+		if (desc) {
+			if (sdmac->flags & IMX_DMA_SG_LOOP) {
+				sdma_update_channel_loop(sdmac);
+			} else {
+				mxc_sdma_handle_channel_normal(sdmac);
+				vchan_cookie_complete(&desc->vd);
+				sdma_start_desc(sdmac);
+			}
+		}
 
-		if (sdmac->flags & IMX_DMA_SG_LOOP)
-			sdma_update_channel_loop(sdmac);
-		else
-			tasklet_schedule(&sdmac->tasklet);
-
+		spin_unlock(&sdmac->vc.lock);
 		__clear_bit(channel, &stat);
 	}
 
@@ -802,14 +871,16 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 	 * These are needed once we start to support transfers between
 	 * two peripherals or memory-to-memory transfers
 	 */
-	int per_2_per = 0;
+	int per_2_per = 0, emi_2_emi = 0;
 
 	sdmac->pc_from_device = 0;
 	sdmac->pc_to_device = 0;
 	sdmac->device_to_device = 0;
+	sdmac->pc_to_pc = 0;
 
 	switch (peripheral_type) {
 	case IMX_DMATYPE_MEMORY:
+		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
 		break;
 	case IMX_DMATYPE_DSP:
 		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
@@ -882,6 +953,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 	sdmac->pc_from_device = per_2_emi;
 	sdmac->pc_to_device = emi_2_per;
 	sdmac->device_to_device = per_2_per;
+	sdmac->pc_to_pc = emi_2_emi;
 }
 
 static int sdma_load_context(struct sdma_channel *sdmac)
@@ -890,7 +962,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	int channel = sdmac->channel;
 	int load_address;
 	struct sdma_context_data *context = sdma->context;
-	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
 	int ret;
 	unsigned long flags;
 
@@ -898,6 +970,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 		load_address = sdmac->pc_from_device;
 	else if (sdmac->direction == DMA_DEV_TO_DEV)
 		load_address = sdmac->device_to_device;
+	else if (sdmac->direction == DMA_MEM_TO_MEM)
+		load_address = sdmac->pc_to_pc;
 	else
 		load_address = sdmac->pc_to_device;
 
@@ -939,7 +1013,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 
 static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
 {
-	return container_of(chan, struct sdma_channel, chan);
+	return container_of(chan, struct sdma_channel, vc.chan);
 }
 
 static int sdma_disable_channel(struct dma_chan *chan)
@@ -947,21 +1021,25 @@ static int sdma_disable_channel(struct dma_chan *chan)
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int channel = sdmac->channel;
-	unsigned long flags;
 
 	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
 	sdmac->status = DMA_ERROR;
 
-	spin_lock_irqsave(&sdmac->lock, flags);
-	sdmac->enabled = false;
-	spin_unlock_irqrestore(&sdmac->lock, flags);
-
 	return 0;
 }
 
 static int sdma_disable_channel_with_delay(struct dma_chan *chan)
 {
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
 	sdma_disable_channel(chan);
+	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	vchan_get_all_descriptors(&sdmac->vc, &head);
+	sdmac->desc = NULL;
+	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+	vchan_dma_desc_free_list(&sdmac->vc, &head);
 
 	/*
 	 * According to NXP R&D team a delay of one BD SDMA cost time
@@ -1090,52 +1168,81 @@ static int sdma_set_channel_priority(struct sdma_channel *sdmac,
 	return 0;
 }
 
-static int sdma_request_channel(struct sdma_channel *sdmac)
+static int sdma_request_channel0(struct sdma_engine *sdma)
 {
-	struct sdma_engine *sdma = sdmac->sdma;
-	int channel = sdmac->channel;
 	int ret = -EBUSY;
 
-	sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys,
-					GFP_KERNEL);
-	if (!sdmac->bd) {
+	sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
+					GFP_NOWAIT);
+	if (!sdma->bd0) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
-	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+	sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
+	sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
 
-	sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
+	sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
 	return 0;
 out:
 
 	return ret;
 }
 
-static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
+
+static int sdma_alloc_bd(struct sdma_desc *desc)
 {
-	unsigned long flags;
-	struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
-	dma_cookie_t cookie;
+	int ret = 0;
 
-	spin_lock_irqsave(&sdmac->lock, flags);
+	desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT,
+				  &desc->bd_phys);
+	if (!desc->bd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+out:
+	return ret;
+}
 
-	cookie = dma_cookie_assign(tx);
+static void sdma_free_bd(struct sdma_desc *desc)
+{
+	dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys);
+}
 
-	spin_unlock_irqrestore(&sdmac->lock, flags);
+static void sdma_desc_free(struct virt_dma_desc *vd)
+{
+	struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
 
-	return cookie;
+	sdma_free_bd(desc);
+	kfree(desc);
 }
 
 static int sdma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct imx_dma_data *data = chan->private;
+	struct imx_dma_data mem_data;
 	int prio, ret;
 
-	if (!data)
-		return -EINVAL;
+	/*
+	 * MEMCPY may never setup chan->private by filter function such as
+	 * dmatest, thus create 'struct imx_dma_data mem_data' for this case.
+	 * Please note in any other slave case, you have to setup chan->private
+	 * with 'struct imx_dma_data' in your own filter function if you want to
+	 * request dma channel by dma_request_channel() rather than
+	 * dma_request_slave_channel(). Otherwise, 'MEMCPY in case?' will appear
+	 * to warn you to correct your filter function.
+	 */
+	if (!data) {
+		dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
+		mem_data.priority = 2;
+		mem_data.peripheral_type = IMX_DMATYPE_MEMORY;
+		mem_data.dma_request = 0;
+		mem_data.dma_request2 = 0;
+		data = &mem_data;
+
+		sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
+	}
 
 	switch (data->priority) {
 	case DMA_PRIO_HIGH:
1161 if (ret) 1268 if (ret)
1162 goto disable_clk_ipg; 1269 goto disable_clk_ipg;
1163 1270
1164 ret = sdma_request_channel(sdmac);
1165 if (ret)
1166 goto disable_clk_ahb;
1167
1168 ret = sdma_set_channel_priority(sdmac, prio); 1271 ret = sdma_set_channel_priority(sdmac, prio);
1169 if (ret) 1272 if (ret)
1170 goto disable_clk_ahb; 1273 goto disable_clk_ahb;
1171 1274
1172 dma_async_tx_descriptor_init(&sdmac->desc, chan); 1275 sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev,
1173 sdmac->desc.tx_submit = sdma_tx_submit; 1276 sizeof(struct sdma_buffer_descriptor),
1174 /* txd.flags will be overwritten in prep funcs */ 1277 32, 0);
1175 sdmac->desc.flags = DMA_CTRL_ACK;
1176 1278
1177 return 0; 1279 return 0;
1178 1280
@@ -1188,7 +1290,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 
-	sdma_disable_channel(chan);
+	sdma_disable_channel_with_delay(chan);
 
 	if (sdmac->event_id0)
 		sdma_event_disable(sdmac, sdmac->event_id0);
@@ -1200,10 +1302,105 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 
 	sdma_set_channel_priority(sdmac, 0);
 
-	dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
-
 	clk_disable(sdma->clk_ipg);
 	clk_disable(sdma->clk_ahb);
+
+	dma_pool_destroy(sdmac->bd_pool);
+	sdmac->bd_pool = NULL;
+}
+
+static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
+				enum dma_transfer_direction direction, u32 bds)
+{
+	struct sdma_desc *desc;
+
+	desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
+	if (!desc)
+		goto err_out;
+
+	sdmac->status = DMA_IN_PROGRESS;
+	sdmac->direction = direction;
+	sdmac->flags = 0;
+
+	desc->chn_count = 0;
+	desc->chn_real_count = 0;
+	desc->buf_tail = 0;
+	desc->buf_ptail = 0;
+	desc->sdmac = sdmac;
+	desc->num_bd = bds;
+
+	if (sdma_alloc_bd(desc))
+		goto err_desc_out;
+
+	/* No slave_config called in MEMCPY case, so do here */
+	if (direction == DMA_MEM_TO_MEM)
+		sdma_config_ownership(sdmac, false, true, false);
+
+	if (sdma_load_context(sdmac))
+		goto err_desc_out;
+
+	return desc;
+
+err_desc_out:
+	kfree(desc);
+err_out:
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *sdma_prep_memcpy(
+		struct dma_chan *chan, dma_addr_t dma_dst,
+		dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	struct sdma_engine *sdma = sdmac->sdma;
+	int channel = sdmac->channel;
+	size_t count;
+	int i = 0, param;
+	struct sdma_buffer_descriptor *bd;
+	struct sdma_desc *desc;
+
+	if (!chan || !len)
+		return NULL;
+
+	dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
+		&dma_src, &dma_dst, len, channel);
+
+	desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM,
+					len / SDMA_BD_MAX_CNT + 1);
+	if (!desc)
+		return NULL;
+
+	do {
+		count = min_t(size_t, len, SDMA_BD_MAX_CNT);
+		bd = &desc->bd[i];
+		bd->buffer_addr = dma_src;
+		bd->ext_buffer_addr = dma_dst;
+		bd->mode.count = count;
+		desc->chn_count += count;
+		bd->mode.command = 0;
+
+		dma_src += count;
+		dma_dst += count;
+		len -= count;
+		i++;
+
+		param = BD_DONE | BD_EXTD | BD_CONT;
+		/* last bd */
+		if (!len) {
+			param |= BD_INTR;
+			param |= BD_LAST;
+			param &= ~BD_CONT;
+		}
+
+		dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n",
+				i, count, bd->buffer_addr,
+				param & BD_WRAP ? "wrap" : "",
+				param & BD_INTR ? " intr" : "");
+
+		bd->mode.status = param;
+	} while (len);
+
+	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
 }
 
 static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
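
The new DMA_MEMCPY capability means a client can drive the SDMA engine through
the generic dmaengine memcpy API. A minimal sketch of such a transfer (error
handling trimmed, variable names hypothetical):

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;	/* e.g. no buffer descriptors available */

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* completion is signalled via tx->callback or polled via
	   dmaengine_tx_status() */
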
@@ -1213,75 +1410,54 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
-	int ret, i, count;
+	int i, count;
 	int channel = sdmac->channel;
 	struct scatterlist *sg;
+	struct sdma_desc *desc;
 
-	if (sdmac->status == DMA_IN_PROGRESS)
-		return NULL;
-	sdmac->status = DMA_IN_PROGRESS;
-
-	sdmac->flags = 0;
-
-	sdmac->buf_tail = 0;
-	sdmac->buf_ptail = 0;
-	sdmac->chn_real_count = 0;
+	desc = sdma_transfer_init(sdmac, direction, sg_len);
+	if (!desc)
+		goto err_out;
 
 	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
 			sg_len, channel);
 
-	sdmac->direction = direction;
-	ret = sdma_load_context(sdmac);
-	if (ret)
-		goto err_out;
-
-	if (sg_len > NUM_BD) {
-		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
-				channel, sg_len, NUM_BD);
-		ret = -EINVAL;
-		goto err_out;
-	}
-
-	sdmac->chn_count = 0;
 	for_each_sg(sgl, sg, sg_len, i) {
-		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+		struct sdma_buffer_descriptor *bd = &desc->bd[i];
 		int param;
 
 		bd->buffer_addr = sg->dma_address;
 
 		count = sg_dma_len(sg);
 
-		if (count > 0xffff) {
+		if (count > SDMA_BD_MAX_CNT) {
 			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
-					channel, count, 0xffff);
-			ret = -EINVAL;
-			goto err_out;
+					channel, count, SDMA_BD_MAX_CNT);
+			goto err_bd_out;
 		}
 
 		bd->mode.count = count;
-		sdmac->chn_count += count;
+		desc->chn_count += count;
 
-		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
-			ret = -EINVAL;
-			goto err_out;
-		}
+		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
+			goto err_bd_out;
 
 		switch (sdmac->word_size) {
 		case DMA_SLAVE_BUSWIDTH_4_BYTES:
 			bd->mode.command = 0;
 			if (count & 3 || sg->dma_address & 3)
-				return NULL;
+				goto err_bd_out;
 			break;
 		case DMA_SLAVE_BUSWIDTH_2_BYTES:
 			bd->mode.command = 2;
 			if (count & 1 || sg->dma_address & 1)
-				return NULL;
+				goto err_bd_out;
 			break;
 		case DMA_SLAVE_BUSWIDTH_1_BYTE:
 			bd->mode.command = 1;
 			break;
 		default:
-			return NULL;
+			goto err_bd_out;
 		}
 
 		param = BD_DONE | BD_EXTD | BD_CONT;
@@ -1300,10 +1476,10 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 		bd->mode.status = param;
 	}
 
-	sdmac->num_bd = sg_len;
-	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
-
-	return &sdmac->desc;
+	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+err_bd_out:
+	sdma_free_bd(desc);
+	kfree(desc);
 err_out:
 	sdmac->status = DMA_ERROR;
 	return NULL;
@@ -1318,40 +1494,27 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 	struct sdma_engine *sdma = sdmac->sdma;
 	int num_periods = buf_len / period_len;
 	int channel = sdmac->channel;
-	int ret, i = 0, buf = 0;
+	int i = 0, buf = 0;
+	struct sdma_desc *desc;
 
 	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
 
-	if (sdmac->status == DMA_IN_PROGRESS)
-		return NULL;
-
-	sdmac->status = DMA_IN_PROGRESS;
+	desc = sdma_transfer_init(sdmac, direction, num_periods);
+	if (!desc)
+		goto err_out;
 
-	sdmac->buf_tail = 0;
-	sdmac->buf_ptail = 0;
-	sdmac->chn_real_count = 0;
-	sdmac->period_len = period_len;
+	desc->period_len = period_len;
 
 	sdmac->flags |= IMX_DMA_SG_LOOP;
-	sdmac->direction = direction;
-	ret = sdma_load_context(sdmac);
-	if (ret)
-		goto err_out;
-
-	if (num_periods > NUM_BD) {
-		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
-				channel, num_periods, NUM_BD);
-		goto err_out;
-	}
 
-	if (period_len > 0xffff) {
+	if (period_len > SDMA_BD_MAX_CNT) {
 		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
-				channel, period_len, 0xffff);
-		goto err_out;
+				channel, period_len, SDMA_BD_MAX_CNT);
+		goto err_bd_out;
 	}
 
 	while (buf < buf_len) {
-		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+		struct sdma_buffer_descriptor *bd = &desc->bd[i];
 		int param;
 
 		bd->buffer_addr = dma_addr;
@@ -1359,7 +1522,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 		bd->mode.count = period_len;
 
 		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
-			goto err_out;
+			goto err_bd_out;
 		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
 			bd->mode.command = 0;
 		else
@@ -1382,10 +1545,10 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 		i++;
 	}
 
-	sdmac->num_bd = num_periods;
-	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
-
-	return &sdmac->desc;
+	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+err_bd_out:
+	sdma_free_bd(desc);
+	kfree(desc);
 err_out:
 	sdmac->status = DMA_ERROR;
 	return NULL;
@@ -1424,13 +1587,31 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
1424 struct dma_tx_state *txstate) 1587 struct dma_tx_state *txstate)
1425{ 1588{
1426 struct sdma_channel *sdmac = to_sdma_chan(chan); 1589 struct sdma_channel *sdmac = to_sdma_chan(chan);
1590 struct sdma_desc *desc;
1427 u32 residue; 1591 u32 residue;
1592 struct virt_dma_desc *vd;
1593 enum dma_status ret;
1594 unsigned long flags;
1428 1595
1429 if (sdmac->flags & IMX_DMA_SG_LOOP) 1596 ret = dma_cookie_status(chan, cookie, txstate);
1430 residue = (sdmac->num_bd - sdmac->buf_ptail) * 1597 if (ret == DMA_COMPLETE || !txstate)
1431 sdmac->period_len - sdmac->chn_real_count; 1598 return ret;
1432 else 1599
1433 residue = sdmac->chn_count - sdmac->chn_real_count; 1600 spin_lock_irqsave(&sdmac->vc.lock, flags);
1601 vd = vchan_find_desc(&sdmac->vc, cookie);
1602 if (vd) {
1603 desc = to_sdma_desc(&vd->tx);
1604 if (sdmac->flags & IMX_DMA_SG_LOOP)
1605 residue = (desc->num_bd - desc->buf_ptail) *
1606 desc->period_len - desc->chn_real_count;
1607 else
1608 residue = desc->chn_count - desc->chn_real_count;
1609 } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
1610 residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
1611 } else {
1612 residue = 0;
1613 }
1614 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1434 1615
1435 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 1616 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
1436 residue); 1617 residue);
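The reworked sdma_tx_status() above is the canonical virt-dma residue lookup: resolve the cookie first, then inspect the queued or in-flight descriptor under the channel lock. Reduced to a reusable shape it looks like this (a sketch only; the foo_* names and the residue helper are placeholders, not symbols from this series):

	static enum dma_status foo_tx_status(struct dma_chan *chan,
					     dma_cookie_t cookie,
					     struct dma_tx_state *txstate)
	{
		struct foo_chan *fchan = to_foo_chan(chan);
		struct virt_dma_desc *vd;
		enum dma_status ret;
		unsigned long flags;
		u32 residue = 0;

		/* Completed cookies need no residue computation. */
		ret = dma_cookie_status(chan, cookie, txstate);
		if (ret == DMA_COMPLETE || !txstate)
			return ret;

		spin_lock_irqsave(&fchan->vc.lock, flags);
		vd = vchan_find_desc(&fchan->vc, cookie);
		if (vd)		/* still sitting on the issued list */
			residue = foo_desc_residue(to_foo_desc(&vd->tx));
		else if (fchan->cur && fchan->cur->vd.tx.cookie == cookie)
			residue = foo_desc_residue(fchan->cur);	/* in flight */
		spin_unlock_irqrestore(&fchan->vc.lock, flags);

		dma_set_residue(txstate, residue);
		return ret;
	}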
@@ -1441,10 +1622,12 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
1441static void sdma_issue_pending(struct dma_chan *chan) 1622static void sdma_issue_pending(struct dma_chan *chan)
1442{ 1623{
1443 struct sdma_channel *sdmac = to_sdma_chan(chan); 1624 struct sdma_channel *sdmac = to_sdma_chan(chan);
1444 struct sdma_engine *sdma = sdmac->sdma; 1625 unsigned long flags;
1445 1626
1446 if (sdmac->status == DMA_IN_PROGRESS) 1627 spin_lock_irqsave(&sdmac->vc.lock, flags);
1447 sdma_enable_channel(sdma, sdmac->channel); 1628 if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
1629 sdma_start_desc(sdmac);
1630 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1448} 1631}
1449 1632
1450#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 1633#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
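The completion-side counterpart of sdma_issue_pending(), run from the driver's interrupt path, pairs vchan_cookie_complete() with starting the next issued descriptor; together the two halves replace the old hand-rolled tasklet. A sketch with the same placeholder foo_* names:

	static void foo_transfer_done(struct foo_chan *fchan)
	{
		unsigned long flags;

		spin_lock_irqsave(&fchan->vc.lock, flags);
		if (fchan->cur) {
			/* Marks the cookie complete and kicks vc.task. */
			vchan_cookie_complete(&fchan->cur->vd);
			fchan->cur = NULL;
		}
		/* Keep the hardware busy if more work was issued meanwhile. */
		if (vchan_next_desc(&fchan->vc))
			foo_start_desc(fchan);
		spin_unlock_irqrestore(&fchan->vc.lock, flags);
	}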
@@ -1650,7 +1833,7 @@ static int sdma_init(struct sdma_engine *sdma)
1650 for (i = 0; i < MAX_DMA_CHANNELS; i++) 1833 for (i = 0; i < MAX_DMA_CHANNELS; i++)
1651 writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4); 1834 writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
1652 1835
1653 ret = sdma_request_channel(&sdma->channel[0]); 1836 ret = sdma_request_channel0(sdma);
1654 if (ret) 1837 if (ret)
1655 goto err_dma_alloc; 1838 goto err_dma_alloc;
1656 1839
@@ -1805,6 +1988,7 @@ static int sdma_probe(struct platform_device *pdev)
1805 1988
1806 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); 1989 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
1807 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); 1990 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
1991 dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
1808 1992
1809 INIT_LIST_HEAD(&sdma->dma_device.channels); 1993 INIT_LIST_HEAD(&sdma->dma_device.channels);
1810 /* Initialize channel parameters */ 1994 /* Initialize channel parameters */
@@ -1812,22 +1996,16 @@ static int sdma_probe(struct platform_device *pdev)
1812 struct sdma_channel *sdmac = &sdma->channel[i]; 1996 struct sdma_channel *sdmac = &sdma->channel[i];
1813 1997
1814 sdmac->sdma = sdma; 1998 sdmac->sdma = sdma;
1815 spin_lock_init(&sdmac->lock);
1816 1999
1817 sdmac->chan.device = &sdma->dma_device;
1818 dma_cookie_init(&sdmac->chan);
1819 sdmac->channel = i; 2000 sdmac->channel = i;
1820 2001 sdmac->vc.desc_free = sdma_desc_free;
1821 tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal,
1822 (unsigned long) sdmac);
1823 /* 2002 /*
1824 * Add the channel to the DMAC list. Do not add channel 0 though 2003 * Add the channel to the DMAC list. Do not add channel 0 though
1825 * because we need it internally in the SDMA driver. This also means 2004 * because we need it internally in the SDMA driver. This also means
1826 * that channel 0 in dmaengine counting matches sdma channel 1. 2005 * that channel 0 in dmaengine counting matches sdma channel 1.
1827 */ 2006 */
1828 if (i) 2007 if (i)
1829 list_add_tail(&sdmac->chan.device_node, 2008 vchan_init(&sdmac->vc, &sdma->dma_device);
1830 &sdma->dma_device.channels);
1831 } 2009 }
1832 2010
1833 ret = sdma_init(sdma); 2011 ret = sdma_init(sdma);
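The probe-side half of the virt-dma conversion shown above: each channel embeds a struct virt_dma_chan, wires up vc.desc_free, and lets vchan_init() take over cookie initialization, the per-channel tasklet and insertion into the dma_device channel list. A minimal shape, again with hypothetical foo_* names:

	struct foo_desc {
		struct virt_dma_desc vd;	/* must be embedded */
		/* ... buffer descriptors, counts ... */
	};

	struct foo_chan {
		struct virt_dma_chan vc;	/* embeds the struct dma_chan */
		/* ... driver-private state ... */
	};

	static void foo_desc_free(struct virt_dma_desc *vd)
	{
		kfree(container_of(vd, struct foo_desc, vd));
	}

	/* In probe(), per channel: */
	fchan->vc.desc_free = foo_desc_free;
	vchan_init(&fchan->vc, &fdev->dma_device);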
@@ -1877,9 +2055,10 @@ static int sdma_probe(struct platform_device *pdev)
1877 sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS; 2055 sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
1878 sdma->dma_device.directions = SDMA_DMA_DIRECTIONS; 2056 sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
1879 sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; 2057 sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2058 sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
1880 sdma->dma_device.device_issue_pending = sdma_issue_pending; 2059 sdma->dma_device.device_issue_pending = sdma_issue_pending;
1881 sdma->dma_device.dev->dma_parms = &sdma->dma_parms; 2060 sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
1882 dma_set_max_seg_size(sdma->dma_device.dev, 65535); 2061 dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
1883 2062
1884 platform_set_drvdata(pdev, sdma); 2063 platform_set_drvdata(pdev, sdma);
1885 2064
@@ -1932,7 +2111,8 @@ static int sdma_remove(struct platform_device *pdev)
1932 for (i = 0; i < MAX_DMA_CHANNELS; i++) { 2111 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
1933 struct sdma_channel *sdmac = &sdma->channel[i]; 2112 struct sdma_channel *sdmac = &sdma->channel[i];
1934 2113
1935 tasklet_kill(&sdmac->tasklet); 2114 tasklet_kill(&sdmac->vc.task);
2115 sdma_free_chan_resources(&sdmac->vc.chan);
1936 } 2116 }
1937 2117
1938 platform_set_drvdata(pdev, NULL); 2118 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 8b5b23a8ace9..23fb2fa04000 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -688,6 +688,12 @@ static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
688{ 688{
689 u64 phys_complete; 689 u64 phys_complete;
690 690
691 /* set the completion address register again */
692 writel(lower_32_bits(ioat_chan->completion_dma),
693 ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
694 writel(upper_32_bits(ioat_chan->completion_dma),
695 ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
696
691 ioat_quiesce(ioat_chan, 0); 697 ioat_quiesce(ioat_chan, 0);
692 if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) 698 if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
693 __cleanup(ioat_chan, phys_complete); 699 __cleanup(ioat_chan, phys_complete);
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 68dd79783b54..b76cb17d879c 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -470,11 +470,6 @@ static void mic_dma_chan_destroy(struct mic_dma_chan *ch)
470 mic_dma_chan_mask_intr(ch); 470 mic_dma_chan_mask_intr(ch);
471} 471}
472 472
473static void mic_dma_unregister_dma_device(struct mic_dma_device *mic_dma_dev)
474{
475 dma_async_device_unregister(&mic_dma_dev->dma_dev);
476}
477
478static int mic_dma_setup_irq(struct mic_dma_chan *ch) 473static int mic_dma_setup_irq(struct mic_dma_chan *ch)
479{ 474{
480 ch->cookie = 475 ch->cookie =
@@ -630,7 +625,7 @@ static int mic_dma_register_dma_device(struct mic_dma_device *mic_dma_dev,
630 list_add_tail(&mic_dma_dev->mic_ch[i].api_ch.device_node, 625 list_add_tail(&mic_dma_dev->mic_ch[i].api_ch.device_node,
631 &mic_dma_dev->dma_dev.channels); 626 &mic_dma_dev->dma_dev.channels);
632 } 627 }
633 return dma_async_device_register(&mic_dma_dev->dma_dev); 628 return dmaenginem_async_device_register(&mic_dma_dev->dma_dev);
634} 629}
635 630
636/* 631/*
@@ -678,7 +673,6 @@ alloc_error:
678 673
679static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev) 674static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
680{ 675{
681 mic_dma_unregister_dma_device(mic_dma_dev);
682 mic_dma_uninit(mic_dma_dev); 676 mic_dma_uninit(mic_dma_dev);
683 kfree(mic_dma_dev); 677 kfree(mic_dma_dev);
684} 678}
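mic_x100 is the first user of the new devres helper added earlier in this series: dmaenginem_async_device_register() ties unregistration to the lifetime of the underlying struct device, which is why the explicit unregister wrapper can simply be deleted. A minimal probe-side sketch (the mydev names are invented for illustration):

	static int mydev_probe(struct platform_device *pdev)
	{
		struct dma_device *dma;
		int ret;

		dma = devm_kzalloc(&pdev->dev, sizeof(*dma), GFP_KERNEL);
		if (!dma)
			return -ENOMEM;

		dma->dev = &pdev->dev;
		/* ... set cap_mask and callbacks, add channels ... */

		/* Unregistered automatically when the device is torn down. */
		ret = dmaenginem_async_device_register(dma);
		if (ret)
			dev_err(&pdev->dev, "failed to register DMA device: %d\n", ret);

		return ret;
	}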
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index c6589ccf1b9a..8dc0aa4d73ab 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -174,6 +174,7 @@ struct mv_xor_v2_device {
174 int desc_size; 174 int desc_size;
175 unsigned int npendings; 175 unsigned int npendings;
176 unsigned int hw_queue_idx; 176 unsigned int hw_queue_idx;
177 struct msi_desc *msi_desc;
177}; 178};
178 179
179/** 180/**
@@ -588,11 +589,9 @@ static void mv_xor_v2_tasklet(unsigned long data)
588 */ 589 */
589 dma_cookie_complete(&next_pending_sw_desc->async_tx); 590 dma_cookie_complete(&next_pending_sw_desc->async_tx);
590 591
591 if (next_pending_sw_desc->async_tx.callback)
592 next_pending_sw_desc->async_tx.callback(
593 next_pending_sw_desc->async_tx.callback_param);
594
595 dma_descriptor_unmap(&next_pending_sw_desc->async_tx); 592 dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
593 dmaengine_desc_get_callback_invoke(
594 &next_pending_sw_desc->async_tx, NULL);
596 } 595 }
597 596
598 dma_run_dependencies(&next_pending_sw_desc->async_tx); 597 dma_run_dependencies(&next_pending_sw_desc->async_tx);
@@ -643,9 +642,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
643 xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF); 642 xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);
644 643
645 /* write the DESQ address to the DMA engine */ 644 /* write the DESQ address to the DMA engine */
646 writel(xor_dev->hw_desq & 0xFFFFFFFF, 645 writel(lower_32_bits(xor_dev->hw_desq),
647 xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF); 646 xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
648 writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32, 647 writel(upper_32_bits(xor_dev->hw_desq),
649 xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF); 648 xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
650 649
651 /* 650 /*
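Besides reading better, the helpers fix a latent truncation: the old '& 0xFFFF00000000' mask silently dropped bits 48-63 of the descriptor-queue address. The same pattern applies to any split low/high register pair (the offsets below are illustrative, not the mv_xor_v2 ones):

	#include <linux/io.h>
	#include <linux/kernel.h>	/* lower_32_bits() / upper_32_bits() */

	static void hw_set_desq_base(void __iomem *base, dma_addr_t addr)
	{
		/* Program a 64-bit address into a 32-bit low/high register pair. */
		writel(lower_32_bits(addr), base + 0x04);	/* low word */
		writel(upper_32_bits(addr), base + 0x08);	/* high word */
	}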
@@ -780,6 +779,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
780 msi_desc = first_msi_entry(&pdev->dev); 779 msi_desc = first_msi_entry(&pdev->dev);
781 if (!msi_desc) 780 if (!msi_desc)
782 goto free_msi_irqs; 781 goto free_msi_irqs;
782 xor_dev->msi_desc = msi_desc;
783 783
784 ret = devm_request_irq(&pdev->dev, msi_desc->irq, 784 ret = devm_request_irq(&pdev->dev, msi_desc->irq,
785 mv_xor_v2_interrupt_handler, 0, 785 mv_xor_v2_interrupt_handler, 0,
@@ -897,8 +897,12 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
897 xor_dev->desc_size * MV_XOR_V2_DESC_NUM, 897 xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
898 xor_dev->hw_desq_virt, xor_dev->hw_desq); 898 xor_dev->hw_desq_virt, xor_dev->hw_desq);
899 899
900 devm_free_irq(&pdev->dev, xor_dev->msi_desc->irq, xor_dev);
901
900 platform_msi_domain_free_irqs(&pdev->dev); 902 platform_msi_domain_free_irqs(&pdev->dev);
901 903
904 tasklet_kill(&xor_dev->irq_tasklet);
905
902 clk_disable_unprepare(xor_dev->clk); 906 clk_disable_unprepare(xor_dev->clk);
903 907
904 return 0; 908 return 0;
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 2f9974ddfbb2..8c7b2e8703da 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -479,6 +479,7 @@ static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
479 479
480 default: 480 default:
481 pr_warn("%s(): invalid bus width %u\n", __func__, width); 481 pr_warn("%s(): invalid bus width %u\n", __func__, width);
482 /* fall through */
482 case DMA_SLAVE_BUSWIDTH_1_BYTE: 483 case DMA_SLAVE_BUSWIDTH_1_BYTE:
483 size = burst; 484 size = burst;
484 } 485 }
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
new file mode 100644
index 000000000000..7812a6338acd
--- /dev/null
+++ b/drivers/dma/owl-dma.c
@@ -0,0 +1,971 @@
1// SPDX-License-Identifier: GPL-2.0+
2//
3// Actions Semi Owl SoCs DMA driver
4//
5// Copyright (c) 2014 Actions Semi Inc.
6// Author: David Liu <liuwei@actions-semi.com>
7//
8// Copyright (c) 2018 Linaro Ltd.
9// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
10
11#include <linux/bitops.h>
12#include <linux/clk.h>
13#include <linux/delay.h>
14#include <linux/dmaengine.h>
15#include <linux/dma-mapping.h>
16#include <linux/dmapool.h>
17#include <linux/err.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/io.h>
21#include <linux/mm.h>
22#include <linux/module.h>
23#include <linux/of_device.h>
24#include <linux/slab.h>
25#include "virt-dma.h"
26
27#define OWL_DMA_FRAME_MAX_LENGTH 0xfffff
28
29/* Global DMA Controller Registers */
30#define OWL_DMA_IRQ_PD0 0x00
31#define OWL_DMA_IRQ_PD1 0x04
32#define OWL_DMA_IRQ_PD2 0x08
33#define OWL_DMA_IRQ_PD3 0x0C
34#define OWL_DMA_IRQ_EN0 0x10
35#define OWL_DMA_IRQ_EN1 0x14
36#define OWL_DMA_IRQ_EN2 0x18
37#define OWL_DMA_IRQ_EN3 0x1C
38#define OWL_DMA_SECURE_ACCESS_CTL 0x20
39#define OWL_DMA_NIC_QOS 0x24
40#define OWL_DMA_DBGSEL 0x28
41#define OWL_DMA_IDLE_STAT 0x2C
42
43/* Channel Registers */
44#define OWL_DMA_CHAN_BASE(i) (0x100 + (i) * 0x100)
45#define OWL_DMAX_MODE 0x00
46#define OWL_DMAX_SOURCE 0x04
47#define OWL_DMAX_DESTINATION 0x08
48#define OWL_DMAX_FRAME_LEN 0x0C
49#define OWL_DMAX_FRAME_CNT 0x10
50#define OWL_DMAX_REMAIN_FRAME_CNT 0x14
51#define OWL_DMAX_REMAIN_CNT 0x18
52#define OWL_DMAX_SOURCE_STRIDE 0x1C
53#define OWL_DMAX_DESTINATION_STRIDE 0x20
54#define OWL_DMAX_START 0x24
55#define OWL_DMAX_PAUSE 0x28
56#define OWL_DMAX_CHAINED_CTL 0x2C
57#define OWL_DMAX_CONSTANT 0x30
58#define OWL_DMAX_LINKLIST_CTL 0x34
59#define OWL_DMAX_NEXT_DESCRIPTOR 0x38
60#define OWL_DMAX_CURRENT_DESCRIPTOR_NUM 0x3C
61#define OWL_DMAX_INT_CTL 0x40
62#define OWL_DMAX_INT_STATUS 0x44
63#define OWL_DMAX_CURRENT_SOURCE_POINTER 0x48
64#define OWL_DMAX_CURRENT_DESTINATION_POINTER 0x4C
65
66/* OWL_DMAX_MODE Bits */
67#define OWL_DMA_MODE_TS(x) (((x) & GENMASK(5, 0)) << 0)
68#define OWL_DMA_MODE_ST(x) (((x) & GENMASK(1, 0)) << 8)
69#define OWL_DMA_MODE_ST_DEV OWL_DMA_MODE_ST(0)
70#define OWL_DMA_MODE_ST_DCU OWL_DMA_MODE_ST(2)
71#define OWL_DMA_MODE_ST_SRAM OWL_DMA_MODE_ST(3)
72#define OWL_DMA_MODE_DT(x) (((x) & GENMASK(1, 0)) << 10)
73#define OWL_DMA_MODE_DT_DEV OWL_DMA_MODE_DT(0)
74#define OWL_DMA_MODE_DT_DCU OWL_DMA_MODE_DT(2)
75#define OWL_DMA_MODE_DT_SRAM OWL_DMA_MODE_DT(3)
76#define OWL_DMA_MODE_SAM(x) (((x) & GENMASK(1, 0)) << 16)
77#define OWL_DMA_MODE_SAM_CONST OWL_DMA_MODE_SAM(0)
78#define OWL_DMA_MODE_SAM_INC OWL_DMA_MODE_SAM(1)
79#define OWL_DMA_MODE_SAM_STRIDE OWL_DMA_MODE_SAM(2)
80#define OWL_DMA_MODE_DAM(x) (((x) & GENMASK(1, 0)) << 18)
81#define OWL_DMA_MODE_DAM_CONST OWL_DMA_MODE_DAM(0)
82#define OWL_DMA_MODE_DAM_INC OWL_DMA_MODE_DAM(1)
83#define OWL_DMA_MODE_DAM_STRIDE OWL_DMA_MODE_DAM(2)
84#define OWL_DMA_MODE_PW(x) (((x) & GENMASK(2, 0)) << 20)
85#define OWL_DMA_MODE_CB BIT(23)
86#define OWL_DMA_MODE_NDDBW(x) (((x) & 0x1) << 28)
87#define OWL_DMA_MODE_NDDBW_32BIT OWL_DMA_MODE_NDDBW(0)
88#define OWL_DMA_MODE_NDDBW_8BIT OWL_DMA_MODE_NDDBW(1)
89#define OWL_DMA_MODE_CFE BIT(29)
90#define OWL_DMA_MODE_LME BIT(30)
91#define OWL_DMA_MODE_CME BIT(31)
92
93/* OWL_DMAX_LINKLIST_CTL Bits */
94#define OWL_DMA_LLC_SAV(x) (((x) & GENMASK(1, 0)) << 8)
95#define OWL_DMA_LLC_SAV_INC OWL_DMA_LLC_SAV(0)
96#define OWL_DMA_LLC_SAV_LOAD_NEXT OWL_DMA_LLC_SAV(1)
97#define OWL_DMA_LLC_SAV_LOAD_PREV OWL_DMA_LLC_SAV(2)
98#define OWL_DMA_LLC_DAV(x) (((x) & GENMASK(1, 0)) << 10)
99#define OWL_DMA_LLC_DAV_INC OWL_DMA_LLC_DAV(0)
100#define OWL_DMA_LLC_DAV_LOAD_NEXT OWL_DMA_LLC_DAV(1)
101#define OWL_DMA_LLC_DAV_LOAD_PREV OWL_DMA_LLC_DAV(2)
102#define OWL_DMA_LLC_SUSPEND BIT(16)
103
104/* OWL_DMAX_INT_CTL Bits */
105#define OWL_DMA_INTCTL_BLOCK BIT(0)
106#define OWL_DMA_INTCTL_SUPER_BLOCK BIT(1)
107#define OWL_DMA_INTCTL_FRAME BIT(2)
108#define OWL_DMA_INTCTL_HALF_FRAME BIT(3)
109#define OWL_DMA_INTCTL_LAST_FRAME BIT(4)
110
111/* OWL_DMAX_INT_STATUS Bits */
112#define OWL_DMA_INTSTAT_BLOCK BIT(0)
113#define OWL_DMA_INTSTAT_SUPER_BLOCK BIT(1)
114#define OWL_DMA_INTSTAT_FRAME BIT(2)
115#define OWL_DMA_INTSTAT_HALF_FRAME BIT(3)
116#define OWL_DMA_INTSTAT_LAST_FRAME BIT(4)
117
118/* Extract a 'width'-bit field of 'val' at 'shift' and re-base it at 'newshift' */
119#define BIT_FIELD(val, width, shift, newshift) \
120 ((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift))
121
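BIT_FIELD() relocates a bit-field: it masks 'width' bits of 'val' starting at 'shift' and re-bases them at 'newshift'. A worked example, moving a two-bit field from bit 10 down to bit 8:

	/* Bits 11:10 of 0x0c00 are 0b11; re-based at bit 8 this yields 0x300. */
	u32 ctl = BIT_FIELD(0x0c00, 2, 10, 8);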
122/**
123 * struct owl_dma_lli_hw - Hardware link list for dma transfer
124 * @next_lli: physical address of the next link list
125 * @saddr: source physical address
126 * @daddr: destination physical address
127 * @flen: frame length
128 * @fcnt: frame count
129 * @src_stride: source stride
130 * @dst_stride: destination stride
131 * @ctrla: dma_mode and linklist ctrl config
132 * @ctrlb: interrupt config
133 * @const_num: data for constant fill
134 */
135struct owl_dma_lli_hw {
136 u32 next_lli;
137 u32 saddr;
138 u32 daddr;
139 u32 flen:20;
140 u32 fcnt:12;
141 u32 src_stride;
142 u32 dst_stride;
143 u32 ctrla;
144 u32 ctrlb;
145 u32 const_num;
146};
147
148/**
149 * struct owl_dma_lli - Link list for dma transfer
150 * @hw: hardware link list
151 * @phys: physical address of hardware link list
152 * @node: node for txd's lli_list
153 */
154struct owl_dma_lli {
155 struct owl_dma_lli_hw hw;
156 dma_addr_t phys;
157 struct list_head node;
158};
159
160/**
161 * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
162 * @vd: virtual DMA descriptor
163 * @lli_list: link list of lli nodes
164 */
165struct owl_dma_txd {
166 struct virt_dma_desc vd;
167 struct list_head lli_list;
168};
169
170/**
171 * struct owl_dma_pchan - Holder for the physical channels
172 * @id: physical index to this channel
173 * @base: virtual memory base for the dma channel
174 * @vchan: the virtual channel currently being served by this physical channel
175 * @lock: a lock to use when altering an instance of this struct
176 */
177struct owl_dma_pchan {
178 u32 id;
179 void __iomem *base;
180 struct owl_dma_vchan *vchan;
181 spinlock_t lock;
182};
183
184/**
 185 * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
 186 * @vc: wrapped virtual channel
187 * @pchan: the physical channel utilized by this channel
188 * @txd: active transaction on this channel
189 */
190struct owl_dma_vchan {
191 struct virt_dma_chan vc;
192 struct owl_dma_pchan *pchan;
193 struct owl_dma_txd *txd;
194};
195
196/**
197 * struct owl_dma - Holder for the Owl DMA controller
198 * @dma: dma engine for this instance
199 * @base: virtual memory base for the DMA controller
200 * @clk: clock for the DMA controller
 201 * @lock: a lock to use when changing the DMA controller's global registers
202 * @lli_pool: a pool for the LLI descriptors
203 * @nr_pchans: the number of physical channels
204 * @pchans: array of data for the physical channels
 205 * @nr_vchans: the number of virtual channels
 206 * @vchans: array of data for the virtual channels
207 */
208struct owl_dma {
209 struct dma_device dma;
210 void __iomem *base;
211 struct clk *clk;
212 spinlock_t lock;
213 struct dma_pool *lli_pool;
214 int irq;
215
216 unsigned int nr_pchans;
217 struct owl_dma_pchan *pchans;
218
219 unsigned int nr_vchans;
220 struct owl_dma_vchan *vchans;
221};
222
223static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
224 u32 val, bool state)
225{
226 u32 regval;
227
228 regval = readl(pchan->base + reg);
229
230 if (state)
231 regval |= val;
232 else
233 regval &= ~val;
234
 235 writel(regval, pchan->base + reg);
236}
237
238static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
239{
240 writel(data, pchan->base + reg);
241}
242
243static u32 pchan_readl(struct owl_dma_pchan *pchan, u32 reg)
244{
245 return readl(pchan->base + reg);
246}
247
248static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
249{
250 u32 regval;
251
252 regval = readl(od->base + reg);
253
254 if (state)
255 regval |= val;
256 else
257 regval &= ~val;
258
 259 writel(regval, od->base + reg);
260}
261
262static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
263{
264 writel(data, od->base + reg);
265}
266
267static u32 dma_readl(struct owl_dma *od, u32 reg)
268{
269 return readl(od->base + reg);
270}
271
272static inline struct owl_dma *to_owl_dma(struct dma_device *dd)
273{
274 return container_of(dd, struct owl_dma, dma);
275}
276
277static struct device *chan2dev(struct dma_chan *chan)
278{
279 return &chan->dev->device;
280}
281
282static inline struct owl_dma_vchan *to_owl_vchan(struct dma_chan *chan)
283{
284 return container_of(chan, struct owl_dma_vchan, vc.chan);
285}
286
287static inline struct owl_dma_txd *to_owl_txd(struct dma_async_tx_descriptor *tx)
288{
289 return container_of(tx, struct owl_dma_txd, vd.tx);
290}
291
292static inline u32 llc_hw_ctrla(u32 mode, u32 llc_ctl)
293{
294 u32 ctl;
295
296 ctl = BIT_FIELD(mode, 4, 28, 28) |
297 BIT_FIELD(mode, 8, 16, 20) |
298 BIT_FIELD(mode, 4, 8, 16) |
299 BIT_FIELD(mode, 6, 0, 10) |
300 BIT_FIELD(llc_ctl, 2, 10, 8) |
301 BIT_FIELD(llc_ctl, 2, 8, 6);
302
303 return ctl;
304}
305
306static inline u32 llc_hw_ctrlb(u32 int_ctl)
307{
308 u32 ctl;
309
310 ctl = BIT_FIELD(int_ctl, 7, 0, 18);
311
312 return ctl;
313}
314
315static void owl_dma_free_lli(struct owl_dma *od,
316 struct owl_dma_lli *lli)
317{
318 list_del(&lli->node);
319 dma_pool_free(od->lli_pool, lli, lli->phys);
320}
321
322static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
323{
324 struct owl_dma_lli *lli;
325 dma_addr_t phys;
326
327 lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys);
328 if (!lli)
329 return NULL;
330
331 INIT_LIST_HEAD(&lli->node);
332 lli->phys = phys;
333
334 return lli;
335}
336
337static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
338 struct owl_dma_lli *prev,
339 struct owl_dma_lli *next)
340{
341 list_add_tail(&next->node, &txd->lli_list);
342
343 if (prev) {
344 prev->hw.next_lli = next->phys;
345 prev->hw.ctrla |= llc_hw_ctrla(OWL_DMA_MODE_LME, 0);
346 }
347
348 return next;
349}
350
351static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
352 struct owl_dma_lli *lli,
353 dma_addr_t src, dma_addr_t dst,
354 u32 len, enum dma_transfer_direction dir)
355{
356 struct owl_dma_lli_hw *hw = &lli->hw;
357 u32 mode;
358
359 mode = OWL_DMA_MODE_PW(0);
360
361 switch (dir) {
362 case DMA_MEM_TO_MEM:
363 mode |= OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU |
364 OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC |
365 OWL_DMA_MODE_DAM_INC;
366
367 break;
368 default:
369 return -EINVAL;
370 }
371
 372 hw->next_lli = 0; /* One link-list node by default */
373 hw->saddr = src;
374 hw->daddr = dst;
375
376 hw->fcnt = 1; /* Frame count fixed as 1 */
377 hw->flen = len; /* Max frame length is 1MB */
378 hw->src_stride = 0;
379 hw->dst_stride = 0;
380 hw->ctrla = llc_hw_ctrla(mode,
381 OWL_DMA_LLC_SAV_LOAD_NEXT |
382 OWL_DMA_LLC_DAV_LOAD_NEXT);
383
384 hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
385
386 return 0;
387}
388
389static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
390 struct owl_dma_vchan *vchan)
391{
392 struct owl_dma_pchan *pchan = NULL;
393 unsigned long flags;
394 int i;
395
396 for (i = 0; i < od->nr_pchans; i++) {
397 pchan = &od->pchans[i];
398
399 spin_lock_irqsave(&pchan->lock, flags);
400 if (!pchan->vchan) {
401 pchan->vchan = vchan;
402 spin_unlock_irqrestore(&pchan->lock, flags);
403 break;
404 }
405
406 spin_unlock_irqrestore(&pchan->lock, flags);
407 }
408
409 return pchan;
410}
411
412static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan)
413{
414 unsigned int val;
415
416 val = dma_readl(od, OWL_DMA_IDLE_STAT);
417
418 return !(val & (1 << pchan->id));
419}
420
421static void owl_dma_terminate_pchan(struct owl_dma *od,
422 struct owl_dma_pchan *pchan)
423{
424 unsigned long flags;
425 u32 irq_pd;
426
427 pchan_writel(pchan, OWL_DMAX_START, 0);
428 pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
429
430 spin_lock_irqsave(&od->lock, flags);
431 dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false);
432
433 irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0);
434 if (irq_pd & (1 << pchan->id)) {
435 dev_warn(od->dma.dev,
436 "terminating pchan %d that still has pending irq\n",
437 pchan->id);
438 dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id));
439 }
440
441 pchan->vchan = NULL;
442
443 spin_unlock_irqrestore(&od->lock, flags);
444}
445
446static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
447{
448 struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
449 struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc);
450 struct owl_dma_pchan *pchan = vchan->pchan;
451 struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
452 struct owl_dma_lli *lli;
453 unsigned long flags;
454 u32 int_ctl;
455
456 list_del(&vd->node);
457
458 vchan->txd = txd;
459
460 /* Wait for channel inactive */
461 while (owl_dma_pchan_busy(od, pchan))
462 cpu_relax();
463
464 lli = list_first_entry(&txd->lli_list,
465 struct owl_dma_lli, node);
466
467 int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
468
469 pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
470 pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
471 OWL_DMA_LLC_SAV_LOAD_NEXT | OWL_DMA_LLC_DAV_LOAD_NEXT);
472 pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys);
473 pchan_writel(pchan, OWL_DMAX_INT_CTL, int_ctl);
474
475 /* Clear IRQ status for this pchan */
476 pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
477
478 spin_lock_irqsave(&od->lock, flags);
479
480 dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true);
481
482 spin_unlock_irqrestore(&od->lock, flags);
483
484 dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id);
485
486 /* Start DMA transfer for this pchan */
487 pchan_writel(pchan, OWL_DMAX_START, 0x1);
488
489 return 0;
490}
491
492static void owl_dma_phy_free(struct owl_dma *od, struct owl_dma_vchan *vchan)
493{
494 /* Ensure that the physical channel is stopped */
495 owl_dma_terminate_pchan(od, vchan->pchan);
496
497 vchan->pchan = NULL;
498}
499
500static irqreturn_t owl_dma_interrupt(int irq, void *dev_id)
501{
502 struct owl_dma *od = dev_id;
503 struct owl_dma_vchan *vchan;
504 struct owl_dma_pchan *pchan;
505 unsigned long pending;
506 int i;
507 unsigned int global_irq_pending, chan_irq_pending;
508
509 spin_lock(&od->lock);
510
511 pending = dma_readl(od, OWL_DMA_IRQ_PD0);
512
513 /* Clear IRQ status for each pchan */
514 for_each_set_bit(i, &pending, od->nr_pchans) {
515 pchan = &od->pchans[i];
516 pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
517 }
518
519 /* Clear pending IRQ */
520 dma_writel(od, OWL_DMA_IRQ_PD0, pending);
521
522 /* Check missed pending IRQ */
523 for (i = 0; i < od->nr_pchans; i++) {
524 pchan = &od->pchans[i];
525 chan_irq_pending = pchan_readl(pchan, OWL_DMAX_INT_CTL) &
526 pchan_readl(pchan, OWL_DMAX_INT_STATUS);
527
528 /* Dummy read to ensure OWL_DMA_IRQ_PD0 value is updated */
529 dma_readl(od, OWL_DMA_IRQ_PD0);
530
531 global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0);
532
533 if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
534 dev_dbg(od->dma.dev,
535 "global and channel IRQ pending match err\n");
536
537 /* Clear IRQ status for this pchan */
538 pchan_update(pchan, OWL_DMAX_INT_STATUS,
539 0xff, false);
540
541 /* Update global IRQ pending */
542 pending |= BIT(i);
543 }
544 }
545
546 spin_unlock(&od->lock);
547
548 for_each_set_bit(i, &pending, od->nr_pchans) {
549 struct owl_dma_txd *txd;
550
551 pchan = &od->pchans[i];
552
553 vchan = pchan->vchan;
554 if (!vchan) {
555 dev_warn(od->dma.dev, "no vchan attached on pchan %d\n",
556 pchan->id);
557 continue;
558 }
559
560 spin_lock(&vchan->vc.lock);
561
562 txd = vchan->txd;
563 if (txd) {
564 vchan->txd = NULL;
565
566 vchan_cookie_complete(&txd->vd);
567
568 /*
569 * Start the next descriptor (if any),
570 * otherwise free this channel.
571 */
572 if (vchan_next_desc(&vchan->vc))
573 owl_dma_start_next_txd(vchan);
574 else
575 owl_dma_phy_free(od, vchan);
576 }
577
578 spin_unlock(&vchan->vc.lock);
579 }
580
581 return IRQ_HANDLED;
582}
583
584static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd)
585{
586 struct owl_dma_lli *lli, *_lli;
587
588 if (unlikely(!txd))
589 return;
590
591 list_for_each_entry_safe(lli, _lli, &txd->lli_list, node)
592 owl_dma_free_lli(od, lli);
593
594 kfree(txd);
595}
596
597static void owl_dma_desc_free(struct virt_dma_desc *vd)
598{
599 struct owl_dma *od = to_owl_dma(vd->tx.chan->device);
600 struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
601
602 owl_dma_free_txd(od, txd);
603}
604
605static int owl_dma_terminate_all(struct dma_chan *chan)
606{
607 struct owl_dma *od = to_owl_dma(chan->device);
608 struct owl_dma_vchan *vchan = to_owl_vchan(chan);
609 unsigned long flags;
610 LIST_HEAD(head);
611
612 spin_lock_irqsave(&vchan->vc.lock, flags);
613
614 if (vchan->pchan)
615 owl_dma_phy_free(od, vchan);
616
617 if (vchan->txd) {
618 owl_dma_desc_free(&vchan->txd->vd);
619 vchan->txd = NULL;
620 }
621
622 vchan_get_all_descriptors(&vchan->vc, &head);
623 vchan_dma_desc_free_list(&vchan->vc, &head);
624
625 spin_unlock_irqrestore(&vchan->vc.lock, flags);
626
627 return 0;
628}
629
630static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
631{
632 struct owl_dma_pchan *pchan;
633 struct owl_dma_txd *txd;
634 struct owl_dma_lli *lli;
635 unsigned int next_lli_phy;
636 size_t bytes;
637
638 pchan = vchan->pchan;
639 txd = vchan->txd;
640
641 if (!pchan || !txd)
642 return 0;
643
 644 /* Get the remaining count of the current link-list node */
645 bytes = pchan_readl(pchan, OWL_DMAX_REMAIN_CNT);
646
 647 /* Walk the not-yet-processed nodes to total the remaining bytes */
648 if (pchan_readl(pchan, OWL_DMAX_MODE) & OWL_DMA_MODE_LME) {
649 next_lli_phy = pchan_readl(pchan, OWL_DMAX_NEXT_DESCRIPTOR);
650 list_for_each_entry(lli, &txd->lli_list, node) {
651 /* Start from the next active node */
652 if (lli->phys == next_lli_phy) {
 653 list_for_each_entry_from(lli, &txd->lli_list, node)
654 bytes += lli->hw.flen;
655 break;
656 }
657 }
658 }
659
660 return bytes;
661}
662
663static enum dma_status owl_dma_tx_status(struct dma_chan *chan,
664 dma_cookie_t cookie,
665 struct dma_tx_state *state)
666{
667 struct owl_dma_vchan *vchan = to_owl_vchan(chan);
668 struct owl_dma_lli *lli;
669 struct virt_dma_desc *vd;
670 struct owl_dma_txd *txd;
671 enum dma_status ret;
672 unsigned long flags;
673 size_t bytes = 0;
674
675 ret = dma_cookie_status(chan, cookie, state);
676 if (ret == DMA_COMPLETE || !state)
677 return ret;
678
679 spin_lock_irqsave(&vchan->vc.lock, flags);
680
681 vd = vchan_find_desc(&vchan->vc, cookie);
682 if (vd) {
683 txd = to_owl_txd(&vd->tx);
684 list_for_each_entry(lli, &txd->lli_list, node)
685 bytes += lli->hw.flen;
686 } else {
687 bytes = owl_dma_getbytes_chan(vchan);
688 }
689
690 spin_unlock_irqrestore(&vchan->vc.lock, flags);
691
692 dma_set_residue(state, bytes);
693
694 return ret;
695}
696
697static void owl_dma_phy_alloc_and_start(struct owl_dma_vchan *vchan)
698{
699 struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
700 struct owl_dma_pchan *pchan;
701
702 pchan = owl_dma_get_pchan(od, vchan);
703 if (!pchan)
704 return;
705
706 dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id);
707
708 vchan->pchan = pchan;
709 owl_dma_start_next_txd(vchan);
710}
711
712static void owl_dma_issue_pending(struct dma_chan *chan)
713{
714 struct owl_dma_vchan *vchan = to_owl_vchan(chan);
715 unsigned long flags;
716
717 spin_lock_irqsave(&vchan->vc.lock, flags);
718 if (vchan_issue_pending(&vchan->vc)) {
719 if (!vchan->pchan)
720 owl_dma_phy_alloc_and_start(vchan);
721 }
722 spin_unlock_irqrestore(&vchan->vc.lock, flags);
723}
724
725static struct dma_async_tx_descriptor
726 *owl_dma_prep_memcpy(struct dma_chan *chan,
727 dma_addr_t dst, dma_addr_t src,
728 size_t len, unsigned long flags)
729{
730 struct owl_dma *od = to_owl_dma(chan->device);
731 struct owl_dma_vchan *vchan = to_owl_vchan(chan);
732 struct owl_dma_txd *txd;
733 struct owl_dma_lli *lli, *prev = NULL;
734 size_t offset, bytes;
735 int ret;
736
737 if (!len)
738 return NULL;
739
740 txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
741 if (!txd)
742 return NULL;
743
744 INIT_LIST_HEAD(&txd->lli_list);
745
 746 /* Process the transfer frame by frame */
747 for (offset = 0; offset < len; offset += bytes) {
748 lli = owl_dma_alloc_lli(od);
749 if (!lli) {
750 dev_warn(chan2dev(chan), "failed to allocate lli\n");
751 goto err_txd_free;
752 }
753
754 bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);
755
756 ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
757 bytes, DMA_MEM_TO_MEM);
758 if (ret) {
759 dev_warn(chan2dev(chan), "failed to config lli\n");
760 goto err_txd_free;
761 }
762
763 prev = owl_dma_add_lli(txd, prev, lli);
764 }
765
766 return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
767
768err_txd_free:
769 owl_dma_free_txd(od, txd);
770 return NULL;
771}
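From a client's point of view the new memcpy capability is reached through the generic dmaengine API; note that dma_request_chan_by_mask(), also touched in this series, may now return -EPROBE_DEFER. A hedged consumer sketch with abbreviated error handling:

	#include <linux/dmaengine.h>

	static int copy_with_dma(dma_addr_t dst, dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		struct dma_chan *chan;
		dma_cookie_t cookie;
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		chan = dma_request_chan_by_mask(&mask);	/* may be ERR_PTR(-EPROBE_DEFER) */
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
		if (!tx) {
			dma_release_channel(chan);
			return -ENOMEM;
		}

		cookie = dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		/* Poll until done; real users would prefer a completion callback. */
		dma_sync_wait(chan, cookie);
		dma_release_channel(chan);
		return 0;
	}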
772
773static void owl_dma_free_chan_resources(struct dma_chan *chan)
774{
775 struct owl_dma_vchan *vchan = to_owl_vchan(chan);
776
777 /* Ensure all queued descriptors are freed */
778 vchan_free_chan_resources(&vchan->vc);
779}
780
781static inline void owl_dma_free(struct owl_dma *od)
782{
783 struct owl_dma_vchan *vchan = NULL;
784 struct owl_dma_vchan *next;
785
786 list_for_each_entry_safe(vchan,
787 next, &od->dma.channels, vc.chan.device_node) {
788 list_del(&vchan->vc.chan.device_node);
789 tasklet_kill(&vchan->vc.task);
790 }
791}
792
793static int owl_dma_probe(struct platform_device *pdev)
794{
795 struct device_node *np = pdev->dev.of_node;
796 struct owl_dma *od;
797 struct resource *res;
798 int ret, i, nr_channels, nr_requests;
799
800 od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
801 if (!od)
802 return -ENOMEM;
803
804 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
805 if (!res)
806 return -EINVAL;
807
808 od->base = devm_ioremap_resource(&pdev->dev, res);
809 if (IS_ERR(od->base))
810 return PTR_ERR(od->base);
811
812 ret = of_property_read_u32(np, "dma-channels", &nr_channels);
813 if (ret) {
814 dev_err(&pdev->dev, "can't get dma-channels\n");
815 return ret;
816 }
817
818 ret = of_property_read_u32(np, "dma-requests", &nr_requests);
819 if (ret) {
820 dev_err(&pdev->dev, "can't get dma-requests\n");
821 return ret;
822 }
823
824 dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
825 nr_channels, nr_requests);
826
827 od->nr_pchans = nr_channels;
828 od->nr_vchans = nr_requests;
829
830 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
831
832 platform_set_drvdata(pdev, od);
833 spin_lock_init(&od->lock);
834
835 dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
836
837 od->dma.dev = &pdev->dev;
838 od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
839 od->dma.device_tx_status = owl_dma_tx_status;
840 od->dma.device_issue_pending = owl_dma_issue_pending;
841 od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
842 od->dma.device_terminate_all = owl_dma_terminate_all;
843 od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
844 od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
845 od->dma.directions = BIT(DMA_MEM_TO_MEM);
846 od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
847
848 INIT_LIST_HEAD(&od->dma.channels);
849
850 od->clk = devm_clk_get(&pdev->dev, NULL);
851 if (IS_ERR(od->clk)) {
852 dev_err(&pdev->dev, "unable to get clock\n");
853 return PTR_ERR(od->clk);
854 }
855
856 /*
 857 * Even though the DMA controller can generate four
 858 * IRQs for its priority feature, we use only one IRQ
 859 * for simplicity.
860 */
861 od->irq = platform_get_irq(pdev, 0);
862 ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
863 dev_name(&pdev->dev), od);
864 if (ret) {
865 dev_err(&pdev->dev, "unable to request IRQ\n");
866 return ret;
867 }
868
869 /* Init physical channel */
870 od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
871 sizeof(struct owl_dma_pchan), GFP_KERNEL);
872 if (!od->pchans)
873 return -ENOMEM;
874
875 for (i = 0; i < od->nr_pchans; i++) {
876 struct owl_dma_pchan *pchan = &od->pchans[i];
877
878 pchan->id = i;
879 pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
880 }
881
882 /* Init virtual channel */
883 od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
884 sizeof(struct owl_dma_vchan), GFP_KERNEL);
885 if (!od->vchans)
886 return -ENOMEM;
887
888 for (i = 0; i < od->nr_vchans; i++) {
889 struct owl_dma_vchan *vchan = &od->vchans[i];
890
891 vchan->vc.desc_free = owl_dma_desc_free;
892 vchan_init(&vchan->vc, &od->dma);
893 }
894
895 /* Create a pool of consistent memory blocks for hardware descriptors */
896 od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
897 sizeof(struct owl_dma_lli),
898 __alignof__(struct owl_dma_lli),
899 0);
900 if (!od->lli_pool) {
901 dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n");
902 return -ENOMEM;
903 }
904
905 clk_prepare_enable(od->clk);
906
907 ret = dma_async_device_register(&od->dma);
908 if (ret) {
909 dev_err(&pdev->dev, "failed to register DMA engine device\n");
910 goto err_pool_free;
911 }
912
913 return 0;
914
915err_pool_free:
916 clk_disable_unprepare(od->clk);
917 dma_pool_destroy(od->lli_pool);
918
919 return ret;
920}
921
922static int owl_dma_remove(struct platform_device *pdev)
923{
924 struct owl_dma *od = platform_get_drvdata(pdev);
925
926 dma_async_device_unregister(&od->dma);
927
928 /* Mask all interrupts for this execution environment */
929 dma_writel(od, OWL_DMA_IRQ_EN0, 0x0);
930
931 /* Make sure we won't have any further interrupts */
932 devm_free_irq(od->dma.dev, od->irq, od);
933
934 owl_dma_free(od);
935
936 clk_disable_unprepare(od->clk);
937
938 return 0;
939}
940
941static const struct of_device_id owl_dma_match[] = {
942 { .compatible = "actions,s900-dma", },
943 { /* sentinel */ }
944};
945MODULE_DEVICE_TABLE(of, owl_dma_match);
946
947static struct platform_driver owl_dma_driver = {
948 .probe = owl_dma_probe,
949 .remove = owl_dma_remove,
950 .driver = {
951 .name = "dma-owl",
952 .of_match_table = of_match_ptr(owl_dma_match),
953 },
954};
955
956static int owl_dma_init(void)
957{
958 return platform_driver_register(&owl_dma_driver);
959}
960subsys_initcall(owl_dma_init);
961
962static void __exit owl_dma_exit(void)
963{
964 platform_driver_unregister(&owl_dma_driver);
965}
966module_exit(owl_dma_exit);
967
968MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>");
969MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
970MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver");
971MODULE_LICENSE("GPL");
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index de0957fe9668..88750a34e859 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1046,13 +1046,16 @@ static bool _start(struct pl330_thread *thrd)
1046 1046
1047 if (_state(thrd) == PL330_STATE_KILLING) 1047 if (_state(thrd) == PL330_STATE_KILLING)
1048 UNTIL(thrd, PL330_STATE_STOPPED) 1048 UNTIL(thrd, PL330_STATE_STOPPED)
1049 /* fall through */
1049 1050
1050 case PL330_STATE_FAULTING: 1051 case PL330_STATE_FAULTING:
1051 _stop(thrd); 1052 _stop(thrd);
1053 /* fall through */
1052 1054
1053 case PL330_STATE_KILLING: 1055 case PL330_STATE_KILLING:
1054 case PL330_STATE_COMPLETING: 1056 case PL330_STATE_COMPLETING:
1055 UNTIL(thrd, PL330_STATE_STOPPED) 1057 UNTIL(thrd, PL330_STATE_STOPPED)
1058 /* fall through */
1056 1059
1057 case PL330_STATE_STOPPED: 1060 case PL330_STATE_STOPPED:
1058 return _trigger(thrd); 1061 return _trigger(thrd);
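These comments are not cosmetic: with -Wimplicit-fallthrough, GCC 7 and later accept a /* fall through */ comment placed immediately before the next label as an explicit annotation and stay silent. The convention in miniature (the helpers are stand-ins, not pl330 symbols):

	enum thrd_state { STATE_KILLING, STATE_STOPPED, STATE_OTHER };

	static bool start_thread(enum thrd_state s)
	{
		switch (s) {
		case STATE_KILLING:
			do_stop();		/* quiesce the thread first */
			/* fall through */
		case STATE_STOPPED:
			return do_trigger();	/* STOPPED handling applies to both */
		default:
			return false;
		}
	}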
@@ -1779,8 +1782,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
1779 1782
1780static void pl330_release_channel(struct pl330_thread *thrd) 1783static void pl330_release_channel(struct pl330_thread *thrd)
1781{ 1784{
1782 struct pl330_dmac *pl330;
1783
1784 if (!thrd || thrd->free) 1785 if (!thrd || thrd->free)
1785 return; 1786 return;
1786 1787
@@ -1789,8 +1790,6 @@ static void pl330_release_channel(struct pl330_thread *thrd)
1789 dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT); 1790 dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT);
1790 dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT); 1791 dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT);
1791 1792
1792 pl330 = thrd->dmac;
1793
1794 _free_event(thrd, thrd->ev); 1793 _free_event(thrd, thrd->ev);
1795 thrd->free = true; 1794 thrd->free = true;
1796} 1795}
@@ -2257,13 +2256,14 @@ static int pl330_terminate_all(struct dma_chan *chan)
2257 2256
2258 pm_runtime_get_sync(pl330->ddma.dev); 2257 pm_runtime_get_sync(pl330->ddma.dev);
2259 spin_lock_irqsave(&pch->lock, flags); 2258 spin_lock_irqsave(&pch->lock, flags);
2259
2260 spin_lock(&pl330->lock); 2260 spin_lock(&pl330->lock);
2261 _stop(pch->thread); 2261 _stop(pch->thread);
2262 spin_unlock(&pl330->lock);
2263
2264 pch->thread->req[0].desc = NULL; 2262 pch->thread->req[0].desc = NULL;
2265 pch->thread->req[1].desc = NULL; 2263 pch->thread->req[1].desc = NULL;
2266 pch->thread->req_running = -1; 2264 pch->thread->req_running = -1;
2265 spin_unlock(&pl330->lock);
2266
2267 power_down = pch->active; 2267 power_down = pch->active;
2268 pch->active = false; 2268 pch->active = false;
2269 2269
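The race being fixed: the pl330 interrupt handler walks the thread's req[] slots under pl330->lock, so clearing those slots after dropping the lock left a window in which the ISR could observe a half-terminated thread. The corrected critical section, reduced to its essentials:

	spin_lock_irqsave(&pch->lock, flags);
	spin_lock(&pl330->lock);	/* excludes the interrupt handler */
	_stop(pch->thread);
	/* Clear the request slots while the ISR is still locked out. */
	pch->thread->req[0].desc = NULL;
	pch->thread->req[1].desc = NULL;
	pch->thread->req_running = -1;
	spin_unlock(&pl330->lock);
	spin_unlock_irqrestore(&pch->lock, flags);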
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2a2ccd9c78e4..48ee35e2bce6 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1,13 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Renesas R-Car Gen2 DMA Controller Driver 3 * Renesas R-Car Gen2 DMA Controller Driver
3 * 4 *
4 * Copyright (C) 2014 Renesas Electronics Inc. 5 * Copyright (C) 2014 Renesas Electronics Inc.
5 * 6 *
6 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> 7 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
7 *
8 * This is free software; you can redistribute it and/or modify
9 * it under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 */ 8 */
12 9
13#include <linux/delay.h> 10#include <linux/delay.h>
@@ -431,7 +428,8 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
431 chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE; 428 chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
432 } 429 }
433 430
434 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE); 431 rcar_dmac_chan_write(chan, RCAR_DMACHCR,
432 chcr | RCAR_DMACHCR_DE | RCAR_DMACHCR_CAIE);
435} 433}
436 434
437static int rcar_dmac_init(struct rcar_dmac *dmac) 435static int rcar_dmac_init(struct rcar_dmac *dmac)
@@ -761,21 +759,15 @@ static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan)
761 dev_err(chan->chan.device->dev, "CHCR DE check error\n"); 759 dev_err(chan->chan.device->dev, "CHCR DE check error\n");
762} 760}
763 761
764static void rcar_dmac_sync_tcr(struct rcar_dmac_chan *chan) 762static void rcar_dmac_clear_chcr_de(struct rcar_dmac_chan *chan)
765{ 763{
766 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); 764 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
767 765
768 if (!(chcr & RCAR_DMACHCR_DE))
769 return;
770
771 /* set DE=0 and flush remaining data */ 766 /* set DE=0 and flush remaining data */
772 rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE)); 767 rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE));
773 768
774 /* make sure all remaining data was flushed */ 769 /* make sure all remaining data was flushed */
775 rcar_dmac_chcr_de_barrier(chan); 770 rcar_dmac_chcr_de_barrier(chan);
776
777 /* back DE */
778 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
779} 771}
780 772
781static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan) 773static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
@@ -783,7 +775,8 @@ static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
783 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); 775 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
784 776
785 chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE | 777 chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
786 RCAR_DMACHCR_TE | RCAR_DMACHCR_DE); 778 RCAR_DMACHCR_TE | RCAR_DMACHCR_DE |
779 RCAR_DMACHCR_CAE | RCAR_DMACHCR_CAIE);
787 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr); 780 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
788 rcar_dmac_chcr_de_barrier(chan); 781 rcar_dmac_chcr_de_barrier(chan);
789} 782}
@@ -812,12 +805,7 @@ static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
812 } 805 }
813} 806}
814 807
815static void rcar_dmac_stop(struct rcar_dmac *dmac) 808static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
816{
817 rcar_dmac_write(dmac, RCAR_DMAOR, 0);
818}
819
820static void rcar_dmac_abort(struct rcar_dmac *dmac)
821{ 809{
822 unsigned int i; 810 unsigned int i;
823 811
@@ -826,14 +814,24 @@ static void rcar_dmac_abort(struct rcar_dmac *dmac)
826 struct rcar_dmac_chan *chan = &dmac->channels[i]; 814 struct rcar_dmac_chan *chan = &dmac->channels[i];
827 815
828 /* Stop and reinitialize the channel. */ 816 /* Stop and reinitialize the channel. */
829 spin_lock(&chan->lock); 817 spin_lock_irq(&chan->lock);
830 rcar_dmac_chan_halt(chan); 818 rcar_dmac_chan_halt(chan);
831 spin_unlock(&chan->lock); 819 spin_unlock_irq(&chan->lock);
832
833 rcar_dmac_chan_reinit(chan);
834 } 820 }
835} 821}
836 822
823static int rcar_dmac_chan_pause(struct dma_chan *chan)
824{
825 unsigned long flags;
826 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
827
828 spin_lock_irqsave(&rchan->lock, flags);
829 rcar_dmac_clear_chcr_de(rchan);
830 spin_unlock_irqrestore(&rchan->lock, flags);
831
832 return 0;
833}
834
837/* ----------------------------------------------------------------------------- 835/* -----------------------------------------------------------------------------
838 * Descriptors preparation 836 * Descriptors preparation
839 */ 837 */
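Consumers need nothing new: with device_pause wired up, dmaengine_pause() on these channels now succeeds instead of returning -ENOSYS, which is what serial drivers rely on to snapshot an exact residue before tearing a transfer down. A hedged usage sketch (chan and cookie come from earlier setup):

	struct dma_tx_state state;

	/* Pause, read an exact residue, then terminate (no resume needed). */
	if (!dmaengine_pause(chan)) {
		dmaengine_tx_status(chan, cookie, &state);
		pr_info("residue: %u bytes\n", state.residue);
	}
	dmaengine_terminate_sync(chan);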
@@ -1355,9 +1353,6 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
1355 residue += chunk->size; 1353 residue += chunk->size;
1356 } 1354 }
1357 1355
1358 if (desc->direction == DMA_DEV_TO_MEM)
1359 rcar_dmac_sync_tcr(chan);
1360
1361 /* Add the residue for the current chunk. */ 1356 /* Add the residue for the current chunk. */
1362 residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift; 1357 residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
1363 1358
@@ -1522,11 +1517,26 @@ static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
1522 u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE; 1517 u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
1523 struct rcar_dmac_chan *chan = dev; 1518 struct rcar_dmac_chan *chan = dev;
1524 irqreturn_t ret = IRQ_NONE; 1519 irqreturn_t ret = IRQ_NONE;
1520 bool reinit = false;
1525 u32 chcr; 1521 u32 chcr;
1526 1522
1527 spin_lock(&chan->lock); 1523 spin_lock(&chan->lock);
1528 1524
1529 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); 1525 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
1526 if (chcr & RCAR_DMACHCR_CAE) {
1527 struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device);
1528
1529 /*
 1530 * No need to call rcar_dmac_chan_halt() here:
 1531 * the channel is already stopped in the error case.
 1532 * Clear the register and check the DE bit to recover.
1533 */
1534 rcar_dmac_write(dmac, RCAR_DMACHCLR, 1 << chan->index);
1535 rcar_dmac_chcr_de_barrier(chan);
1536 reinit = true;
1537 goto spin_lock_end;
1538 }
1539
1530 if (chcr & RCAR_DMACHCR_TE) 1540 if (chcr & RCAR_DMACHCR_TE)
1531 mask |= RCAR_DMACHCR_DE; 1541 mask |= RCAR_DMACHCR_DE;
1532 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask); 1542 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
@@ -1539,8 +1549,16 @@ static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
1539 if (chcr & RCAR_DMACHCR_TE) 1549 if (chcr & RCAR_DMACHCR_TE)
1540 ret |= rcar_dmac_isr_transfer_end(chan); 1550 ret |= rcar_dmac_isr_transfer_end(chan);
1541 1551
1552spin_lock_end:
1542 spin_unlock(&chan->lock); 1553 spin_unlock(&chan->lock);
1543 1554
1555 if (reinit) {
1556 dev_err(chan->chan.device->dev, "Channel Address Error\n");
1557
1558 rcar_dmac_chan_reinit(chan);
1559 ret = IRQ_HANDLED;
1560 }
1561
1544 return ret; 1562 return ret;
1545} 1563}
1546 1564
@@ -1597,24 +1615,6 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
1597 return IRQ_HANDLED; 1615 return IRQ_HANDLED;
1598} 1616}
1599 1617
1600static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
1601{
1602 struct rcar_dmac *dmac = data;
1603
1604 if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
1605 return IRQ_NONE;
1606
1607 /*
1608 * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
1609 * abort transfers on all channels, and reinitialize the DMAC.
1610 */
1611 rcar_dmac_stop(dmac);
1612 rcar_dmac_abort(dmac);
1613 rcar_dmac_init(dmac);
1614
1615 return IRQ_HANDLED;
1616}
1617
1618/* ----------------------------------------------------------------------------- 1618/* -----------------------------------------------------------------------------
1619 * OF xlate and channel filter 1619 * OF xlate and channel filter
1620 */ 1620 */
@@ -1784,8 +1784,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
1784 struct rcar_dmac *dmac; 1784 struct rcar_dmac *dmac;
1785 struct resource *mem; 1785 struct resource *mem;
1786 unsigned int i; 1786 unsigned int i;
1787 char *irqname;
1788 int irq;
1789 int ret; 1787 int ret;
1790 1788
1791 dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); 1789 dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
@@ -1824,17 +1822,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
1824 if (IS_ERR(dmac->iomem)) 1822 if (IS_ERR(dmac->iomem))
1825 return PTR_ERR(dmac->iomem); 1823 return PTR_ERR(dmac->iomem);
1826 1824
1827 irq = platform_get_irq_byname(pdev, "error");
1828 if (irq < 0) {
1829 dev_err(&pdev->dev, "no error IRQ specified\n");
1830 return -ENODEV;
1831 }
1832
1833 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
1834 dev_name(dmac->dev));
1835 if (!irqname)
1836 return -ENOMEM;
1837
1838 /* Enable runtime PM and initialize the device. */ 1825 /* Enable runtime PM and initialize the device. */
1839 pm_runtime_enable(&pdev->dev); 1826 pm_runtime_enable(&pdev->dev);
1840 ret = pm_runtime_get_sync(&pdev->dev); 1827 ret = pm_runtime_get_sync(&pdev->dev);
@@ -1871,6 +1858,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
1871 engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg; 1858 engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
1872 engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic; 1859 engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
1873 engine->device_config = rcar_dmac_device_config; 1860 engine->device_config = rcar_dmac_device_config;
1861 engine->device_pause = rcar_dmac_chan_pause;
1874 engine->device_terminate_all = rcar_dmac_chan_terminate_all; 1862 engine->device_terminate_all = rcar_dmac_chan_terminate_all;
1875 engine->device_tx_status = rcar_dmac_tx_status; 1863 engine->device_tx_status = rcar_dmac_tx_status;
1876 engine->device_issue_pending = rcar_dmac_issue_pending; 1864 engine->device_issue_pending = rcar_dmac_issue_pending;
@@ -1885,14 +1873,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
1885 goto error; 1873 goto error;
1886 } 1874 }
1887 1875
1888 ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
1889 irqname, dmac);
1890 if (ret) {
1891 dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
1892 irq, ret);
1893 return ret;
1894 }
1895
1896 /* Register the DMAC as a DMA provider for DT. */ 1876 /* Register the DMAC as a DMA provider for DT. */
1897 ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate, 1877 ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
1898 NULL); 1878 NULL);
@@ -1932,7 +1912,7 @@ static void rcar_dmac_shutdown(struct platform_device *pdev)
1932{ 1912{
1933 struct rcar_dmac *dmac = platform_get_drvdata(pdev); 1913 struct rcar_dmac *dmac = platform_get_drvdata(pdev);
1934 1914
1935 rcar_dmac_stop(dmac); 1915 rcar_dmac_stop_all_chan(dmac);
1936} 1916}
1937 1917
1938static const struct of_device_id rcar_dmac_of_ids[] = { 1918static const struct of_device_id rcar_dmac_of_ids[] = {
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 1bc149af990e..f4edfc56f34e 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -555,6 +555,7 @@ struct d40_gen_dmac {
555 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and 555 * later
556 * later 556 * later
557 * @reg_val_backup_chan: Backup data for standard channel parameter registers. 557 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
558 * @regs_interrupt: Scratch space for registers during interrupt.
558 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off. 559 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
559 * @gen_dmac: the struct for generic registers values to represent u8500/8540 560 * @gen_dmac: the struct for generic registers values to represent u8500/8540
560 * DMA controller 561 * DMA controller
@@ -592,6 +593,7 @@ struct d40_base {
592 u32 reg_val_backup[BACKUP_REGS_SZ]; 593 u32 reg_val_backup[BACKUP_REGS_SZ];
593 u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX]; 594 u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
594 u32 *reg_val_backup_chan; 595 u32 *reg_val_backup_chan;
596 u32 *regs_interrupt;
595 u16 gcc_pwr_off_mask; 597 u16 gcc_pwr_off_mask;
596 struct d40_gen_dmac gen_dmac; 598 struct d40_gen_dmac gen_dmac;
597}; 599};
@@ -1637,7 +1639,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
1637 struct d40_chan *d40c; 1639 struct d40_chan *d40c;
1638 unsigned long flags; 1640 unsigned long flags;
1639 struct d40_base *base = data; 1641 struct d40_base *base = data;
1640 u32 regs[base->gen_dmac.il_size]; 1642 u32 *regs = base->regs_interrupt;
1641 struct d40_interrupt_lookup *il = base->gen_dmac.il; 1643 struct d40_interrupt_lookup *il = base->gen_dmac.il;
1642 u32 il_size = base->gen_dmac.il_size; 1644 u32 il_size = base->gen_dmac.il_size;
1643 1645
@@ -3258,13 +3260,22 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3258 if (!base->lcla_pool.alloc_map) 3260 if (!base->lcla_pool.alloc_map)
3259 goto free_backup_chan; 3261 goto free_backup_chan;
3260 3262
3263 base->regs_interrupt = kmalloc_array(base->gen_dmac.il_size,
3264 sizeof(*base->regs_interrupt),
3265 GFP_KERNEL);
3266 if (!base->regs_interrupt)
3267 goto free_map;
3268
3261 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), 3269 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3262 0, SLAB_HWCACHE_ALIGN, 3270 0, SLAB_HWCACHE_ALIGN,
3263 NULL); 3271 NULL);
3264 if (base->desc_slab == NULL) 3272 if (base->desc_slab == NULL)
3265 goto free_map; 3273 goto free_regs;
3274
3266 3275
3267 return base; 3276 return base;
3277 free_regs:
3278 kfree(base->regs_interrupt);
3268 free_map: 3279 free_map:
3269 kfree(base->lcla_pool.alloc_map); 3280 kfree(base->lcla_pool.alloc_map);
3270 free_backup_chan: 3281 free_backup_chan:
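The point of the change is removing a variable-length array from the interrupt handler (il_size is only known at probe time). Because the handler is serialized per interrupt line, a single preallocated scratch buffer suffices; the general conversion looks like this:

	/* Before: "u32 regs[base->gen_dmac.il_size];" was a VLA on the IRQ stack. */

	/* At probe time, once the size is known: */
	base->regs_interrupt = kmalloc_array(base->gen_dmac.il_size,
					     sizeof(*base->regs_interrupt),
					     GFP_KERNEL);
	if (!base->regs_interrupt)
		return -ENOMEM;

	/* In the handler, borrow the preallocated buffer instead: */
	u32 *regs = base->regs_interrupt;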
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 8c5807362a25..379e8d534e61 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -594,7 +594,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
594 594
595 chan->busy = true; 595 chan->busy = true;
596 596
597 dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan); 597 dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
598} 598}
599 599
600static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan) 600static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
@@ -693,7 +693,7 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
693 693
694 spin_lock_irqsave(&chan->vchan.lock, flags); 694 spin_lock_irqsave(&chan->vchan.lock, flags);
695 if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) { 695 if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
696 dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan); 696 dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
697 stm32_dma_start_transfer(chan); 697 stm32_dma_start_transfer(chan);
698 698
699 } 699 }
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 9dc450b7ace6..06dd1725375e 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1170,7 +1170,7 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
1170 1170
1171 chan->busy = true; 1171 chan->busy = true;
1172 1172
1173 dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan); 1173 dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
1174} 1174}
1175 1175
1176static void stm32_mdma_issue_pending(struct dma_chan *c) 1176static void stm32_mdma_issue_pending(struct dma_chan *c)
@@ -1183,7 +1183,7 @@ static void stm32_mdma_issue_pending(struct dma_chan *c)
1183 if (!vchan_issue_pending(&chan->vchan)) 1183 if (!vchan_issue_pending(&chan->vchan))
1184 goto end; 1184 goto end;
1185 1185
1186 dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan); 1186 dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
1187 1187
1188 if (!chan->desc && !chan->busy) 1188 if (!chan->desc && !chan->busy)
1189 stm32_mdma_start_transfer(chan); 1189 stm32_mdma_start_transfer(chan);
@@ -1203,7 +1203,7 @@ static int stm32_mdma_pause(struct dma_chan *c)
1203 spin_unlock_irqrestore(&chan->vchan.lock, flags); 1203 spin_unlock_irqrestore(&chan->vchan.lock, flags);
1204 1204
1205 if (!ret) 1205 if (!ret)
1206 dev_dbg(chan2dev(chan), "vchan %p: pause\n", &chan->vchan); 1206 dev_dbg(chan2dev(chan), "vchan %pK: pause\n", &chan->vchan);
1207 1207
1208 return ret; 1208 return ret;
1209} 1209}
@@ -1240,7 +1240,7 @@ static int stm32_mdma_resume(struct dma_chan *c)
 
 	spin_unlock_irqrestore(&chan->vchan.lock, flags);
 
-	dev_dbg(chan2dev(chan), "vchan %p: resume\n", &chan->vchan);
+	dev_dbg(chan2dev(chan), "vchan %pK: resume\n", &chan->vchan);
 
 	return 0;
 }
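
A note on the stm32 hunks above: the switch from %p to %pK is a hardening change. Since v4.15 plain %p prints only a per-boot hashed value, while %pK additionally honours the kernel.kptr_restrict sysctl, which can hide the address from unprivileged log readers entirely. A minimal sketch of the difference (log_vchan is a hypothetical helper, not part of the patch):

#include <linux/printk.h>

/* Illustration only: same pointer, two specifiers. */
static void log_vchan(const void *vchan)
{
	pr_debug("vchan %p: hashed per-boot value\n", vchan);
	pr_debug("vchan %pK: zeroed or shown per kptr_restrict\n", vchan);
}
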
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 27b523530c4a..c12442312595 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -115,6 +115,9 @@
 #define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
 #define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))
 
+#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP	0x00ec
+#define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)
+
 /* HW specific definitions */
 #define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x20
 
@@ -340,6 +343,7 @@ struct xilinx_dma_tx_descriptor {
  * @start_transfer: Differentiate b/w DMA IP's transfer
  * @stop_transfer: Differentiate b/w DMA IP's quiesce
  * @tdest: TDEST value for mcdma
+ * @has_vflip: S2MM vertical flip
  */
 struct xilinx_dma_chan {
 	struct xilinx_dma_device *xdev;
@@ -376,6 +380,7 @@ struct xilinx_dma_chan {
 	void (*start_transfer)(struct xilinx_dma_chan *chan);
 	int (*stop_transfer)(struct xilinx_dma_chan *chan);
 	u16 tdest;
+	bool has_vflip;
 };
 
 /**
@@ -1092,6 +1097,14 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 				       desc->async_tx.phys);
 
 	/* Configure the hardware using info in the config structure */
+	if (chan->has_vflip) {
+		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
+		reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
+		reg |= config->vflip_en;
+		dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
+			  reg);
+	}
+
 	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
 
 	if (config->frm_cnt_en)
@@ -2105,6 +2118,8 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
 	}
 
 	chan->config.frm_cnt_en = cfg->frm_cnt_en;
+	chan->config.vflip_en = cfg->vflip_en;
+
 	if (cfg->park)
 		chan->config.park_frm = cfg->park_frm;
 	else
@@ -2428,6 +2443,13 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		chan->direction = DMA_DEV_TO_MEM;
 		chan->id = chan_id;
 		chan->tdest = chan_id - xdev->nr_channels;
+		chan->has_vflip = of_property_read_bool(node,
+					"xlnx,enable-vert-flip");
+		if (chan->has_vflip) {
+			chan->config.vflip_en = dma_read(chan,
+				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
+				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
+		}
 
 		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
diff --git a/include/linux/dma/xilinx_dma.h b/include/linux/dma/xilinx_dma.h
index 34b98f276ed0..5b6e61e4b3aa 100644
--- a/include/linux/dma/xilinx_dma.h
+++ b/include/linux/dma/xilinx_dma.h
@@ -27,6 +27,7 @@
  * @delay: Delay counter
  * @reset: Reset Channel
  * @ext_fsync: External Frame Sync source
+ * @vflip_en: Vertical Flip enable
  */
 struct xilinx_vdma_config {
 	int frm_dly;
@@ -39,6 +40,7 @@ struct xilinx_vdma_config {
 	int delay;
 	int reset;
 	int ext_fsync;
+	bool vflip_en;
 };
 
 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
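
As a usage sketch of the new vflip_en field (not part of this series): a capture driver holding an S2MM VDMA channel, obtained for instance via dma_request_chan(), could enable vertical flip as below. foo_enable_vflip is a hypothetical caller, and the channel's DT node is assumed to set xlnx,enable-vert-flip:

#include <linux/dmaengine.h>
#include <linux/dma/xilinx_dma.h>

/* Hypothetical caller; only .vflip_en is new in this series. */
static int foo_enable_vflip(struct dma_chan *chan)
{
	struct xilinx_vdma_config cfg = {
		.vflip_en = true,
	};

	return xilinx_vdma_channel_set_config(chan, &cfg);
}
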
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 861be5cab1df..d49ec5c31944 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -415,7 +415,9 @@ enum dma_residue_granularity {
  * each type, the dma controller should set BIT(<TYPE>) and same
  * should be checked by controller as well
  * @max_burst: max burst capability per-transfer
- * @cmd_pause: true, if pause and thereby resume is supported
+ * @cmd_pause: true, if pause is supported (i.e. for reading residue or
+ *	       for resume later)
+ * @cmd_resume: true, if resume is supported
  * @cmd_terminate: true, if terminate cmd is supported
  * @residue_granularity: granularity of the reported transfer residue
  * @descriptor_reuse: if a descriptor can be reused by client and
@@ -427,6 +429,7 @@ struct dma_slave_caps {
 	u32 directions;
 	u32 max_burst;
 	bool cmd_pause;
+	bool cmd_resume;
 	bool cmd_terminate;
 	enum dma_residue_granularity residue_granularity;
 	bool descriptor_reuse;
@@ -1403,6 +1406,7 @@ static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
 /* --- DMA device --- */
 
 int dma_async_device_register(struct dma_device *device);
+int dmaenginem_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
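
dmaenginem_async_device_register() above is the managed counterpart of dma_async_device_register(): unregistration is tied to the device's lifetime, so a provider's remove path no longer needs an explicit dma_async_device_unregister(). A minimal probe sketch under that assumption (foo_dma_dev and foo_dma_probe are hypothetical):

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

/* Hypothetical provider state; only the embedded dma_device matters here. */
struct foo_dma_dev {
	struct dma_device ddev;
};

static int foo_dma_probe(struct platform_device *pdev)
{
	struct foo_dma_dev *fd;

	fd = devm_kzalloc(&pdev->dev, sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return -ENOMEM;

	fd->ddev.dev = &pdev->dev;
	/* capability masks, channel setup and callbacks elided */

	/* Managed registration: nothing to undo in the remove path. */
	return dmaenginem_async_device_register(&fd->ddev);
}
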
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index 52fd7af952a5..30e791a53352 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -147,7 +147,7 @@ static int dmaengine_pcm_set_runtime_hwparams(struct snd_pcm_substream *substrea
 
 	ret = dma_get_slave_caps(chan, &dma_caps);
 	if (ret == 0) {
-		if (dma_caps.cmd_pause)
+		if (dma_caps.cmd_pause && dma_caps.cmd_resume)
 			hw.info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME;
 		if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT)
 			hw.info |= SNDRV_PCM_INFO_BATCH;
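
The ASoC hunk above is the template for any client of the split capability flags: cmd_pause alone no longer implies resume, so both flags must be true before advertising full pause/resume support. The same check in isolation, as a hedged sketch (foo_chan_can_pause_and_resume is hypothetical):

#include <linux/dmaengine.h>

/* Mirrors the soc-generic-dmaengine-pcm logic above. */
static bool foo_chan_can_pause_and_resume(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	/* cmd_pause now only guarantees pause, e.g. for residue reads */
	return caps.cmd_pause && caps.cmd_resume;
}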