61 files changed, 2592 insertions, 654 deletions
diff --git a/Documentation/devicetree/bindings/dma/dma.txt b/Documentation/devicetree/bindings/dma/dma.txt
index 6312fb00ce8d..eeb4e4d1771e 100644
--- a/Documentation/devicetree/bindings/dma/dma.txt
+++ b/Documentation/devicetree/bindings/dma/dma.txt
@@ -16,6 +16,9 @@ Optional properties: | |||
16 | - dma-channels: Number of DMA channels supported by the controller. | 16 | - dma-channels: Number of DMA channels supported by the controller. |
17 | - dma-requests: Number of DMA request signals supported by the | 17 | - dma-requests: Number of DMA request signals supported by the |
18 | controller. | 18 | controller. |
19 | - dma-channel-mask: Bitmask of available DMA channels in ascending order | ||
20 | that are not reserved by firmware and are available to | ||
21 | the kernel. i.e. first channel corresponds to LSB. | ||
19 | 22 | ||
20 | Example: | 23 | Example: |
21 | 24 | ||
@@ -29,6 +32,7 @@ Example: | |||
29 | #dma-cells = <1>; | 32 | #dma-cells = <1>; |
30 | dma-channels = <32>; | 33 | dma-channels = <32>; |
31 | dma-requests = <127>; | 34 | dma-requests = <127>; |
35 | dma-channel-mask = <0xfffe>; | ||
32 | }; | 36 | }; |
33 | 37 | ||
34 | * DMA router | 38 | * DMA router |
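For reference, a controller driver could consume the new dma-channel-mask property along these lines. This is a minimal sketch, not code from this series; the foo_ helper name and the single-u32 read (sufficient only for controllers with at most 32 channels) are illustrative assumptions.

#include <linux/bits.h>
#include <linux/of.h>

/* Sketch: default to all channels usable, then let firmware-reserved
 * channels be masked out via the optional "dma-channel-mask" property. */
static u32 foo_dma_usable_channels(struct device_node *np, u32 nr_channels)
{
        u32 mask = GENMASK(nr_channels - 1, 0);

        of_property_read_u32(np, "dma-channel-mask", &mask);

        return mask;
}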
diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.txt b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
new file mode 100644
index 000000000000..6a0ff9059e72
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
@@ -0,0 +1,57 @@ | |||
1 | NXP Layerscape SoC qDMA Controller | ||
2 | ================================== | ||
3 | |||
4 | This device follows the generic DMA bindings defined in dma/dma.txt. | ||
5 | |||
6 | Required properties: | ||
7 | |||
8 | - compatible: Must be one of | ||
9 | "fsl,ls1021a-qdma": for LS1021A Board | ||
10 | "fsl,ls1043a-qdma": for LS1043A Board | ||
11 | "fsl,ls1046a-qdma": for LS1046A Board | ||
12 | - reg: Should contain the register's base address and length. | ||
13 | - interrupts: Should contain a reference to the interrupt used by this | ||
14 | device. | ||
15 | - interrupt-names: Should contain interrupt names: | ||
16 | "qdma-queue0": the block0 interrupt | ||
17 | "qdma-queue1": the block1 interrupt | ||
18 | "qdma-queue2": the block2 interrupt | ||
19 | "qdma-queue3": the block3 interrupt | ||
20 | "qdma-error": the error interrupt | ||
21 | - fsl,dma-queues: Should contain the number of queues supported. | ||
22 | - dma-channels: Number of DMA channels supported | ||
23 | - block-number: the number of virtual blocks | ||
24 | - block-offset: the offset of each virtual block | ||
25 | - status-sizes: status queue size per virtual block | ||
26 | - queue-sizes: command queue size per virtual block; the number of | ||
27 | sizes must match the number of queues | ||
28 | |||
29 | Optional properties: | ||
30 | |||
31 | - dma-channels: Number of DMA channels supported by the controller. | ||
32 | - big-endian: If present, registers and hardware scatter/gather descriptors | ||
33 | of the qDMA are implemented in big endian mode; otherwise they are in | ||
34 | little endian mode. | ||
35 | |||
36 | Examples: | ||
37 | |||
38 | qdma: dma-controller@8390000 { | ||
39 | compatible = "fsl,ls1021a-qdma"; | ||
40 | reg = <0x0 0x8388000 0x0 0x1000>, /* Controller regs */ | ||
41 | <0x0 0x8389000 0x0 0x1000>, /* Status regs */ | ||
42 | <0x0 0x838a000 0x0 0x2000>; /* Block regs */ | ||
43 | interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>, | ||
44 | <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>, | ||
45 | <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>; | ||
46 | interrupt-names = "qdma-error", | ||
47 | "qdma-queue0", "qdma-queue1"; | ||
48 | dma-channels = <8>; | ||
49 | block-number = <2>; | ||
50 | block-offset = <0x1000>; | ||
51 | fsl,dma-queues = <2>; | ||
52 | status-sizes = <64>; | ||
53 | queue-sizes = <64 64>; | ||
54 | big-endian; | ||
55 | }; | ||
56 | |||
57 | DMA clients must use the format described in dma/dma.txt file. | ||
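As noted in the line above, client drivers use the generic dma/dma.txt format (dmas/dma-names in the client node). A minimal sketch of the matching kernel-side channel request, assuming a channel named "tx"; the foo_ wrapper is illustrative and not part of this series:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

static int foo_request_tx_chan(struct device *dev, struct dma_chan **out)
{
        struct dma_chan *chan;

        /* "tx" must match an entry in the client's dma-names property. */
        chan = dma_request_chan(dev, "tx");
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        *out = chan;
        return 0;
}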
diff --git a/Documentation/devicetree/bindings/dma/k3dma.txt b/Documentation/devicetree/bindings/dma/k3dma.txt
index 4945aeac4dc4..10a2f15b08a3 100644
--- a/Documentation/devicetree/bindings/dma/k3dma.txt
+++ b/Documentation/devicetree/bindings/dma/k3dma.txt
@@ -3,7 +3,9 @@ | |||
3 | See dma.txt first | 3 | See dma.txt first |
4 | 4 | ||
5 | Required properties: | 5 | Required properties: |
6 | - compatible: Should be "hisilicon,k3-dma-1.0" | 6 | - compatible: Must be one of |
7 | - "hisilicon,k3-dma-1.0" | ||
8 | - "hisilicon,hisi-pcm-asp-dma-1.0" | ||
7 | - reg: Should contain DMA registers location and length. | 9 | - reg: Should contain DMA registers location and length. |
8 | - interrupts: Should contain one interrupt shared by all channel | 10 | - interrupts: Should contain one interrupt shared by all channel |
9 | - #dma-cells: see dma.txt, should be 1, para number | 11 | - #dma-cells: see dma.txt, should be 1, para number |
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index db757df7057d..0bedceed1963 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -23,8 +23,6 @@ Deprecated properties: | |||
23 | 23 | ||
24 | 24 | ||
25 | Optional properties: | 25 | Optional properties: |
26 | - is_private: The device channels should be marked as private and not for by the | ||
27 | general purpose DMA channel allocator. False if not passed. | ||
28 | - multi-block: Multi block transfers supported by hardware. Array property with | 26 | - multi-block: Multi block transfers supported by hardware. Array property with |
29 | one cell per channel. 0: not supported, 1 (default): supported. | 27 | one cell per channel. 0: not supported, 1 (default): supported. |
30 | - snps,dma-protection-control: AHB HPROT[3:1] protection setting. | 28 | - snps,dma-protection-control: AHB HPROT[3:1] protection setting. |
diff --git a/Documentation/devicetree/bindings/dma/sprd-dma.txt b/Documentation/devicetree/bindings/dma/sprd-dma.txt
index 7a10fea2e51b..adccea9941f1 100644
--- a/Documentation/devicetree/bindings/dma/sprd-dma.txt
+++ b/Documentation/devicetree/bindings/dma/sprd-dma.txt
@@ -31,7 +31,7 @@ DMA clients connected to the Spreadtrum DMA controller must use the format | |||
31 | described in the dma.txt file, using a two-cell specifier for each channel. | 31 | described in the dma.txt file, using a two-cell specifier for each channel. |
32 | The two cells in order are: | 32 | The two cells in order are: |
33 | 1. A phandle pointing to the DMA controller. | 33 | 1. A phandle pointing to the DMA controller. |
34 | 2. The channel id. | 34 | 2. The slave id. |
35 | 35 | ||
36 | spi0: spi@70a00000{ | 36 | spi0: spi@70a00000{ |
37 | ... | 37 | ... |
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
index 174af2c45e77..93b6d961dd4f 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
@@ -37,10 +37,11 @@ Required properties: | |||
37 | Required properties for VDMA: | 37 | Required properties for VDMA: |
38 | - xlnx,num-fstores: Should be the number of framebuffers as configured in h/w. | 38 | - xlnx,num-fstores: Should be the number of framebuffers as configured in h/w. |
39 | 39 | ||
40 | Optional properties: | ||
41 | - xlnx,include-sg: Tells configured for Scatter-mode in | ||
42 | the hardware. | ||
43 | Optional properties for AXI DMA: | 40 | Optional properties for AXI DMA: |
41 | - xlnx,sg-length-width: Should be set to the width in bits of the length | ||
42 | register as configured in h/w. Takes values {8...26}. If the property | ||
43 | is missing or invalid then the default value 23 is used. This is the | ||
44 | maximum value that is supported by all IP versions. | ||
44 | - xlnx,mcdma: Tells whether configured for multi-channel mode in the hardware. | 45 | - xlnx,mcdma: Tells whether configured for multi-channel mode in the hardware. |
45 | Optional properties for VDMA: | 46 | Optional properties for VDMA: |
46 | - xlnx,flush-fsync: Tells which channel to Flush on Frame sync. | 47 | - xlnx,flush-fsync: Tells which channel to Flush on Frame sync. |
diff --git a/Documentation/driver-api/dmaengine/client.rst b/Documentation/driver-api/dmaengine/client.rst
index d728e50105eb..45953f171500 100644
--- a/Documentation/driver-api/dmaengine/client.rst
+++ b/Documentation/driver-api/dmaengine/client.rst
@@ -172,7 +172,7 @@ The details of these operations are: | |||
172 | 172 | ||
173 | After calling ``dmaengine_submit()`` the submitted transfer descriptor | 173 | After calling ``dmaengine_submit()`` the submitted transfer descriptor |
174 | (``struct dma_async_tx_descriptor``) belongs to the DMA engine. | 174 | (``struct dma_async_tx_descriptor``) belongs to the DMA engine. |
175 | Consequentially, the client must consider invalid the pointer to that | 175 | Consequently, the client must consider invalid the pointer to that |
176 | descriptor. | 176 | descriptor. |
177 | 177 | ||
178 | 5. Issue pending DMA requests and wait for callback notification | 178 | 5. Issue pending DMA requests and wait for callback notification |
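A minimal sketch of the submit/issue sequence this paragraph describes (the foo_ naming is illustrative); once dmaengine_submit() returns, the client must no longer dereference the descriptor pointer:

#include <linux/dmaengine.h>

static int foo_start_transfer(struct dma_chan *chan,
                              struct dma_async_tx_descriptor *tx)
{
        dma_cookie_t cookie;

        cookie = dmaengine_submit(tx);
        if (dma_submit_error(cookie))
                return -EINVAL;

        /* The descriptor now belongs to the DMA engine; treat tx as invalid. */
        dma_async_issue_pending(chan);
        return 0;
}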
diff --git a/Documentation/driver-api/dmaengine/dmatest.rst b/Documentation/driver-api/dmaengine/dmatest.rst
index 8d81f1a7169b..e78d070bb468 100644
--- a/Documentation/driver-api/dmaengine/dmatest.rst
+++ b/Documentation/driver-api/dmaengine/dmatest.rst
@@ -59,6 +59,7 @@ parameter, that specific channel is requested using the dmaengine and a thread | |||
59 | is created with the existing parameters. This thread is set as pending | 59 | is created with the existing parameters. This thread is set as pending |
60 | and will be executed once run is set to 1. Any parameters set after the thread | 60 | and will be executed once run is set to 1. Any parameters set after the thread |
61 | is created are not applied. | 61 | is created are not applied. |
62 | |||
62 | .. hint:: | 63 | .. hint:: |
63 | available channel list could be extracted by running the following command:: | 64 | available channel list could be extracted by running the following command:: |
64 | 65 | ||
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d2286c7f7222..0b1dfb5bf2d9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -218,6 +218,20 @@ config FSL_EDMA | |||
218 | multiplexing capability for DMA request sources(slot). | 218 | multiplexing capability for DMA request sources(slot). |
219 | This module can be found on Freescale Vybrid and LS-1 SoCs. | 219 | This module can be found on Freescale Vybrid and LS-1 SoCs. |
220 | 220 | ||
221 | config FSL_QDMA | ||
222 | tristate "NXP Layerscape qDMA engine support" | ||
223 | depends on ARM || ARM64 | ||
224 | select DMA_ENGINE | ||
225 | select DMA_VIRTUAL_CHANNELS | ||
226 | select DMA_ENGINE_RAID | ||
227 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH | ||
228 | help | ||
229 | Support the NXP Layerscape qDMA engine with command queue and legacy mode. | ||
230 | Channel virtualization is supported through enqueuing of DMA jobs to, | ||
231 | or dequeuing DMA jobs from, different work queues. | ||
232 | This module can be found on NXP Layerscape SoCs. | ||
233 | The qDMA driver only works on SoCs with a DPAA hardware block. | ||
234 | |||
221 | config FSL_RAID | 235 | config FSL_RAID |
222 | tristate "Freescale RAID engine Support" | 236 | tristate "Freescale RAID engine Support" |
223 | depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH | 237 | depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH |
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 09571a81353d..6126e1c3a875 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o | |||
33 | obj-$(CONFIG_FSL_DMA) += fsldma.o | 33 | obj-$(CONFIG_FSL_DMA) += fsldma.o |
34 | obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o | 34 | obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o |
35 | obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o | 35 | obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o |
36 | obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o | ||
36 | obj-$(CONFIG_FSL_RAID) += fsl_raid.o | 37 | obj-$(CONFIG_FSL_RAID) += fsl_raid.o |
37 | obj-$(CONFIG_HSU_DMA) += hsu/ | 38 | obj-$(CONFIG_HSU_DMA) += hsu/ |
38 | obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o | 39 | obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 01d936c9fe89..a0a9cd76c1d4 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -134,7 +134,6 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) | |||
134 | struct at_desc *ret = NULL; | 134 | struct at_desc *ret = NULL; |
135 | unsigned long flags; | 135 | unsigned long flags; |
136 | unsigned int i = 0; | 136 | unsigned int i = 0; |
137 | LIST_HEAD(tmp_list); | ||
138 | 137 | ||
139 | spin_lock_irqsave(&atchan->lock, flags); | 138 | spin_lock_irqsave(&atchan->lock, flags); |
140 | list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { | 139 | list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { |
@@ -1387,8 +1386,6 @@ static int atc_pause(struct dma_chan *chan) | |||
1387 | int chan_id = atchan->chan_common.chan_id; | 1386 | int chan_id = atchan->chan_common.chan_id; |
1388 | unsigned long flags; | 1387 | unsigned long flags; |
1389 | 1388 | ||
1390 | LIST_HEAD(list); | ||
1391 | |||
1392 | dev_vdbg(chan2dev(chan), "%s\n", __func__); | 1389 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
1393 | 1390 | ||
1394 | spin_lock_irqsave(&atchan->lock, flags); | 1391 | spin_lock_irqsave(&atchan->lock, flags); |
@@ -1408,8 +1405,6 @@ static int atc_resume(struct dma_chan *chan) | |||
1408 | int chan_id = atchan->chan_common.chan_id; | 1405 | int chan_id = atchan->chan_common.chan_id; |
1409 | unsigned long flags; | 1406 | unsigned long flags; |
1410 | 1407 | ||
1411 | LIST_HEAD(list); | ||
1412 | |||
1413 | dev_vdbg(chan2dev(chan), "%s\n", __func__); | 1408 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
1414 | 1409 | ||
1415 | if (!atc_chan_is_paused(atchan)) | 1410 | if (!atc_chan_is_paused(atchan)) |
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index ae10f5614f95..ec8a291d62ba 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -2,9 +2,6 @@ | |||
2 | /* | 2 | /* |
3 | * BCM2835 DMA engine support | 3 | * BCM2835 DMA engine support |
4 | * | 4 | * |
5 | * This driver only supports cyclic DMA transfers | ||
6 | * as needed for the I2S module. | ||
7 | * | ||
8 | * Author: Florian Meier <florian.meier@koalo.de> | 5 | * Author: Florian Meier <florian.meier@koalo.de> |
9 | * Copyright 2013 | 6 | * Copyright 2013 |
10 | * | 7 | * |
@@ -42,7 +39,6 @@ | |||
42 | 39 | ||
43 | struct bcm2835_dmadev { | 40 | struct bcm2835_dmadev { |
44 | struct dma_device ddev; | 41 | struct dma_device ddev; |
45 | spinlock_t lock; | ||
46 | void __iomem *base; | 42 | void __iomem *base; |
47 | struct device_dma_parameters dma_parms; | 43 | struct device_dma_parameters dma_parms; |
48 | }; | 44 | }; |
@@ -64,7 +60,6 @@ struct bcm2835_cb_entry { | |||
64 | 60 | ||
65 | struct bcm2835_chan { | 61 | struct bcm2835_chan { |
66 | struct virt_dma_chan vc; | 62 | struct virt_dma_chan vc; |
67 | struct list_head node; | ||
68 | 63 | ||
69 | struct dma_slave_config cfg; | 64 | struct dma_slave_config cfg; |
70 | unsigned int dreq; | 65 | unsigned int dreq; |
@@ -312,8 +307,7 @@ static struct bcm2835_desc *bcm2835_dma_create_cb_chain( | |||
312 | return NULL; | 307 | return NULL; |
313 | 308 | ||
314 | /* allocate and setup the descriptor. */ | 309 | /* allocate and setup the descriptor. */ |
315 | d = kzalloc(sizeof(*d) + frames * sizeof(struct bcm2835_cb_entry), | 310 | d = kzalloc(struct_size(d, cb_list, frames), gfp); |
316 | gfp); | ||
317 | if (!d) | 311 | if (!d) |
318 | return NULL; | 312 | return NULL; |
319 | 313 | ||
@@ -406,7 +400,7 @@ static void bcm2835_dma_fill_cb_chain_with_sg( | |||
406 | } | 400 | } |
407 | } | 401 | } |
408 | 402 | ||
409 | static int bcm2835_dma_abort(struct bcm2835_chan *c) | 403 | static void bcm2835_dma_abort(struct bcm2835_chan *c) |
410 | { | 404 | { |
411 | void __iomem *chan_base = c->chan_base; | 405 | void __iomem *chan_base = c->chan_base; |
412 | long int timeout = 10000; | 406 | long int timeout = 10000; |
@@ -416,7 +410,7 @@ static int bcm2835_dma_abort(struct bcm2835_chan *c) | |||
416 | * (The ACTIVE flag in the CS register is not a reliable indicator.) | 410 | * (The ACTIVE flag in the CS register is not a reliable indicator.) |
417 | */ | 411 | */ |
418 | if (!readl(chan_base + BCM2835_DMA_ADDR)) | 412 | if (!readl(chan_base + BCM2835_DMA_ADDR)) |
419 | return 0; | 413 | return; |
420 | 414 | ||
421 | /* Write 0 to the active bit - Pause the DMA */ | 415 | /* Write 0 to the active bit - Pause the DMA */ |
422 | writel(0, chan_base + BCM2835_DMA_CS); | 416 | writel(0, chan_base + BCM2835_DMA_CS); |
@@ -432,7 +426,6 @@ static int bcm2835_dma_abort(struct bcm2835_chan *c) | |||
432 | "failed to complete outstanding writes\n"); | 426 | "failed to complete outstanding writes\n"); |
433 | 427 | ||
434 | writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS); | 428 | writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS); |
435 | return 0; | ||
436 | } | 429 | } |
437 | 430 | ||
438 | static void bcm2835_dma_start_desc(struct bcm2835_chan *c) | 431 | static void bcm2835_dma_start_desc(struct bcm2835_chan *c) |
@@ -504,8 +497,12 @@ static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan) | |||
504 | 497 | ||
505 | dev_dbg(dev, "Allocating DMA channel %d\n", c->ch); | 498 | dev_dbg(dev, "Allocating DMA channel %d\n", c->ch); |
506 | 499 | ||
500 | /* | ||
501 | * Control blocks are 256 bit in length and must start at a 256 bit | ||
502 | * (32 byte) aligned address (BCM2835 ARM Peripherals, sec. 4.2.1.1). | ||
503 | */ | ||
507 | c->cb_pool = dma_pool_create(dev_name(dev), dev, | 504 | c->cb_pool = dma_pool_create(dev_name(dev), dev, |
508 | sizeof(struct bcm2835_dma_cb), 0, 0); | 505 | sizeof(struct bcm2835_dma_cb), 32, 0); |
509 | if (!c->cb_pool) { | 506 | if (!c->cb_pool) { |
510 | dev_err(dev, "unable to allocate descriptor pool\n"); | 507 | dev_err(dev, "unable to allocate descriptor pool\n"); |
511 | return -ENOMEM; | 508 | return -ENOMEM; |
@@ -774,17 +771,11 @@ static int bcm2835_dma_slave_config(struct dma_chan *chan, | |||
774 | static int bcm2835_dma_terminate_all(struct dma_chan *chan) | 771 | static int bcm2835_dma_terminate_all(struct dma_chan *chan) |
775 | { | 772 | { |
776 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | 773 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); |
777 | struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); | ||
778 | unsigned long flags; | 774 | unsigned long flags; |
779 | LIST_HEAD(head); | 775 | LIST_HEAD(head); |
780 | 776 | ||
781 | spin_lock_irqsave(&c->vc.lock, flags); | 777 | spin_lock_irqsave(&c->vc.lock, flags); |
782 | 778 | ||
783 | /* Prevent this channel being scheduled */ | ||
784 | spin_lock(&d->lock); | ||
785 | list_del_init(&c->node); | ||
786 | spin_unlock(&d->lock); | ||
787 | |||
788 | /* stop DMA activity */ | 779 | /* stop DMA activity */ |
789 | if (c->desc) { | 780 | if (c->desc) { |
790 | vchan_terminate_vdesc(&c->desc->vd); | 781 | vchan_terminate_vdesc(&c->desc->vd); |
@@ -817,7 +808,6 @@ static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, | |||
817 | 808 | ||
818 | c->vc.desc_free = bcm2835_dma_desc_free; | 809 | c->vc.desc_free = bcm2835_dma_desc_free; |
819 | vchan_init(&c->vc, &d->ddev); | 810 | vchan_init(&c->vc, &d->ddev); |
820 | INIT_LIST_HEAD(&c->node); | ||
821 | 811 | ||
822 | c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id); | 812 | c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id); |
823 | c->ch = chan_id; | 813 | c->ch = chan_id; |
@@ -920,7 +910,6 @@ static int bcm2835_dma_probe(struct platform_device *pdev) | |||
920 | od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | 910 | od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
921 | od->ddev.dev = &pdev->dev; | 911 | od->ddev.dev = &pdev->dev; |
922 | INIT_LIST_HEAD(&od->ddev.channels); | 912 | INIT_LIST_HEAD(&od->ddev.channels); |
923 | spin_lock_init(&od->lock); | ||
924 | 913 | ||
925 | platform_set_drvdata(pdev, od); | 914 | platform_set_drvdata(pdev, od); |
926 | 915 | ||
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 15b2453d2647..ffc0adc2f6ce 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -367,8 +367,7 @@ static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs) | |||
367 | struct axi_dmac_desc *desc; | 367 | struct axi_dmac_desc *desc; |
368 | unsigned int i; | 368 | unsigned int i; |
369 | 369 | ||
370 | desc = kzalloc(sizeof(struct axi_dmac_desc) + | 370 | desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT); |
371 | sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT); | ||
372 | if (!desc) | 371 | if (!desc) |
373 | return NULL; | 372 | return NULL; |
374 | 373 | ||
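Several drivers in this series (bcm2835-dma, dma-axi-dmac, dma-jz4780) replace open-coded trailing-array allocation sizes with struct_size(). A self-contained sketch of the idiom, using hypothetical foo_ types rather than any of those drivers' structures:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct foo_sg {
        dma_addr_t addr;
        unsigned int len;
};

struct foo_desc {
        unsigned int num_sgs;
        struct foo_sg sg[];             /* flexible array member */
};

static struct foo_desc *foo_alloc_desc(unsigned int num_sgs)
{
        struct foo_desc *desc;

        /*
         * struct_size(desc, sg, num_sgs) computes sizeof(*desc) +
         * num_sgs * sizeof(desc->sg[0]), saturating to SIZE_MAX if the
         * multiplication would overflow instead of silently wrapping.
         */
        desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
        if (!desc)
                return NULL;

        desc->num_sgs = num_sgs;
        return desc;
}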
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index a8b6225faa12..9ce0a386225b 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -838,9 +838,8 @@ static int jz4780_dma_probe(struct platform_device *pdev) | |||
838 | if (!soc_data) | 838 | if (!soc_data) |
839 | return -EINVAL; | 839 | return -EINVAL; |
840 | 840 | ||
841 | jzdma = devm_kzalloc(dev, sizeof(*jzdma) | 841 | jzdma = devm_kzalloc(dev, struct_size(jzdma, chan, |
842 | + sizeof(*jzdma->chan) * soc_data->nb_channels, | 842 | soc_data->nb_channels), GFP_KERNEL); |
843 | GFP_KERNEL); | ||
844 | if (!jzdma) | 843 | if (!jzdma) |
845 | return -ENOMEM; | 844 | return -ENOMEM; |
846 | 845 | ||
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 6511928b4cdf..b96814a7dceb 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -200,15 +200,20 @@ struct dmatest_done { | |||
200 | wait_queue_head_t *wait; | 200 | wait_queue_head_t *wait; |
201 | }; | 201 | }; |
202 | 202 | ||
203 | struct dmatest_data { | ||
204 | u8 **raw; | ||
205 | u8 **aligned; | ||
206 | unsigned int cnt; | ||
207 | unsigned int off; | ||
208 | }; | ||
209 | |||
203 | struct dmatest_thread { | 210 | struct dmatest_thread { |
204 | struct list_head node; | 211 | struct list_head node; |
205 | struct dmatest_info *info; | 212 | struct dmatest_info *info; |
206 | struct task_struct *task; | 213 | struct task_struct *task; |
207 | struct dma_chan *chan; | 214 | struct dma_chan *chan; |
208 | u8 **srcs; | 215 | struct dmatest_data src; |
209 | u8 **usrcs; | 216 | struct dmatest_data dst; |
210 | u8 **dsts; | ||
211 | u8 **udsts; | ||
212 | enum dma_transaction_type type; | 217 | enum dma_transaction_type type; |
213 | wait_queue_head_t done_wait; | 218 | wait_queue_head_t done_wait; |
214 | struct dmatest_done test_done; | 219 | struct dmatest_done test_done; |
@@ -481,6 +486,53 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len) | |||
481 | return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10)); | 486 | return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10)); |
482 | } | 487 | } |
483 | 488 | ||
489 | static void __dmatest_free_test_data(struct dmatest_data *d, unsigned int cnt) | ||
490 | { | ||
491 | unsigned int i; | ||
492 | |||
493 | for (i = 0; i < cnt; i++) | ||
494 | kfree(d->raw[i]); | ||
495 | |||
496 | kfree(d->aligned); | ||
497 | kfree(d->raw); | ||
498 | } | ||
499 | |||
500 | static void dmatest_free_test_data(struct dmatest_data *d) | ||
501 | { | ||
502 | __dmatest_free_test_data(d, d->cnt); | ||
503 | } | ||
504 | |||
505 | static int dmatest_alloc_test_data(struct dmatest_data *d, | ||
506 | unsigned int buf_size, u8 align) | ||
507 | { | ||
508 | unsigned int i = 0; | ||
509 | |||
510 | d->raw = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL); | ||
511 | if (!d->raw) | ||
512 | return -ENOMEM; | ||
513 | |||
514 | d->aligned = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL); | ||
515 | if (!d->aligned) | ||
516 | goto err; | ||
517 | |||
518 | for (i = 0; i < d->cnt; i++) { | ||
519 | d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL); | ||
520 | if (!d->raw[i]) | ||
521 | goto err; | ||
522 | |||
523 | /* align to alignment restriction */ | ||
524 | if (align) | ||
525 | d->aligned[i] = PTR_ALIGN(d->raw[i], align); | ||
526 | else | ||
527 | d->aligned[i] = d->raw[i]; | ||
528 | } | ||
529 | |||
530 | return 0; | ||
531 | err: | ||
532 | __dmatest_free_test_data(d, i); | ||
533 | return -ENOMEM; | ||
534 | } | ||
535 | |||
484 | /* | 536 | /* |
485 | * This function repeatedly tests DMA transfers of various lengths and | 537 | * This function repeatedly tests DMA transfers of various lengths and |
486 | * offsets for a given operation type until it is told to exit by | 538 | * offsets for a given operation type until it is told to exit by |
@@ -511,8 +563,9 @@ static int dmatest_func(void *data) | |||
511 | enum dma_ctrl_flags flags; | 563 | enum dma_ctrl_flags flags; |
512 | u8 *pq_coefs = NULL; | 564 | u8 *pq_coefs = NULL; |
513 | int ret; | 565 | int ret; |
514 | int src_cnt; | 566 | unsigned int buf_size; |
515 | int dst_cnt; | 567 | struct dmatest_data *src; |
568 | struct dmatest_data *dst; | ||
516 | int i; | 569 | int i; |
517 | ktime_t ktime, start, diff; | 570 | ktime_t ktime, start, diff; |
518 | ktime_t filltime = 0; | 571 | ktime_t filltime = 0; |
@@ -535,25 +588,27 @@ static int dmatest_func(void *data) | |||
535 | params = &info->params; | 588 | params = &info->params; |
536 | chan = thread->chan; | 589 | chan = thread->chan; |
537 | dev = chan->device; | 590 | dev = chan->device; |
591 | src = &thread->src; | ||
592 | dst = &thread->dst; | ||
538 | if (thread->type == DMA_MEMCPY) { | 593 | if (thread->type == DMA_MEMCPY) { |
539 | align = params->alignment < 0 ? dev->copy_align : | 594 | align = params->alignment < 0 ? dev->copy_align : |
540 | params->alignment; | 595 | params->alignment; |
541 | src_cnt = dst_cnt = 1; | 596 | src->cnt = dst->cnt = 1; |
542 | } else if (thread->type == DMA_MEMSET) { | 597 | } else if (thread->type == DMA_MEMSET) { |
543 | align = params->alignment < 0 ? dev->fill_align : | 598 | align = params->alignment < 0 ? dev->fill_align : |
544 | params->alignment; | 599 | params->alignment; |
545 | src_cnt = dst_cnt = 1; | 600 | src->cnt = dst->cnt = 1; |
546 | is_memset = true; | 601 | is_memset = true; |
547 | } else if (thread->type == DMA_XOR) { | 602 | } else if (thread->type == DMA_XOR) { |
548 | /* force odd to ensure dst = src */ | 603 | /* force odd to ensure dst = src */ |
549 | src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); | 604 | src->cnt = min_odd(params->xor_sources | 1, dev->max_xor); |
550 | dst_cnt = 1; | 605 | dst->cnt = 1; |
551 | align = params->alignment < 0 ? dev->xor_align : | 606 | align = params->alignment < 0 ? dev->xor_align : |
552 | params->alignment; | 607 | params->alignment; |
553 | } else if (thread->type == DMA_PQ) { | 608 | } else if (thread->type == DMA_PQ) { |
554 | /* force odd to ensure dst = src */ | 609 | /* force odd to ensure dst = src */ |
555 | src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0)); | 610 | src->cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0)); |
556 | dst_cnt = 2; | 611 | dst->cnt = 2; |
557 | align = params->alignment < 0 ? dev->pq_align : | 612 | align = params->alignment < 0 ? dev->pq_align : |
558 | params->alignment; | 613 | params->alignment; |
559 | 614 | ||
@@ -561,75 +616,38 @@ static int dmatest_func(void *data) | |||
561 | if (!pq_coefs) | 616 | if (!pq_coefs) |
562 | goto err_thread_type; | 617 | goto err_thread_type; |
563 | 618 | ||
564 | for (i = 0; i < src_cnt; i++) | 619 | for (i = 0; i < src->cnt; i++) |
565 | pq_coefs[i] = 1; | 620 | pq_coefs[i] = 1; |
566 | } else | 621 | } else |
567 | goto err_thread_type; | 622 | goto err_thread_type; |
568 | 623 | ||
569 | /* Check if buffer count fits into map count variable (u8) */ | 624 | /* Check if buffer count fits into map count variable (u8) */ |
570 | if ((src_cnt + dst_cnt) >= 255) { | 625 | if ((src->cnt + dst->cnt) >= 255) { |
571 | pr_err("too many buffers (%d of 255 supported)\n", | 626 | pr_err("too many buffers (%d of 255 supported)\n", |
572 | src_cnt + dst_cnt); | 627 | src->cnt + dst->cnt); |
573 | goto err_free_coefs; | 628 | goto err_free_coefs; |
574 | } | 629 | } |
575 | 630 | ||
576 | if (1 << align > params->buf_size) { | 631 | buf_size = params->buf_size; |
632 | if (1 << align > buf_size) { | ||
577 | pr_err("%u-byte buffer too small for %d-byte alignment\n", | 633 | pr_err("%u-byte buffer too small for %d-byte alignment\n", |
578 | params->buf_size, 1 << align); | 634 | buf_size, 1 << align); |
579 | goto err_free_coefs; | 635 | goto err_free_coefs; |
580 | } | 636 | } |
581 | 637 | ||
582 | thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL); | 638 | if (dmatest_alloc_test_data(src, buf_size, align) < 0) |
583 | if (!thread->srcs) | ||
584 | goto err_free_coefs; | 639 | goto err_free_coefs; |
585 | 640 | ||
586 | thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL); | 641 | if (dmatest_alloc_test_data(dst, buf_size, align) < 0) |
587 | if (!thread->usrcs) | 642 | goto err_src; |
588 | goto err_usrcs; | ||
589 | |||
590 | for (i = 0; i < src_cnt; i++) { | ||
591 | thread->usrcs[i] = kmalloc(params->buf_size + align, | ||
592 | GFP_KERNEL); | ||
593 | if (!thread->usrcs[i]) | ||
594 | goto err_srcbuf; | ||
595 | |||
596 | /* align srcs to alignment restriction */ | ||
597 | if (align) | ||
598 | thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align); | ||
599 | else | ||
600 | thread->srcs[i] = thread->usrcs[i]; | ||
601 | } | ||
602 | thread->srcs[i] = NULL; | ||
603 | |||
604 | thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL); | ||
605 | if (!thread->dsts) | ||
606 | goto err_dsts; | ||
607 | |||
608 | thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL); | ||
609 | if (!thread->udsts) | ||
610 | goto err_udsts; | ||
611 | |||
612 | for (i = 0; i < dst_cnt; i++) { | ||
613 | thread->udsts[i] = kmalloc(params->buf_size + align, | ||
614 | GFP_KERNEL); | ||
615 | if (!thread->udsts[i]) | ||
616 | goto err_dstbuf; | ||
617 | |||
618 | /* align dsts to alignment restriction */ | ||
619 | if (align) | ||
620 | thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align); | ||
621 | else | ||
622 | thread->dsts[i] = thread->udsts[i]; | ||
623 | } | ||
624 | thread->dsts[i] = NULL; | ||
625 | 643 | ||
626 | set_user_nice(current, 10); | 644 | set_user_nice(current, 10); |
627 | 645 | ||
628 | srcs = kcalloc(src_cnt, sizeof(dma_addr_t), GFP_KERNEL); | 646 | srcs = kcalloc(src->cnt, sizeof(dma_addr_t), GFP_KERNEL); |
629 | if (!srcs) | 647 | if (!srcs) |
630 | goto err_dstbuf; | 648 | goto err_dst; |
631 | 649 | ||
632 | dma_pq = kcalloc(dst_cnt, sizeof(dma_addr_t), GFP_KERNEL); | 650 | dma_pq = kcalloc(dst->cnt, sizeof(dma_addr_t), GFP_KERNEL); |
633 | if (!dma_pq) | 651 | if (!dma_pq) |
634 | goto err_srcs_array; | 652 | goto err_srcs_array; |
635 | 653 | ||
@@ -644,21 +662,21 @@ static int dmatest_func(void *data) | |||
644 | struct dma_async_tx_descriptor *tx = NULL; | 662 | struct dma_async_tx_descriptor *tx = NULL; |
645 | struct dmaengine_unmap_data *um; | 663 | struct dmaengine_unmap_data *um; |
646 | dma_addr_t *dsts; | 664 | dma_addr_t *dsts; |
647 | unsigned int src_off, dst_off, len; | 665 | unsigned int len; |
648 | 666 | ||
649 | total_tests++; | 667 | total_tests++; |
650 | 668 | ||
651 | if (params->transfer_size) { | 669 | if (params->transfer_size) { |
652 | if (params->transfer_size >= params->buf_size) { | 670 | if (params->transfer_size >= buf_size) { |
653 | pr_err("%u-byte transfer size must be lower than %u-buffer size\n", | 671 | pr_err("%u-byte transfer size must be lower than %u-buffer size\n", |
654 | params->transfer_size, params->buf_size); | 672 | params->transfer_size, buf_size); |
655 | break; | 673 | break; |
656 | } | 674 | } |
657 | len = params->transfer_size; | 675 | len = params->transfer_size; |
658 | } else if (params->norandom) { | 676 | } else if (params->norandom) { |
659 | len = params->buf_size; | 677 | len = buf_size; |
660 | } else { | 678 | } else { |
661 | len = dmatest_random() % params->buf_size + 1; | 679 | len = dmatest_random() % buf_size + 1; |
662 | } | 680 | } |
663 | 681 | ||
664 | /* Do not alter transfer size explicitly defined by user */ | 682 | /* Do not alter transfer size explicitly defined by user */ |
@@ -670,57 +688,57 @@ static int dmatest_func(void *data) | |||
670 | total_len += len; | 688 | total_len += len; |
671 | 689 | ||
672 | if (params->norandom) { | 690 | if (params->norandom) { |
673 | src_off = 0; | 691 | src->off = 0; |
674 | dst_off = 0; | 692 | dst->off = 0; |
675 | } else { | 693 | } else { |
676 | src_off = dmatest_random() % (params->buf_size - len + 1); | 694 | src->off = dmatest_random() % (buf_size - len + 1); |
677 | dst_off = dmatest_random() % (params->buf_size - len + 1); | 695 | dst->off = dmatest_random() % (buf_size - len + 1); |
678 | 696 | ||
679 | src_off = (src_off >> align) << align; | 697 | src->off = (src->off >> align) << align; |
680 | dst_off = (dst_off >> align) << align; | 698 | dst->off = (dst->off >> align) << align; |
681 | } | 699 | } |
682 | 700 | ||
683 | if (!params->noverify) { | 701 | if (!params->noverify) { |
684 | start = ktime_get(); | 702 | start = ktime_get(); |
685 | dmatest_init_srcs(thread->srcs, src_off, len, | 703 | dmatest_init_srcs(src->aligned, src->off, len, |
686 | params->buf_size, is_memset); | 704 | buf_size, is_memset); |
687 | dmatest_init_dsts(thread->dsts, dst_off, len, | 705 | dmatest_init_dsts(dst->aligned, dst->off, len, |
688 | params->buf_size, is_memset); | 706 | buf_size, is_memset); |
689 | 707 | ||
690 | diff = ktime_sub(ktime_get(), start); | 708 | diff = ktime_sub(ktime_get(), start); |
691 | filltime = ktime_add(filltime, diff); | 709 | filltime = ktime_add(filltime, diff); |
692 | } | 710 | } |
693 | 711 | ||
694 | um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt, | 712 | um = dmaengine_get_unmap_data(dev->dev, src->cnt + dst->cnt, |
695 | GFP_KERNEL); | 713 | GFP_KERNEL); |
696 | if (!um) { | 714 | if (!um) { |
697 | failed_tests++; | 715 | failed_tests++; |
698 | result("unmap data NULL", total_tests, | 716 | result("unmap data NULL", total_tests, |
699 | src_off, dst_off, len, ret); | 717 | src->off, dst->off, len, ret); |
700 | continue; | 718 | continue; |
701 | } | 719 | } |
702 | 720 | ||
703 | um->len = params->buf_size; | 721 | um->len = buf_size; |
704 | for (i = 0; i < src_cnt; i++) { | 722 | for (i = 0; i < src->cnt; i++) { |
705 | void *buf = thread->srcs[i]; | 723 | void *buf = src->aligned[i]; |
706 | struct page *pg = virt_to_page(buf); | 724 | struct page *pg = virt_to_page(buf); |
707 | unsigned long pg_off = offset_in_page(buf); | 725 | unsigned long pg_off = offset_in_page(buf); |
708 | 726 | ||
709 | um->addr[i] = dma_map_page(dev->dev, pg, pg_off, | 727 | um->addr[i] = dma_map_page(dev->dev, pg, pg_off, |
710 | um->len, DMA_TO_DEVICE); | 728 | um->len, DMA_TO_DEVICE); |
711 | srcs[i] = um->addr[i] + src_off; | 729 | srcs[i] = um->addr[i] + src->off; |
712 | ret = dma_mapping_error(dev->dev, um->addr[i]); | 730 | ret = dma_mapping_error(dev->dev, um->addr[i]); |
713 | if (ret) { | 731 | if (ret) { |
714 | result("src mapping error", total_tests, | 732 | result("src mapping error", total_tests, |
715 | src_off, dst_off, len, ret); | 733 | src->off, dst->off, len, ret); |
716 | goto error_unmap_continue; | 734 | goto error_unmap_continue; |
717 | } | 735 | } |
718 | um->to_cnt++; | 736 | um->to_cnt++; |
719 | } | 737 | } |
720 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ | 738 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ |
721 | dsts = &um->addr[src_cnt]; | 739 | dsts = &um->addr[src->cnt]; |
722 | for (i = 0; i < dst_cnt; i++) { | 740 | for (i = 0; i < dst->cnt; i++) { |
723 | void *buf = thread->dsts[i]; | 741 | void *buf = dst->aligned[i]; |
724 | struct page *pg = virt_to_page(buf); | 742 | struct page *pg = virt_to_page(buf); |
725 | unsigned long pg_off = offset_in_page(buf); | 743 | unsigned long pg_off = offset_in_page(buf); |
726 | 744 | ||
@@ -729,7 +747,7 @@ static int dmatest_func(void *data) | |||
729 | ret = dma_mapping_error(dev->dev, dsts[i]); | 747 | ret = dma_mapping_error(dev->dev, dsts[i]); |
730 | if (ret) { | 748 | if (ret) { |
731 | result("dst mapping error", total_tests, | 749 | result("dst mapping error", total_tests, |
732 | src_off, dst_off, len, ret); | 750 | src->off, dst->off, len, ret); |
733 | goto error_unmap_continue; | 751 | goto error_unmap_continue; |
734 | } | 752 | } |
735 | um->bidi_cnt++; | 753 | um->bidi_cnt++; |
@@ -737,29 +755,29 @@ static int dmatest_func(void *data) | |||
737 | 755 | ||
738 | if (thread->type == DMA_MEMCPY) | 756 | if (thread->type == DMA_MEMCPY) |
739 | tx = dev->device_prep_dma_memcpy(chan, | 757 | tx = dev->device_prep_dma_memcpy(chan, |
740 | dsts[0] + dst_off, | 758 | dsts[0] + dst->off, |
741 | srcs[0], len, flags); | 759 | srcs[0], len, flags); |
742 | else if (thread->type == DMA_MEMSET) | 760 | else if (thread->type == DMA_MEMSET) |
743 | tx = dev->device_prep_dma_memset(chan, | 761 | tx = dev->device_prep_dma_memset(chan, |
744 | dsts[0] + dst_off, | 762 | dsts[0] + dst->off, |
745 | *(thread->srcs[0] + src_off), | 763 | *(src->aligned[0] + src->off), |
746 | len, flags); | 764 | len, flags); |
747 | else if (thread->type == DMA_XOR) | 765 | else if (thread->type == DMA_XOR) |
748 | tx = dev->device_prep_dma_xor(chan, | 766 | tx = dev->device_prep_dma_xor(chan, |
749 | dsts[0] + dst_off, | 767 | dsts[0] + dst->off, |
750 | srcs, src_cnt, | 768 | srcs, src->cnt, |
751 | len, flags); | 769 | len, flags); |
752 | else if (thread->type == DMA_PQ) { | 770 | else if (thread->type == DMA_PQ) { |
753 | for (i = 0; i < dst_cnt; i++) | 771 | for (i = 0; i < dst->cnt; i++) |
754 | dma_pq[i] = dsts[i] + dst_off; | 772 | dma_pq[i] = dsts[i] + dst->off; |
755 | tx = dev->device_prep_dma_pq(chan, dma_pq, srcs, | 773 | tx = dev->device_prep_dma_pq(chan, dma_pq, srcs, |
756 | src_cnt, pq_coefs, | 774 | src->cnt, pq_coefs, |
757 | len, flags); | 775 | len, flags); |
758 | } | 776 | } |
759 | 777 | ||
760 | if (!tx) { | 778 | if (!tx) { |
761 | result("prep error", total_tests, src_off, | 779 | result("prep error", total_tests, src->off, |
762 | dst_off, len, ret); | 780 | dst->off, len, ret); |
763 | msleep(100); | 781 | msleep(100); |
764 | goto error_unmap_continue; | 782 | goto error_unmap_continue; |
765 | } | 783 | } |
@@ -770,8 +788,8 @@ static int dmatest_func(void *data) | |||
770 | cookie = tx->tx_submit(tx); | 788 | cookie = tx->tx_submit(tx); |
771 | 789 | ||
772 | if (dma_submit_error(cookie)) { | 790 | if (dma_submit_error(cookie)) { |
773 | result("submit error", total_tests, src_off, | 791 | result("submit error", total_tests, src->off, |
774 | dst_off, len, ret); | 792 | dst->off, len, ret); |
775 | msleep(100); | 793 | msleep(100); |
776 | goto error_unmap_continue; | 794 | goto error_unmap_continue; |
777 | } | 795 | } |
@@ -783,58 +801,58 @@ static int dmatest_func(void *data) | |||
783 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); | 801 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); |
784 | 802 | ||
785 | if (!done->done) { | 803 | if (!done->done) { |
786 | result("test timed out", total_tests, src_off, dst_off, | 804 | result("test timed out", total_tests, src->off, dst->off, |
787 | len, 0); | 805 | len, 0); |
788 | goto error_unmap_continue; | 806 | goto error_unmap_continue; |
789 | } else if (status != DMA_COMPLETE) { | 807 | } else if (status != DMA_COMPLETE) { |
790 | result(status == DMA_ERROR ? | 808 | result(status == DMA_ERROR ? |
791 | "completion error status" : | 809 | "completion error status" : |
792 | "completion busy status", total_tests, src_off, | 810 | "completion busy status", total_tests, src->off, |
793 | dst_off, len, ret); | 811 | dst->off, len, ret); |
794 | goto error_unmap_continue; | 812 | goto error_unmap_continue; |
795 | } | 813 | } |
796 | 814 | ||
797 | dmaengine_unmap_put(um); | 815 | dmaengine_unmap_put(um); |
798 | 816 | ||
799 | if (params->noverify) { | 817 | if (params->noverify) { |
800 | verbose_result("test passed", total_tests, src_off, | 818 | verbose_result("test passed", total_tests, src->off, |
801 | dst_off, len, 0); | 819 | dst->off, len, 0); |
802 | continue; | 820 | continue; |
803 | } | 821 | } |
804 | 822 | ||
805 | start = ktime_get(); | 823 | start = ktime_get(); |
806 | pr_debug("%s: verifying source buffer...\n", current->comm); | 824 | pr_debug("%s: verifying source buffer...\n", current->comm); |
807 | error_count = dmatest_verify(thread->srcs, 0, src_off, | 825 | error_count = dmatest_verify(src->aligned, 0, src->off, |
808 | 0, PATTERN_SRC, true, is_memset); | 826 | 0, PATTERN_SRC, true, is_memset); |
809 | error_count += dmatest_verify(thread->srcs, src_off, | 827 | error_count += dmatest_verify(src->aligned, src->off, |
810 | src_off + len, src_off, | 828 | src->off + len, src->off, |
811 | PATTERN_SRC | PATTERN_COPY, true, is_memset); | 829 | PATTERN_SRC | PATTERN_COPY, true, is_memset); |
812 | error_count += dmatest_verify(thread->srcs, src_off + len, | 830 | error_count += dmatest_verify(src->aligned, src->off + len, |
813 | params->buf_size, src_off + len, | 831 | buf_size, src->off + len, |
814 | PATTERN_SRC, true, is_memset); | 832 | PATTERN_SRC, true, is_memset); |
815 | 833 | ||
816 | pr_debug("%s: verifying dest buffer...\n", current->comm); | 834 | pr_debug("%s: verifying dest buffer...\n", current->comm); |
817 | error_count += dmatest_verify(thread->dsts, 0, dst_off, | 835 | error_count += dmatest_verify(dst->aligned, 0, dst->off, |
818 | 0, PATTERN_DST, false, is_memset); | 836 | 0, PATTERN_DST, false, is_memset); |
819 | 837 | ||
820 | error_count += dmatest_verify(thread->dsts, dst_off, | 838 | error_count += dmatest_verify(dst->aligned, dst->off, |
821 | dst_off + len, src_off, | 839 | dst->off + len, src->off, |
822 | PATTERN_SRC | PATTERN_COPY, false, is_memset); | 840 | PATTERN_SRC | PATTERN_COPY, false, is_memset); |
823 | 841 | ||
824 | error_count += dmatest_verify(thread->dsts, dst_off + len, | 842 | error_count += dmatest_verify(dst->aligned, dst->off + len, |
825 | params->buf_size, dst_off + len, | 843 | buf_size, dst->off + len, |
826 | PATTERN_DST, false, is_memset); | 844 | PATTERN_DST, false, is_memset); |
827 | 845 | ||
828 | diff = ktime_sub(ktime_get(), start); | 846 | diff = ktime_sub(ktime_get(), start); |
829 | comparetime = ktime_add(comparetime, diff); | 847 | comparetime = ktime_add(comparetime, diff); |
830 | 848 | ||
831 | if (error_count) { | 849 | if (error_count) { |
832 | result("data error", total_tests, src_off, dst_off, | 850 | result("data error", total_tests, src->off, dst->off, |
833 | len, error_count); | 851 | len, error_count); |
834 | failed_tests++; | 852 | failed_tests++; |
835 | } else { | 853 | } else { |
836 | verbose_result("test passed", total_tests, src_off, | 854 | verbose_result("test passed", total_tests, src->off, |
837 | dst_off, len, 0); | 855 | dst->off, len, 0); |
838 | } | 856 | } |
839 | 857 | ||
840 | continue; | 858 | continue; |
@@ -852,19 +870,10 @@ error_unmap_continue: | |||
852 | kfree(dma_pq); | 870 | kfree(dma_pq); |
853 | err_srcs_array: | 871 | err_srcs_array: |
854 | kfree(srcs); | 872 | kfree(srcs); |
855 | err_dstbuf: | 873 | err_dst: |
856 | for (i = 0; thread->udsts[i]; i++) | 874 | dmatest_free_test_data(dst); |
857 | kfree(thread->udsts[i]); | 875 | err_src: |
858 | kfree(thread->udsts); | 876 | dmatest_free_test_data(src); |
859 | err_udsts: | ||
860 | kfree(thread->dsts); | ||
861 | err_dsts: | ||
862 | err_srcbuf: | ||
863 | for (i = 0; thread->usrcs[i]; i++) | ||
864 | kfree(thread->usrcs[i]); | ||
865 | kfree(thread->usrcs); | ||
866 | err_usrcs: | ||
867 | kfree(thread->srcs); | ||
868 | err_free_coefs: | 877 | err_free_coefs: |
869 | kfree(pq_coefs); | 878 | kfree(pq_coefs); |
870 | err_thread_type: | 879 | err_thread_type: |
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index f8888dc0b8dc..18b6014cf9b4 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -75,7 +75,7 @@ struct __packed axi_dma_lli { | |||
75 | __le32 sstat; | 75 | __le32 sstat; |
76 | __le32 dstat; | 76 | __le32 dstat; |
77 | __le32 status_lo; | 77 | __le32 status_lo; |
78 | __le32 ststus_hi; | 78 | __le32 status_hi; |
79 | __le32 reserved_lo; | 79 | __le32 reserved_lo; |
80 | __le32 reserved_hi; | 80 | __le32 reserved_hi; |
81 | }; | 81 | }; |
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index 04b9728c1d26..e5162690de8f 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -1,3 +1,5 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
1 | # | 3 | # |
2 | # DMA engine configuration for dw | 4 | # DMA engine configuration for dw |
3 | # | 5 | # |
diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile
index 2b949c2e4504..63ed895c09aa 100644
--- a/drivers/dma/dw/Makefile
+++ b/drivers/dma/dw/Makefile
@@ -1,6 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | obj-$(CONFIG_DW_DMAC_CORE) += dw_dmac_core.o | 2 | obj-$(CONFIG_DW_DMAC_CORE) += dw_dmac_core.o |
3 | dw_dmac_core-objs := core.o | 3 | dw_dmac_core-objs := core.o dw.o idma32.o |
4 | 4 | ||
5 | obj-$(CONFIG_DW_DMAC) += dw_dmac.o | 5 | obj-$(CONFIG_DW_DMAC) += dw_dmac.o |
6 | dw_dmac-objs := platform.o | 6 | dw_dmac-objs := platform.o |
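The new dw.o and idma32.o objects belong to the core/variant split visible in the core.c hunks below: the inline DWC_DEFAULT_CTLLO macro and the bytes2block()/block2bytes() helpers are replaced by per-variant callbacks invoked through the dw_dma instance (dw->initialize_chan(), dw->prepare_ctllo(), dw->encode_maxburst(), dw->bytes2block(), dw->block2bytes()). A rough sketch of the hook signatures as they appear at those call sites; the standalone struct below is only illustrative, since in the driver the function pointers live directly in struct dw_dma:

#include <linux/types.h>

struct dw_dma_chan;

/* Illustrative grouping of the per-variant (DW vs. iDMA 32-bit) hooks. */
struct foo_dw_dma_variant_ops {
        void    (*initialize_chan)(struct dw_dma_chan *dwc);
        u32     (*prepare_ctllo)(struct dw_dma_chan *dwc);
        void    (*encode_maxburst)(struct dw_dma_chan *dwc, u32 *maxburst);
        u32     (*bytes2block)(struct dw_dma_chan *dwc, size_t bytes,
                               unsigned int width, size_t *len);
        size_t  (*block2bytes)(struct dw_dma_chan *dwc, u32 block, u32 width);
};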
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index dc053e62f894..21cb2a58dbd2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1,13 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Core driver for the Synopsys DesignWare DMA Controller | 3 | * Core driver for the Synopsys DesignWare DMA Controller |
3 | * | 4 | * |
4 | * Copyright (C) 2007-2008 Atmel Corporation | 5 | * Copyright (C) 2007-2008 Atmel Corporation |
5 | * Copyright (C) 2010-2011 ST Microelectronics | 6 | * Copyright (C) 2010-2011 ST Microelectronics |
6 | * Copyright (C) 2013 Intel Corporation | 7 | * Copyright (C) 2013 Intel Corporation |
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | 8 | */ |
12 | 9 | ||
13 | #include <linux/bitops.h> | 10 | #include <linux/bitops.h> |
@@ -37,27 +34,6 @@ | |||
37 | * support descriptor writeback. | 34 | * support descriptor writeback. |
38 | */ | 35 | */ |
39 | 36 | ||
40 | #define DWC_DEFAULT_CTLLO(_chan) ({ \ | ||
41 | struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ | ||
42 | struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ | ||
43 | bool _is_slave = is_slave_direction(_dwc->direction); \ | ||
44 | u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ | ||
45 | DW_DMA_MSIZE_16; \ | ||
46 | u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ | ||
47 | DW_DMA_MSIZE_16; \ | ||
48 | u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ? \ | ||
49 | _dwc->dws.p_master : _dwc->dws.m_master; \ | ||
50 | u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ? \ | ||
51 | _dwc->dws.p_master : _dwc->dws.m_master; \ | ||
52 | \ | ||
53 | (DWC_CTLL_DST_MSIZE(_dmsize) \ | ||
54 | | DWC_CTLL_SRC_MSIZE(_smsize) \ | ||
55 | | DWC_CTLL_LLP_D_EN \ | ||
56 | | DWC_CTLL_LLP_S_EN \ | ||
57 | | DWC_CTLL_DMS(_dms) \ | ||
58 | | DWC_CTLL_SMS(_sms)); \ | ||
59 | }) | ||
60 | |||
61 | /* The set of bus widths supported by the DMA controller */ | 37 | /* The set of bus widths supported by the DMA controller */ |
62 | #define DW_DMA_BUSWIDTHS \ | 38 | #define DW_DMA_BUSWIDTHS \ |
63 | BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ | 39 | BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ |
@@ -138,44 +114,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
138 | dwc->descs_allocated--; | 114 | dwc->descs_allocated--; |
139 | } | 115 | } |
140 | 116 | ||
141 | static void dwc_initialize_chan_idma32(struct dw_dma_chan *dwc) | ||
142 | { | ||
143 | u32 cfghi = 0; | ||
144 | u32 cfglo = 0; | ||
145 | |||
146 | /* Set default burst alignment */ | ||
147 | cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN; | ||
148 | |||
149 | /* Low 4 bits of the request lines */ | ||
150 | cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf); | ||
151 | cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf); | ||
152 | |||
153 | /* Request line extension (2 bits) */ | ||
154 | cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3); | ||
155 | cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3); | ||
156 | |||
157 | channel_writel(dwc, CFG_LO, cfglo); | ||
158 | channel_writel(dwc, CFG_HI, cfghi); | ||
159 | } | ||
160 | |||
161 | static void dwc_initialize_chan_dw(struct dw_dma_chan *dwc) | ||
162 | { | ||
163 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
164 | u32 cfghi = DWC_CFGH_FIFO_MODE; | ||
165 | u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); | ||
166 | bool hs_polarity = dwc->dws.hs_polarity; | ||
167 | |||
168 | cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id); | ||
169 | cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id); | ||
170 | cfghi |= DWC_CFGH_PROTCTL(dw->pdata->protctl); | ||
171 | |||
172 | /* Set polarity of handshake interface */ | ||
173 | cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0; | ||
174 | |||
175 | channel_writel(dwc, CFG_LO, cfglo); | ||
176 | channel_writel(dwc, CFG_HI, cfghi); | ||
177 | } | ||
178 | |||
179 | static void dwc_initialize(struct dw_dma_chan *dwc) | 117 | static void dwc_initialize(struct dw_dma_chan *dwc) |
180 | { | 118 | { |
181 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 119 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
@@ -183,10 +121,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc) | |||
183 | if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags)) | 121 | if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags)) |
184 | return; | 122 | return; |
185 | 123 | ||
186 | if (dw->pdata->is_idma32) | 124 | dw->initialize_chan(dwc); |
187 | dwc_initialize_chan_idma32(dwc); | ||
188 | else | ||
189 | dwc_initialize_chan_dw(dwc); | ||
190 | 125 | ||
191 | /* Enable interrupts */ | 126 | /* Enable interrupts */ |
192 | channel_set_bit(dw, MASK.XFER, dwc->mask); | 127 | channel_set_bit(dw, MASK.XFER, dwc->mask); |
@@ -215,37 +150,6 @@ static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
215 | cpu_relax(); | 150 | cpu_relax(); |
216 | } | 151 | } |
217 | 152 | ||
218 | static u32 bytes2block(struct dw_dma_chan *dwc, size_t bytes, | ||
219 | unsigned int width, size_t *len) | ||
220 | { | ||
221 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
222 | u32 block; | ||
223 | |||
224 | /* Always in bytes for iDMA 32-bit */ | ||
225 | if (dw->pdata->is_idma32) | ||
226 | width = 0; | ||
227 | |||
228 | if ((bytes >> width) > dwc->block_size) { | ||
229 | block = dwc->block_size; | ||
230 | *len = block << width; | ||
231 | } else { | ||
232 | block = bytes >> width; | ||
233 | *len = bytes; | ||
234 | } | ||
235 | |||
236 | return block; | ||
237 | } | ||
238 | |||
239 | static size_t block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width) | ||
240 | { | ||
241 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
242 | |||
243 | if (dw->pdata->is_idma32) | ||
244 | return IDMA32C_CTLH_BLOCK_TS(block); | ||
245 | |||
246 | return DWC_CTLH_BLOCK_TS(block) << width; | ||
247 | } | ||
248 | |||
249 | /*----------------------------------------------------------------------*/ | 153 | /*----------------------------------------------------------------------*/ |
250 | 154 | ||
251 | /* Perform single block transfer */ | 155 | /* Perform single block transfer */ |
@@ -391,10 +295,11 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
391 | /* Returns how many bytes were already received from source */ | 295 | /* Returns how many bytes were already received from source */ |
392 | static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) | 296 | static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) |
393 | { | 297 | { |
298 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
394 | u32 ctlhi = channel_readl(dwc, CTL_HI); | 299 | u32 ctlhi = channel_readl(dwc, CTL_HI); |
395 | u32 ctllo = channel_readl(dwc, CTL_LO); | 300 | u32 ctllo = channel_readl(dwc, CTL_LO); |
396 | 301 | ||
397 | return block2bytes(dwc, ctlhi, ctllo >> 4 & 7); | 302 | return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7); |
398 | } | 303 | } |
399 | 304 | ||
400 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | 305 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) |
@@ -651,7 +556,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
651 | unsigned int src_width; | 556 | unsigned int src_width; |
652 | unsigned int dst_width; | 557 | unsigned int dst_width; |
653 | unsigned int data_width = dw->pdata->data_width[m_master]; | 558 | unsigned int data_width = dw->pdata->data_width[m_master]; |
654 | u32 ctllo; | 559 | u32 ctllo, ctlhi; |
655 | u8 lms = DWC_LLP_LMS(m_master); | 560 | u8 lms = DWC_LLP_LMS(m_master); |
656 | 561 | ||
657 | dev_vdbg(chan2dev(chan), | 562 | dev_vdbg(chan2dev(chan), |
@@ -667,7 +572,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
667 | 572 | ||
668 | src_width = dst_width = __ffs(data_width | src | dest | len); | 573 | src_width = dst_width = __ffs(data_width | src | dest | len); |
669 | 574 | ||
670 | ctllo = DWC_DEFAULT_CTLLO(chan) | 575 | ctllo = dw->prepare_ctllo(dwc) |
671 | | DWC_CTLL_DST_WIDTH(dst_width) | 576 | | DWC_CTLL_DST_WIDTH(dst_width) |
672 | | DWC_CTLL_SRC_WIDTH(src_width) | 577 | | DWC_CTLL_SRC_WIDTH(src_width) |
673 | | DWC_CTLL_DST_INC | 578 | | DWC_CTLL_DST_INC |
@@ -680,10 +585,12 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
680 | if (!desc) | 585 | if (!desc) |
681 | goto err_desc_get; | 586 | goto err_desc_get; |
682 | 587 | ||
588 | ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count); | ||
589 | |||
683 | lli_write(desc, sar, src + offset); | 590 | lli_write(desc, sar, src + offset); |
684 | lli_write(desc, dar, dest + offset); | 591 | lli_write(desc, dar, dest + offset); |
685 | lli_write(desc, ctllo, ctllo); | 592 | lli_write(desc, ctllo, ctllo); |
686 | lli_write(desc, ctlhi, bytes2block(dwc, len - offset, src_width, &xfer_count)); | 593 | lli_write(desc, ctlhi, ctlhi); |
687 | desc->len = xfer_count; | 594 | desc->len = xfer_count; |
688 | 595 | ||
689 | if (!first) { | 596 | if (!first) { |
@@ -721,7 +628,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
721 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; | 628 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; |
722 | struct dw_desc *prev; | 629 | struct dw_desc *prev; |
723 | struct dw_desc *first; | 630 | struct dw_desc *first; |
724 | u32 ctllo; | 631 | u32 ctllo, ctlhi; |
725 | u8 m_master = dwc->dws.m_master; | 632 | u8 m_master = dwc->dws.m_master; |
726 | u8 lms = DWC_LLP_LMS(m_master); | 633 | u8 lms = DWC_LLP_LMS(m_master); |
727 | dma_addr_t reg; | 634 | dma_addr_t reg; |
@@ -745,10 +652,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
745 | case DMA_MEM_TO_DEV: | 652 | case DMA_MEM_TO_DEV: |
746 | reg_width = __ffs(sconfig->dst_addr_width); | 653 | reg_width = __ffs(sconfig->dst_addr_width); |
747 | reg = sconfig->dst_addr; | 654 | reg = sconfig->dst_addr; |
748 | ctllo = (DWC_DEFAULT_CTLLO(chan) | 655 | ctllo = dw->prepare_ctllo(dwc) |
749 | | DWC_CTLL_DST_WIDTH(reg_width) | 656 | | DWC_CTLL_DST_WIDTH(reg_width) |
750 | | DWC_CTLL_DST_FIX | 657 | | DWC_CTLL_DST_FIX |
751 | | DWC_CTLL_SRC_INC); | 658 | | DWC_CTLL_SRC_INC; |
752 | 659 | ||
753 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | 660 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : |
754 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | 661 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); |
@@ -768,9 +675,11 @@ slave_sg_todev_fill_desc: | |||
768 | if (!desc) | 675 | if (!desc) |
769 | goto err_desc_get; | 676 | goto err_desc_get; |
770 | 677 | ||
678 | ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen); | ||
679 | |||
771 | lli_write(desc, sar, mem); | 680 | lli_write(desc, sar, mem); |
772 | lli_write(desc, dar, reg); | 681 | lli_write(desc, dar, reg); |
773 | lli_write(desc, ctlhi, bytes2block(dwc, len, mem_width, &dlen)); | 682 | lli_write(desc, ctlhi, ctlhi); |
774 | lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width)); | 683 | lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width)); |
775 | desc->len = dlen; | 684 | desc->len = dlen; |
776 | 685 | ||
@@ -793,10 +702,10 @@ slave_sg_todev_fill_desc: | |||
793 | case DMA_DEV_TO_MEM: | 702 | case DMA_DEV_TO_MEM: |
794 | reg_width = __ffs(sconfig->src_addr_width); | 703 | reg_width = __ffs(sconfig->src_addr_width); |
795 | reg = sconfig->src_addr; | 704 | reg = sconfig->src_addr; |
796 | ctllo = (DWC_DEFAULT_CTLLO(chan) | 705 | ctllo = dw->prepare_ctllo(dwc) |
797 | | DWC_CTLL_SRC_WIDTH(reg_width) | 706 | | DWC_CTLL_SRC_WIDTH(reg_width) |
798 | | DWC_CTLL_DST_INC | 707 | | DWC_CTLL_DST_INC |
799 | | DWC_CTLL_SRC_FIX); | 708 | | DWC_CTLL_SRC_FIX; |
800 | 709 | ||
801 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | 710 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : |
802 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | 711 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); |
@@ -814,9 +723,11 @@ slave_sg_fromdev_fill_desc: | |||
814 | if (!desc) | 723 | if (!desc) |
815 | goto err_desc_get; | 724 | goto err_desc_get; |
816 | 725 | ||
726 | ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen); | ||
727 | |||
817 | lli_write(desc, sar, reg); | 728 | lli_write(desc, sar, reg); |
818 | lli_write(desc, dar, mem); | 729 | lli_write(desc, dar, mem); |
819 | lli_write(desc, ctlhi, bytes2block(dwc, len, reg_width, &dlen)); | 730 | lli_write(desc, ctlhi, ctlhi); |
820 | mem_width = __ffs(data_width | mem | dlen); | 731 | mem_width = __ffs(data_width | mem | dlen); |
821 | lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width)); | 732 | lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width)); |
822 | desc->len = dlen; | 733 | desc->len = dlen; |
@@ -876,22 +787,12 @@ EXPORT_SYMBOL_GPL(dw_dma_filter); | |||
876 | static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) | 787 | static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) |
877 | { | 788 | { |
878 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 789 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
879 | struct dma_slave_config *sc = &dwc->dma_sconfig; | ||
880 | struct dw_dma *dw = to_dw_dma(chan->device); | 790 | struct dw_dma *dw = to_dw_dma(chan->device); |
881 | /* | ||
882 | * Fix sconfig's burst size according to dw_dmac. We need to convert | ||
883 | * them as: | ||
884 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. | ||
885 | * | ||
886 | * NOTE: burst size 2 is not supported by DesignWare controller. | ||
887 | * iDMA 32-bit supports it. | ||
888 | */ | ||
889 | u32 s = dw->pdata->is_idma32 ? 1 : 2; | ||
890 | 791 | ||
891 | memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); | 792 | memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); |
892 | 793 | ||
893 | sc->src_maxburst = sc->src_maxburst > 1 ? fls(sc->src_maxburst) - s : 0; | 794 | dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst); |
894 | sc->dst_maxburst = sc->dst_maxburst > 1 ? fls(sc->dst_maxburst) - s : 0; | 795 | dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst); |
895 | 796 | ||
896 | return 0; | 797 | return 0; |
897 | } | 798 | } |
@@ -900,16 +801,9 @@ static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain) | |||
900 | { | 801 | { |
901 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 802 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
902 | unsigned int count = 20; /* timeout iterations */ | 803 | unsigned int count = 20; /* timeout iterations */ |
903 | u32 cfglo; | ||
904 | 804 | ||
905 | cfglo = channel_readl(dwc, CFG_LO); | 805 | dw->suspend_chan(dwc, drain); |
906 | if (dw->pdata->is_idma32) { | 806 | |
907 | if (drain) | ||
908 | cfglo |= IDMA32C_CFGL_CH_DRAIN; | ||
909 | else | ||
910 | cfglo &= ~IDMA32C_CFGL_CH_DRAIN; | ||
911 | } | ||
912 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); | ||
913 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) | 807 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) |
914 | udelay(2); | 808 | udelay(2); |
915 | 809 | ||
@@ -928,11 +822,11 @@ static int dwc_pause(struct dma_chan *chan) | |||
928 | return 0; | 822 | return 0; |
929 | } | 823 | } |
930 | 824 | ||
931 | static inline void dwc_chan_resume(struct dw_dma_chan *dwc) | 825 | static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain) |
932 | { | 826 | { |
933 | u32 cfglo = channel_readl(dwc, CFG_LO); | 827 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
934 | 828 | ||
935 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); | 829 | dw->resume_chan(dwc, drain); |
936 | 830 | ||
937 | clear_bit(DW_DMA_IS_PAUSED, &dwc->flags); | 831 | clear_bit(DW_DMA_IS_PAUSED, &dwc->flags); |
938 | } | 832 | } |
@@ -945,7 +839,7 @@ static int dwc_resume(struct dma_chan *chan) | |||
945 | spin_lock_irqsave(&dwc->lock, flags); | 839 | spin_lock_irqsave(&dwc->lock, flags); |
946 | 840 | ||
947 | if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags)) | 841 | if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags)) |
948 | dwc_chan_resume(dwc); | 842 | dwc_chan_resume(dwc, false); |
949 | 843 | ||
950 | spin_unlock_irqrestore(&dwc->lock, flags); | 844 | spin_unlock_irqrestore(&dwc->lock, flags); |
951 | 845 | ||
@@ -968,7 +862,7 @@ static int dwc_terminate_all(struct dma_chan *chan) | |||
968 | 862 | ||
969 | dwc_chan_disable(dw, dwc); | 863 | dwc_chan_disable(dw, dwc); |
970 | 864 | ||
971 | dwc_chan_resume(dwc); | 865 | dwc_chan_resume(dwc, true); |
972 | 866 | ||
973 | /* active_list entries will end up before queued entries */ | 867 | /* active_list entries will end up before queued entries */ |
974 | list_splice_init(&dwc->queue, &list); | 868 | list_splice_init(&dwc->queue, &list); |
@@ -1058,33 +952,7 @@ static void dwc_issue_pending(struct dma_chan *chan) | |||
1058 | 952 | ||
1059 | /*----------------------------------------------------------------------*/ | 953 | /*----------------------------------------------------------------------*/ |
1060 | 954 | ||
1061 | /* | 955 | void do_dw_dma_off(struct dw_dma *dw) |
1062 | * Program FIFO size of channels. | ||
1063 | * | ||
1064 | * By default full FIFO (512 bytes) is assigned to channel 0. Here we | ||
1065 | * slice FIFO on equal parts between channels. | ||
1066 | */ | ||
1067 | static void idma32_fifo_partition(struct dw_dma *dw) | ||
1068 | { | ||
1069 | u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) | | ||
1070 | IDMA32C_FP_UPDATE; | ||
1071 | u64 fifo_partition = 0; | ||
1072 | |||
1073 | if (!dw->pdata->is_idma32) | ||
1074 | return; | ||
1075 | |||
1076 | /* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */ | ||
1077 | fifo_partition |= value << 0; | ||
1078 | |||
1079 | /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */ | ||
1080 | fifo_partition |= value << 32; | ||
1081 | |||
1082 | /* Program FIFO Partition registers - 64 bytes per channel */ | ||
1083 | idma32_writeq(dw, FIFO_PARTITION1, fifo_partition); | ||
1084 | idma32_writeq(dw, FIFO_PARTITION0, fifo_partition); | ||
1085 | } | ||
1086 | |||
1087 | static void dw_dma_off(struct dw_dma *dw) | ||
1088 | { | 956 | { |
1089 | unsigned int i; | 957 | unsigned int i; |
1090 | 958 | ||
@@ -1103,7 +971,7 @@ static void dw_dma_off(struct dw_dma *dw) | |||
1103 | clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags); | 971 | clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags); |
1104 | } | 972 | } |
1105 | 973 | ||
1106 | static void dw_dma_on(struct dw_dma *dw) | 974 | void do_dw_dma_on(struct dw_dma *dw) |
1107 | { | 975 | { |
1108 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | 976 | dma_writel(dw, CFG, DW_CFG_DMA_EN); |
1109 | } | 977 | } |
@@ -1139,7 +1007,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
1139 | 1007 | ||
1140 | /* Enable controller here if needed */ | 1008 | /* Enable controller here if needed */ |
1141 | if (!dw->in_use) | 1009 | if (!dw->in_use) |
1142 | dw_dma_on(dw); | 1010 | do_dw_dma_on(dw); |
1143 | dw->in_use |= dwc->mask; | 1011 | dw->in_use |= dwc->mask; |
1144 | 1012 | ||
1145 | return 0; | 1013 | return 0; |
@@ -1150,7 +1018,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1150 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1018 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1151 | struct dw_dma *dw = to_dw_dma(chan->device); | 1019 | struct dw_dma *dw = to_dw_dma(chan->device); |
1152 | unsigned long flags; | 1020 | unsigned long flags; |
1153 | LIST_HEAD(list); | ||
1154 | 1021 | ||
1155 | dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, | 1022 | dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, |
1156 | dwc->descs_allocated); | 1023 | dwc->descs_allocated); |
@@ -1177,30 +1044,25 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1177 | /* Disable controller in case it was a last user */ | 1044 | /* Disable controller in case it was a last user */ |
1178 | dw->in_use &= ~dwc->mask; | 1045 | dw->in_use &= ~dwc->mask; |
1179 | if (!dw->in_use) | 1046 | if (!dw->in_use) |
1180 | dw_dma_off(dw); | 1047 | do_dw_dma_off(dw); |
1181 | 1048 | ||
1182 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); | 1049 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); |
1183 | } | 1050 | } |
1184 | 1051 | ||
1185 | int dw_dma_probe(struct dw_dma_chip *chip) | 1052 | int do_dma_probe(struct dw_dma_chip *chip) |
1186 | { | 1053 | { |
1054 | struct dw_dma *dw = chip->dw; | ||
1187 | struct dw_dma_platform_data *pdata; | 1055 | struct dw_dma_platform_data *pdata; |
1188 | struct dw_dma *dw; | ||
1189 | bool autocfg = false; | 1056 | bool autocfg = false; |
1190 | unsigned int dw_params; | 1057 | unsigned int dw_params; |
1191 | unsigned int i; | 1058 | unsigned int i; |
1192 | int err; | 1059 | int err; |
1193 | 1060 | ||
1194 | dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); | ||
1195 | if (!dw) | ||
1196 | return -ENOMEM; | ||
1197 | |||
1198 | dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL); | 1061 | dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL); |
1199 | if (!dw->pdata) | 1062 | if (!dw->pdata) |
1200 | return -ENOMEM; | 1063 | return -ENOMEM; |
1201 | 1064 | ||
1202 | dw->regs = chip->regs; | 1065 | dw->regs = chip->regs; |
1203 | chip->dw = dw; | ||
1204 | 1066 | ||
1205 | pm_runtime_get_sync(chip->dev); | 1067 | pm_runtime_get_sync(chip->dev); |
1206 | 1068 | ||
@@ -1227,8 +1089,6 @@ int dw_dma_probe(struct dw_dma_chip *chip) | |||
1227 | pdata->block_size = dma_readl(dw, MAX_BLK_SIZE); | 1089 | pdata->block_size = dma_readl(dw, MAX_BLK_SIZE); |
1228 | 1090 | ||
1229 | /* Fill platform data with the default values */ | 1091 | /* Fill platform data with the default values */ |
1230 | pdata->is_private = true; | ||
1231 | pdata->is_memcpy = true; | ||
1232 | pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; | 1092 | pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; |
1233 | pdata->chan_priority = CHAN_PRIORITY_ASCENDING; | 1093 | pdata->chan_priority = CHAN_PRIORITY_ASCENDING; |
1234 | } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { | 1094 | } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { |
@@ -1252,15 +1112,10 @@ int dw_dma_probe(struct dw_dma_chip *chip) | |||
1252 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; | 1112 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; |
1253 | 1113 | ||
1254 | /* Force dma off, just in case */ | 1114 | /* Force dma off, just in case */ |
1255 | dw_dma_off(dw); | 1115 | dw->disable(dw); |
1256 | |||
1257 | idma32_fifo_partition(dw); | ||
1258 | 1116 | ||
1259 | /* Device and instance ID for IRQ and DMA pool */ | 1117 | /* Device and instance ID for IRQ and DMA pool */ |
1260 | if (pdata->is_idma32) | 1118 | dw->set_device_name(dw, chip->id); |
1261 | snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", chip->id); | ||
1262 | else | ||
1263 | snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", chip->id); | ||
1264 | 1119 | ||
1265 | /* Create a pool of consistent memory blocks for hardware descriptors */ | 1120 | /* Create a pool of consistent memory blocks for hardware descriptors */ |
1266 | dw->desc_pool = dmam_pool_create(dw->name, chip->dev, | 1121 | dw->desc_pool = dmam_pool_create(dw->name, chip->dev, |
@@ -1340,10 +1195,8 @@ int dw_dma_probe(struct dw_dma_chip *chip) | |||
1340 | 1195 | ||
1341 | /* Set capabilities */ | 1196 | /* Set capabilities */ |
1342 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); | 1197 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); |
1343 | if (pdata->is_private) | 1198 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); |
1344 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); | 1199 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); |
1345 | if (pdata->is_memcpy) | ||
1346 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); | ||
1347 | 1200 | ||
1348 | dw->dma.dev = chip->dev; | 1201 | dw->dma.dev = chip->dev; |
1349 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; | 1202 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; |
@@ -1384,16 +1237,15 @@ err_pdata: | |||
1384 | pm_runtime_put_sync_suspend(chip->dev); | 1237 | pm_runtime_put_sync_suspend(chip->dev); |
1385 | return err; | 1238 | return err; |
1386 | } | 1239 | } |
1387 | EXPORT_SYMBOL_GPL(dw_dma_probe); | ||
1388 | 1240 | ||
1389 | int dw_dma_remove(struct dw_dma_chip *chip) | 1241 | int do_dma_remove(struct dw_dma_chip *chip) |
1390 | { | 1242 | { |
1391 | struct dw_dma *dw = chip->dw; | 1243 | struct dw_dma *dw = chip->dw; |
1392 | struct dw_dma_chan *dwc, *_dwc; | 1244 | struct dw_dma_chan *dwc, *_dwc; |
1393 | 1245 | ||
1394 | pm_runtime_get_sync(chip->dev); | 1246 | pm_runtime_get_sync(chip->dev); |
1395 | 1247 | ||
1396 | dw_dma_off(dw); | 1248 | do_dw_dma_off(dw); |
1397 | dma_async_device_unregister(&dw->dma); | 1249 | dma_async_device_unregister(&dw->dma); |
1398 | 1250 | ||
1399 | free_irq(chip->irq, dw); | 1251 | free_irq(chip->irq, dw); |
@@ -1408,27 +1260,24 @@ int dw_dma_remove(struct dw_dma_chip *chip) | |||
1408 | pm_runtime_put_sync_suspend(chip->dev); | 1260 | pm_runtime_put_sync_suspend(chip->dev); |
1409 | return 0; | 1261 | return 0; |
1410 | } | 1262 | } |
1411 | EXPORT_SYMBOL_GPL(dw_dma_remove); | ||
1412 | 1263 | ||
1413 | int dw_dma_disable(struct dw_dma_chip *chip) | 1264 | int do_dw_dma_disable(struct dw_dma_chip *chip) |
1414 | { | 1265 | { |
1415 | struct dw_dma *dw = chip->dw; | 1266 | struct dw_dma *dw = chip->dw; |
1416 | 1267 | ||
1417 | dw_dma_off(dw); | 1268 | dw->disable(dw); |
1418 | return 0; | 1269 | return 0; |
1419 | } | 1270 | } |
1420 | EXPORT_SYMBOL_GPL(dw_dma_disable); | 1271 | EXPORT_SYMBOL_GPL(do_dw_dma_disable); |
1421 | 1272 | ||
1422 | int dw_dma_enable(struct dw_dma_chip *chip) | 1273 | int do_dw_dma_enable(struct dw_dma_chip *chip) |
1423 | { | 1274 | { |
1424 | struct dw_dma *dw = chip->dw; | 1275 | struct dw_dma *dw = chip->dw; |
1425 | 1276 | ||
1426 | idma32_fifo_partition(dw); | 1277 | dw->enable(dw); |
1427 | |||
1428 | dw_dma_on(dw); | ||
1429 | return 0; | 1278 | return 0; |
1430 | } | 1279 | } |
1431 | EXPORT_SYMBOL_GPL(dw_dma_enable); | 1280 | EXPORT_SYMBOL_GPL(do_dw_dma_enable); |
1432 | 1281 | ||
1433 | MODULE_LICENSE("GPL v2"); | 1282 | MODULE_LICENSE("GPL v2"); |
1434 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); | 1283 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); |
diff --git a/drivers/dma/dw/dw.c b/drivers/dma/dw/dw.c new file mode 100644 index 000000000000..7a085b3c1854 --- /dev/null +++ b/drivers/dma/dw/dw.c | |||
@@ -0,0 +1,138 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | // Copyright (C) 2007-2008 Atmel Corporation | ||
3 | // Copyright (C) 2010-2011 ST Microelectronics | ||
4 | // Copyright (C) 2013,2018 Intel Corporation | ||
5 | |||
6 | #include <linux/bitops.h> | ||
7 | #include <linux/dmaengine.h> | ||
8 | #include <linux/errno.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <linux/types.h> | ||
11 | |||
12 | #include "internal.h" | ||
13 | |||
14 | static void dw_dma_initialize_chan(struct dw_dma_chan *dwc) | ||
15 | { | ||
16 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
17 | u32 cfghi = DWC_CFGH_FIFO_MODE; | ||
18 | u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); | ||
19 | bool hs_polarity = dwc->dws.hs_polarity; | ||
20 | |||
21 | cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id); | ||
22 | cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id); | ||
23 | cfghi |= DWC_CFGH_PROTCTL(dw->pdata->protctl); | ||
24 | |||
25 | /* Set polarity of handshake interface */ | ||
26 | cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0; | ||
27 | |||
28 | channel_writel(dwc, CFG_LO, cfglo); | ||
29 | channel_writel(dwc, CFG_HI, cfghi); | ||
30 | } | ||
31 | |||
32 | static void dw_dma_suspend_chan(struct dw_dma_chan *dwc, bool drain) | ||
33 | { | ||
34 | u32 cfglo = channel_readl(dwc, CFG_LO); | ||
35 | |||
36 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); | ||
37 | } | ||
38 | |||
39 | static void dw_dma_resume_chan(struct dw_dma_chan *dwc, bool drain) | ||
40 | { | ||
41 | u32 cfglo = channel_readl(dwc, CFG_LO); | ||
42 | |||
43 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); | ||
44 | } | ||
45 | |||
46 | static u32 dw_dma_bytes2block(struct dw_dma_chan *dwc, | ||
47 | size_t bytes, unsigned int width, size_t *len) | ||
48 | { | ||
49 | u32 block; | ||
50 | |||
51 | if ((bytes >> width) > dwc->block_size) { | ||
52 | block = dwc->block_size; | ||
53 | *len = dwc->block_size << width; | ||
54 | } else { | ||
55 | block = bytes >> width; | ||
56 | *len = bytes; | ||
57 | } | ||
58 | |||
59 | return block; | ||
60 | } | ||
61 | |||
62 | static size_t dw_dma_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width) | ||
63 | { | ||
64 | return DWC_CTLH_BLOCK_TS(block) << width; | ||
65 | } | ||
66 | |||
67 | static u32 dw_dma_prepare_ctllo(struct dw_dma_chan *dwc) | ||
68 | { | ||
69 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; | ||
70 | bool is_slave = is_slave_direction(dwc->direction); | ||
71 | u8 smsize = is_slave ? sconfig->src_maxburst : DW_DMA_MSIZE_16; | ||
72 | u8 dmsize = is_slave ? sconfig->dst_maxburst : DW_DMA_MSIZE_16; | ||
73 | u8 p_master = dwc->dws.p_master; | ||
74 | u8 m_master = dwc->dws.m_master; | ||
75 | u8 dms = (dwc->direction == DMA_MEM_TO_DEV) ? p_master : m_master; | ||
76 | u8 sms = (dwc->direction == DMA_DEV_TO_MEM) ? p_master : m_master; | ||
77 | |||
78 | return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN | | ||
79 | DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize) | | ||
80 | DWC_CTLL_DMS(dms) | DWC_CTLL_SMS(sms); | ||
81 | } | ||
82 | |||
83 | static void dw_dma_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst) | ||
84 | { | ||
85 | /* | ||
86 | * Fix burst size according to dw_dmac. We need to convert them as: | ||
87 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. | ||
88 | */ | ||
89 | *maxburst = *maxburst > 1 ? fls(*maxburst) - 2 : 0; | ||
90 | } | ||
91 | |||
92 | static void dw_dma_set_device_name(struct dw_dma *dw, int id) | ||
93 | { | ||
94 | snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", id); | ||
95 | } | ||
96 | |||
97 | static void dw_dma_disable(struct dw_dma *dw) | ||
98 | { | ||
99 | do_dw_dma_off(dw); | ||
100 | } | ||
101 | |||
102 | static void dw_dma_enable(struct dw_dma *dw) | ||
103 | { | ||
104 | do_dw_dma_on(dw); | ||
105 | } | ||
106 | |||
107 | int dw_dma_probe(struct dw_dma_chip *chip) | ||
108 | { | ||
109 | struct dw_dma *dw; | ||
110 | |||
111 | dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); | ||
112 | if (!dw) | ||
113 | return -ENOMEM; | ||
114 | |||
115 | /* Channel operations */ | ||
116 | dw->initialize_chan = dw_dma_initialize_chan; | ||
117 | dw->suspend_chan = dw_dma_suspend_chan; | ||
118 | dw->resume_chan = dw_dma_resume_chan; | ||
119 | dw->prepare_ctllo = dw_dma_prepare_ctllo; | ||
120 | dw->encode_maxburst = dw_dma_encode_maxburst; | ||
121 | dw->bytes2block = dw_dma_bytes2block; | ||
122 | dw->block2bytes = dw_dma_block2bytes; | ||
123 | |||
124 | /* Device operations */ | ||
125 | dw->set_device_name = dw_dma_set_device_name; | ||
126 | dw->disable = dw_dma_disable; | ||
127 | dw->enable = dw_dma_enable; | ||
128 | |||
129 | chip->dw = dw; | ||
130 | return do_dma_probe(chip); | ||
131 | } | ||
132 | EXPORT_SYMBOL_GPL(dw_dma_probe); | ||
133 | |||
134 | int dw_dma_remove(struct dw_dma_chip *chip) | ||
135 | { | ||
136 | return do_dma_remove(chip); | ||
137 | } | ||
138 | EXPORT_SYMBOL_GPL(dw_dma_remove); | ||
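The bytes2block callback introduced above decides how many hardware data items fit into one descriptor, given the per-channel block-size limit, and reports the bytes it consumed so the prep loops can advance. A minimal user-space sketch of that split, with an illustrative 4095-item limit standing in for dwc->block_size (not a value taken from the driver):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_BLOCK_SIZE 4095	/* illustrative stand-in for dwc->block_size */

/* Mirrors the dw_dma_bytes2block() logic: 'width' is log2 of the item size,
 * the return value is the item count for CTL_HI, *len the bytes consumed. */
static unsigned int example_bytes2block(size_t bytes, unsigned int width, size_t *len)
{
	unsigned int block;

	if ((bytes >> width) > EXAMPLE_BLOCK_SIZE) {
		block = EXAMPLE_BLOCK_SIZE;
		*len = (size_t)EXAMPLE_BLOCK_SIZE << width;
	} else {
		block = bytes >> width;
		*len = bytes;
	}
	return block;
}

int main(void)
{
	size_t remaining = 70000, consumed;
	unsigned int width = 2;	/* 4-byte items */
	unsigned int desc = 0;

	/* Same shape as the dwc_prep_dma_memcpy() loop: emit descriptors
	 * until the whole transfer length is covered. */
	while (remaining) {
		unsigned int block = example_bytes2block(remaining, width, &consumed);

		printf("desc %u: %u items, %zu bytes\n", desc++, block, consumed);
		remaining -= consumed;
	}
	return 0;
}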
diff --git a/drivers/dma/dw/idma32.c b/drivers/dma/dw/idma32.c new file mode 100644 index 000000000000..f00657308811 --- /dev/null +++ b/drivers/dma/dw/idma32.c | |||
@@ -0,0 +1,160 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | // Copyright (C) 2013,2018 Intel Corporation | ||
3 | |||
4 | #include <linux/bitops.h> | ||
5 | #include <linux/dmaengine.h> | ||
6 | #include <linux/errno.h> | ||
7 | #include <linux/slab.h> | ||
8 | #include <linux/types.h> | ||
9 | |||
10 | #include "internal.h" | ||
11 | |||
12 | static void idma32_initialize_chan(struct dw_dma_chan *dwc) | ||
13 | { | ||
14 | u32 cfghi = 0; | ||
15 | u32 cfglo = 0; | ||
16 | |||
17 | /* Set default burst alignment */ | ||
18 | cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN; | ||
19 | |||
20 | /* Low 4 bits of the request lines */ | ||
21 | cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf); | ||
22 | cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf); | ||
23 | |||
24 | /* Request line extension (2 bits) */ | ||
25 | cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3); | ||
26 | cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3); | ||
27 | |||
28 | channel_writel(dwc, CFG_LO, cfglo); | ||
29 | channel_writel(dwc, CFG_HI, cfghi); | ||
30 | } | ||
31 | |||
32 | static void idma32_suspend_chan(struct dw_dma_chan *dwc, bool drain) | ||
33 | { | ||
34 | u32 cfglo = channel_readl(dwc, CFG_LO); | ||
35 | |||
36 | if (drain) | ||
37 | cfglo |= IDMA32C_CFGL_CH_DRAIN; | ||
38 | |||
39 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); | ||
40 | } | ||
41 | |||
42 | static void idma32_resume_chan(struct dw_dma_chan *dwc, bool drain) | ||
43 | { | ||
44 | u32 cfglo = channel_readl(dwc, CFG_LO); | ||
45 | |||
46 | if (drain) | ||
47 | cfglo &= ~IDMA32C_CFGL_CH_DRAIN; | ||
48 | |||
49 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); | ||
50 | } | ||
51 | |||
52 | static u32 idma32_bytes2block(struct dw_dma_chan *dwc, | ||
53 | size_t bytes, unsigned int width, size_t *len) | ||
54 | { | ||
55 | u32 block; | ||
56 | |||
57 | if (bytes > dwc->block_size) { | ||
58 | block = dwc->block_size; | ||
59 | *len = dwc->block_size; | ||
60 | } else { | ||
61 | block = bytes; | ||
62 | *len = bytes; | ||
63 | } | ||
64 | |||
65 | return block; | ||
66 | } | ||
67 | |||
68 | static size_t idma32_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width) | ||
69 | { | ||
70 | return IDMA32C_CTLH_BLOCK_TS(block); | ||
71 | } | ||
72 | |||
73 | static u32 idma32_prepare_ctllo(struct dw_dma_chan *dwc) | ||
74 | { | ||
75 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; | ||
76 | bool is_slave = is_slave_direction(dwc->direction); | ||
77 | u8 smsize = is_slave ? sconfig->src_maxburst : IDMA32_MSIZE_8; | ||
78 | u8 dmsize = is_slave ? sconfig->dst_maxburst : IDMA32_MSIZE_8; | ||
79 | |||
80 | return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN | | ||
81 | DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize); | ||
82 | } | ||
83 | |||
84 | static void idma32_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst) | ||
85 | { | ||
86 | *maxburst = *maxburst > 1 ? fls(*maxburst) - 1 : 0; | ||
87 | } | ||
88 | |||
89 | static void idma32_set_device_name(struct dw_dma *dw, int id) | ||
90 | { | ||
91 | snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", id); | ||
92 | } | ||
93 | |||
94 | /* | ||
95 | * Program FIFO size of channels. | ||
96 | * | ||
97 | * By default full FIFO (512 bytes) is assigned to channel 0. Here we | ||
98 | * slice the FIFO into equal parts between the channels. | ||
99 | */ | ||
100 | static void idma32_fifo_partition(struct dw_dma *dw) | ||
101 | { | ||
102 | u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) | | ||
103 | IDMA32C_FP_UPDATE; | ||
104 | u64 fifo_partition = 0; | ||
105 | |||
106 | /* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */ | ||
107 | fifo_partition |= value << 0; | ||
108 | |||
109 | /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */ | ||
110 | fifo_partition |= value << 32; | ||
111 | |||
112 | /* Program FIFO Partition registers - 64 bytes per channel */ | ||
113 | idma32_writeq(dw, FIFO_PARTITION1, fifo_partition); | ||
114 | idma32_writeq(dw, FIFO_PARTITION0, fifo_partition); | ||
115 | } | ||
116 | |||
117 | static void idma32_disable(struct dw_dma *dw) | ||
118 | { | ||
119 | do_dw_dma_off(dw); | ||
120 | idma32_fifo_partition(dw); | ||
121 | } | ||
122 | |||
123 | static void idma32_enable(struct dw_dma *dw) | ||
124 | { | ||
125 | idma32_fifo_partition(dw); | ||
126 | do_dw_dma_on(dw); | ||
127 | } | ||
128 | |||
129 | int idma32_dma_probe(struct dw_dma_chip *chip) | ||
130 | { | ||
131 | struct dw_dma *dw; | ||
132 | |||
133 | dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); | ||
134 | if (!dw) | ||
135 | return -ENOMEM; | ||
136 | |||
137 | /* Channel operations */ | ||
138 | dw->initialize_chan = idma32_initialize_chan; | ||
139 | dw->suspend_chan = idma32_suspend_chan; | ||
140 | dw->resume_chan = idma32_resume_chan; | ||
141 | dw->prepare_ctllo = idma32_prepare_ctllo; | ||
142 | dw->encode_maxburst = idma32_encode_maxburst; | ||
143 | dw->bytes2block = idma32_bytes2block; | ||
144 | dw->block2bytes = idma32_block2bytes; | ||
145 | |||
146 | /* Device operations */ | ||
147 | dw->set_device_name = idma32_set_device_name; | ||
148 | dw->disable = idma32_disable; | ||
149 | dw->enable = idma32_enable; | ||
150 | |||
151 | chip->dw = dw; | ||
152 | return do_dma_probe(chip); | ||
153 | } | ||
154 | EXPORT_SYMBOL_GPL(idma32_dma_probe); | ||
155 | |||
156 | int idma32_dma_remove(struct dw_dma_chip *chip) | ||
157 | { | ||
158 | return do_dma_remove(chip); | ||
159 | } | ||
160 | EXPORT_SYMBOL_GPL(idma32_dma_remove); | ||
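The two encode_maxburst implementations differ only in the value subtracted from fls(), because iDMA 32-bit also supports a burst of 2 while the DesignWare controller does not. A small user-space sketch of the resulting register encodings (fls() is reimplemented locally since the kernel helper is not available here):

#include <stdio.h>
#include <stdint.h>

/* User-space stand-in for the kernel's fls(): index of the highest set bit,
 * counting from 1, or 0 when the value is 0. */
static int fls_example(uint32_t v)
{
	int bit = 0;

	while (v) {
		bit++;
		v >>= 1;
	}
	return bit;
}

/* DesignWare: bursts 1/4/8/16 map to 0/1/2/3 (burst of 2 unsupported). */
static unsigned int dw_encode(uint32_t maxburst)
{
	return maxburst > 1 ? fls_example(maxburst) - 2 : 0;
}

/* iDMA 32-bit: bursts 1/2/4/8/16/32 map to 0/1/2/3/4/5, matching enum idma32_msize. */
static unsigned int idma32_encode(uint32_t maxburst)
{
	return maxburst > 1 ? fls_example(maxburst) - 1 : 0;
}

int main(void)
{
	uint32_t bursts[] = { 1, 2, 4, 8, 16, 32 };

	for (unsigned int i = 0; i < sizeof(bursts) / sizeof(bursts[0]); i++)
		printf("maxburst %2u -> dw %u, idma32 %u\n",
		       bursts[i], dw_encode(bursts[i]), idma32_encode(bursts[i]));
	return 0;
}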
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h index 41439732ff6b..1dd7a4e6dd23 100644 --- a/drivers/dma/dw/internal.h +++ b/drivers/dma/dw/internal.h | |||
@@ -1,11 +1,8 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* | 2 | /* |
2 | * Driver for the Synopsys DesignWare DMA Controller | 3 | * Driver for the Synopsys DesignWare DMA Controller |
3 | * | 4 | * |
4 | * Copyright (C) 2013 Intel Corporation | 5 | * Copyright (C) 2013 Intel Corporation |
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #ifndef _DMA_DW_INTERNAL_H | 8 | #ifndef _DMA_DW_INTERNAL_H |
@@ -15,8 +12,14 @@ | |||
15 | 12 | ||
16 | #include "regs.h" | 13 | #include "regs.h" |
17 | 14 | ||
18 | int dw_dma_disable(struct dw_dma_chip *chip); | 15 | int do_dma_probe(struct dw_dma_chip *chip); |
19 | int dw_dma_enable(struct dw_dma_chip *chip); | 16 | int do_dma_remove(struct dw_dma_chip *chip); |
17 | |||
18 | void do_dw_dma_on(struct dw_dma *dw); | ||
19 | void do_dw_dma_off(struct dw_dma *dw); | ||
20 | |||
21 | int do_dw_dma_disable(struct dw_dma_chip *chip); | ||
22 | int do_dw_dma_enable(struct dw_dma_chip *chip); | ||
20 | 23 | ||
21 | extern bool dw_dma_filter(struct dma_chan *chan, void *param); | 24 | extern bool dw_dma_filter(struct dma_chan *chan, void *param); |
22 | 25 | ||
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index 7778ed705a1a..e79a75db0852 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c | |||
@@ -1,12 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * PCI driver for the Synopsys DesignWare DMA Controller | 3 | * PCI driver for the Synopsys DesignWare DMA Controller |
3 | * | 4 | * |
4 | * Copyright (C) 2013 Intel Corporation | 5 | * Copyright (C) 2013 Intel Corporation |
5 | * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> | 6 | * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> |
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | 7 | */ |
11 | 8 | ||
12 | #include <linux/module.h> | 9 | #include <linux/module.h> |
@@ -15,21 +12,33 @@ | |||
15 | 12 | ||
16 | #include "internal.h" | 13 | #include "internal.h" |
17 | 14 | ||
18 | static struct dw_dma_platform_data mrfld_pdata = { | 15 | struct dw_dma_pci_data { |
16 | const struct dw_dma_platform_data *pdata; | ||
17 | int (*probe)(struct dw_dma_chip *chip); | ||
18 | }; | ||
19 | |||
20 | static const struct dw_dma_pci_data dw_pci_data = { | ||
21 | .probe = dw_dma_probe, | ||
22 | }; | ||
23 | |||
24 | static const struct dw_dma_platform_data idma32_pdata = { | ||
19 | .nr_channels = 8, | 25 | .nr_channels = 8, |
20 | .is_private = true, | ||
21 | .is_memcpy = true, | ||
22 | .is_idma32 = true, | ||
23 | .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, | 26 | .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, |
24 | .chan_priority = CHAN_PRIORITY_ASCENDING, | 27 | .chan_priority = CHAN_PRIORITY_ASCENDING, |
25 | .block_size = 131071, | 28 | .block_size = 131071, |
26 | .nr_masters = 1, | 29 | .nr_masters = 1, |
27 | .data_width = {4}, | 30 | .data_width = {4}, |
31 | .multi_block = {1, 1, 1, 1, 1, 1, 1, 1}, | ||
32 | }; | ||
33 | |||
34 | static const struct dw_dma_pci_data idma32_pci_data = { | ||
35 | .pdata = &idma32_pdata, | ||
36 | .probe = idma32_dma_probe, | ||
28 | }; | 37 | }; |
29 | 38 | ||
30 | static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) | 39 | static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) |
31 | { | 40 | { |
32 | const struct dw_dma_platform_data *pdata = (void *)pid->driver_data; | 41 | const struct dw_dma_pci_data *data = (void *)pid->driver_data; |
33 | struct dw_dma_chip *chip; | 42 | struct dw_dma_chip *chip; |
34 | int ret; | 43 | int ret; |
35 | 44 | ||
@@ -62,9 +71,9 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
62 | chip->id = pdev->devfn; | 71 | chip->id = pdev->devfn; |
63 | chip->regs = pcim_iomap_table(pdev)[0]; | 72 | chip->regs = pcim_iomap_table(pdev)[0]; |
64 | chip->irq = pdev->irq; | 73 | chip->irq = pdev->irq; |
65 | chip->pdata = pdata; | 74 | chip->pdata = data->pdata; |
66 | 75 | ||
67 | ret = dw_dma_probe(chip); | 76 | ret = data->probe(chip); |
68 | if (ret) | 77 | if (ret) |
69 | return ret; | 78 | return ret; |
70 | 79 | ||
@@ -90,7 +99,7 @@ static int dw_pci_suspend_late(struct device *dev) | |||
90 | struct pci_dev *pci = to_pci_dev(dev); | 99 | struct pci_dev *pci = to_pci_dev(dev); |
91 | struct dw_dma_chip *chip = pci_get_drvdata(pci); | 100 | struct dw_dma_chip *chip = pci_get_drvdata(pci); |
92 | 101 | ||
93 | return dw_dma_disable(chip); | 102 | return do_dw_dma_disable(chip); |
94 | }; | 103 | }; |
95 | 104 | ||
96 | static int dw_pci_resume_early(struct device *dev) | 105 | static int dw_pci_resume_early(struct device *dev) |
@@ -98,7 +107,7 @@ static int dw_pci_resume_early(struct device *dev) | |||
98 | struct pci_dev *pci = to_pci_dev(dev); | 107 | struct pci_dev *pci = to_pci_dev(dev); |
99 | struct dw_dma_chip *chip = pci_get_drvdata(pci); | 108 | struct dw_dma_chip *chip = pci_get_drvdata(pci); |
100 | 109 | ||
101 | return dw_dma_enable(chip); | 110 | return do_dw_dma_enable(chip); |
102 | }; | 111 | }; |
103 | 112 | ||
104 | #endif /* CONFIG_PM_SLEEP */ | 113 | #endif /* CONFIG_PM_SLEEP */ |
@@ -109,24 +118,24 @@ static const struct dev_pm_ops dw_pci_dev_pm_ops = { | |||
109 | 118 | ||
110 | static const struct pci_device_id dw_pci_id_table[] = { | 119 | static const struct pci_device_id dw_pci_id_table[] = { |
111 | /* Medfield (GPDMA) */ | 120 | /* Medfield (GPDMA) */ |
112 | { PCI_VDEVICE(INTEL, 0x0827) }, | 121 | { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_data }, |
113 | 122 | ||
114 | /* BayTrail */ | 123 | /* BayTrail */ |
115 | { PCI_VDEVICE(INTEL, 0x0f06) }, | 124 | { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_data }, |
116 | { PCI_VDEVICE(INTEL, 0x0f40) }, | 125 | { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_data }, |
117 | 126 | ||
118 | /* Merrifield iDMA 32-bit (GPDMA) */ | 127 | /* Merrifield */ |
119 | { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&mrfld_pdata }, | 128 | { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&idma32_pci_data }, |
120 | 129 | ||
121 | /* Braswell */ | 130 | /* Braswell */ |
122 | { PCI_VDEVICE(INTEL, 0x2286) }, | 131 | { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_data }, |
123 | { PCI_VDEVICE(INTEL, 0x22c0) }, | 132 | { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_data }, |
124 | 133 | ||
125 | /* Haswell */ | 134 | /* Haswell */ |
126 | { PCI_VDEVICE(INTEL, 0x9c60) }, | 135 | { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_data }, |
127 | 136 | ||
128 | /* Broadwell */ | 137 | /* Broadwell */ |
129 | { PCI_VDEVICE(INTEL, 0x9ce0) }, | 138 | { PCI_VDEVICE(INTEL, 0x9ce0), (kernel_ulong_t)&dw_pci_data }, |
130 | 139 | ||
131 | { } | 140 | { } |
132 | }; | 141 | }; |
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index 31ff8113c3de..382dfd9e9600 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Platform driver for the Synopsys DesignWare DMA Controller | 3 | * Platform driver for the Synopsys DesignWare DMA Controller |
3 | * | 4 | * |
@@ -6,10 +7,6 @@ | |||
6 | * Copyright (C) 2013 Intel Corporation | 7 | * Copyright (C) 2013 Intel Corporation |
7 | * | 8 | * |
8 | * Some parts of this driver are derived from the original dw_dmac. | 9 | * Some parts of this driver are derived from the original dw_dmac. |
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | 10 | */ |
14 | 11 | ||
15 | #include <linux/module.h> | 12 | #include <linux/module.h> |
@@ -128,15 +125,6 @@ dw_dma_parse_dt(struct platform_device *pdev) | |||
128 | pdata->nr_masters = nr_masters; | 125 | pdata->nr_masters = nr_masters; |
129 | pdata->nr_channels = nr_channels; | 126 | pdata->nr_channels = nr_channels; |
130 | 127 | ||
131 | if (of_property_read_bool(np, "is_private")) | ||
132 | pdata->is_private = true; | ||
133 | |||
134 | /* | ||
135 | * All known devices, which use DT for configuration, support | ||
136 | * memory-to-memory transfers. So enable it by default. | ||
137 | */ | ||
138 | pdata->is_memcpy = true; | ||
139 | |||
140 | if (!of_property_read_u32(np, "chan_allocation_order", &tmp)) | 128 | if (!of_property_read_u32(np, "chan_allocation_order", &tmp)) |
141 | pdata->chan_allocation_order = (unsigned char)tmp; | 129 | pdata->chan_allocation_order = (unsigned char)tmp; |
142 | 130 | ||
@@ -264,7 +252,7 @@ static void dw_shutdown(struct platform_device *pdev) | |||
264 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); | 252 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); |
265 | 253 | ||
266 | /* | 254 | /* |
267 | * We have to call dw_dma_disable() to stop any ongoing transfer. On | 255 | * We have to call do_dw_dma_disable() to stop any ongoing transfer. On |
268 | * some platforms we can't do that since DMA device is powered off. | 256 | * some platforms we can't do that since DMA device is powered off. |
269 | * Moreover we have no possibility to check if the platform is affected | 257 | * Moreover we have no possibility to check if the platform is affected |
270 | * or not. That's why we call pm_runtime_get_sync() / pm_runtime_put() | 258 | * or not. That's why we call pm_runtime_get_sync() / pm_runtime_put() |
@@ -273,7 +261,7 @@ static void dw_shutdown(struct platform_device *pdev) | |||
273 | * used by the driver. | 261 | * used by the driver. |
274 | */ | 262 | */ |
275 | pm_runtime_get_sync(chip->dev); | 263 | pm_runtime_get_sync(chip->dev); |
276 | dw_dma_disable(chip); | 264 | do_dw_dma_disable(chip); |
277 | pm_runtime_put_sync_suspend(chip->dev); | 265 | pm_runtime_put_sync_suspend(chip->dev); |
278 | 266 | ||
279 | clk_disable_unprepare(chip->clk); | 267 | clk_disable_unprepare(chip->clk); |
@@ -303,7 +291,7 @@ static int dw_suspend_late(struct device *dev) | |||
303 | { | 291 | { |
304 | struct dw_dma_chip *chip = dev_get_drvdata(dev); | 292 | struct dw_dma_chip *chip = dev_get_drvdata(dev); |
305 | 293 | ||
306 | dw_dma_disable(chip); | 294 | do_dw_dma_disable(chip); |
307 | clk_disable_unprepare(chip->clk); | 295 | clk_disable_unprepare(chip->clk); |
308 | 296 | ||
309 | return 0; | 297 | return 0; |
@@ -318,7 +306,7 @@ static int dw_resume_early(struct device *dev) | |||
318 | if (ret) | 306 | if (ret) |
319 | return ret; | 307 | return ret; |
320 | 308 | ||
321 | return dw_dma_enable(chip); | 309 | return do_dw_dma_enable(chip); |
322 | } | 310 | } |
323 | 311 | ||
324 | #endif /* CONFIG_PM_SLEEP */ | 312 | #endif /* CONFIG_PM_SLEEP */ |
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h index 646c9c960c07..3fce66ecee7a 100644 --- a/drivers/dma/dw/regs.h +++ b/drivers/dma/dw/regs.h | |||
@@ -1,13 +1,10 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* | 2 | /* |
2 | * Driver for the Synopsys DesignWare AHB DMA Controller | 3 | * Driver for the Synopsys DesignWare AHB DMA Controller |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2007 Atmel Corporation | 5 | * Copyright (C) 2005-2007 Atmel Corporation |
5 | * Copyright (C) 2010-2011 ST Microelectronics | 6 | * Copyright (C) 2010-2011 ST Microelectronics |
6 | * Copyright (C) 2016 Intel Corporation | 7 | * Copyright (C) 2016 Intel Corporation |
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | 8 | */ |
12 | 9 | ||
13 | #include <linux/bitops.h> | 10 | #include <linux/bitops.h> |
@@ -222,6 +219,16 @@ enum dw_dma_msize { | |||
222 | 219 | ||
223 | /* iDMA 32-bit support */ | 220 | /* iDMA 32-bit support */ |
224 | 221 | ||
222 | /* bursts size */ | ||
223 | enum idma32_msize { | ||
224 | IDMA32_MSIZE_1, | ||
225 | IDMA32_MSIZE_2, | ||
226 | IDMA32_MSIZE_4, | ||
227 | IDMA32_MSIZE_8, | ||
228 | IDMA32_MSIZE_16, | ||
229 | IDMA32_MSIZE_32, | ||
230 | }; | ||
231 | |||
225 | /* Bitfields in CTL_HI */ | 232 | /* Bitfields in CTL_HI */ |
226 | #define IDMA32C_CTLH_BLOCK_TS_MASK GENMASK(16, 0) | 233 | #define IDMA32C_CTLH_BLOCK_TS_MASK GENMASK(16, 0) |
227 | #define IDMA32C_CTLH_BLOCK_TS(x) ((x) & IDMA32C_CTLH_BLOCK_TS_MASK) | 234 | #define IDMA32C_CTLH_BLOCK_TS(x) ((x) & IDMA32C_CTLH_BLOCK_TS_MASK) |
@@ -312,6 +319,21 @@ struct dw_dma { | |||
312 | u8 all_chan_mask; | 319 | u8 all_chan_mask; |
313 | u8 in_use; | 320 | u8 in_use; |
314 | 321 | ||
322 | /* Channel operations */ | ||
323 | void (*initialize_chan)(struct dw_dma_chan *dwc); | ||
324 | void (*suspend_chan)(struct dw_dma_chan *dwc, bool drain); | ||
325 | void (*resume_chan)(struct dw_dma_chan *dwc, bool drain); | ||
326 | u32 (*prepare_ctllo)(struct dw_dma_chan *dwc); | ||
327 | void (*encode_maxburst)(struct dw_dma_chan *dwc, u32 *maxburst); | ||
328 | u32 (*bytes2block)(struct dw_dma_chan *dwc, size_t bytes, | ||
329 | unsigned int width, size_t *len); | ||
330 | size_t (*block2bytes)(struct dw_dma_chan *dwc, u32 block, u32 width); | ||
331 | |||
332 | /* Device operations */ | ||
333 | void (*set_device_name)(struct dw_dma *dw, int id); | ||
334 | void (*disable)(struct dw_dma *dw); | ||
335 | void (*enable)(struct dw_dma *dw); | ||
336 | |||
315 | /* platform data */ | 337 | /* platform data */ |
316 | struct dw_dma_platform_data *pdata; | 338 | struct dw_dma_platform_data *pdata; |
317 | }; | 339 | }; |
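The new members added to struct dw_dma above are what let the core code drop its pdata->is_idma32 branches: dw.c and idma32.c each fill the table at probe time and core.c only calls through the pointers. A minimal sketch of that dispatch pattern in plain C, using made-up example types rather than the driver's own:

#include <stdio.h>

/* Illustrative ops table in the spirit of the new struct dw_dma members:
 * the core calls through function pointers, each controller flavour fills
 * them in at probe time. */
struct example_dma {
	const char *name;
	void (*set_device_name)(struct example_dma *dw, int id);
	void (*enable)(struct example_dma *dw);
};

static void dw_set_name(struct example_dma *dw, int id)
{
	printf("dw:dmac%d\n", id);
}

static void idma32_set_name(struct example_dma *dw, int id)
{
	printf("idma32:dmac%d\n", id);
}

static void generic_enable(struct example_dma *dw)
{
	printf("%s: CFG <- DMA_EN\n", dw->name);
}

/* Core-side code only ever sees the callbacks, as in do_dma_probe(). */
static void example_core_probe(struct example_dma *dw, int id)
{
	dw->set_device_name(dw, id);
	dw->enable(dw);
}

int main(void)
{
	struct example_dma dw = {
		.name = "dw", .set_device_name = dw_set_name, .enable = generic_enable,
	};
	struct example_dma idma32 = {
		.name = "idma32", .set_device_name = idma32_set_name, .enable = generic_enable,
	};

	example_core_probe(&dw, 0);
	example_core_probe(&idma32, 0);
	return 0;
}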
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index 8876c4c1bb2c..680b2a00a953 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/dmapool.h> | 6 | #include <linux/dmapool.h> |
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
9 | #include <linux/dma-mapping.h> | ||
9 | 10 | ||
10 | #include "fsl-edma-common.h" | 11 | #include "fsl-edma-common.h" |
11 | 12 | ||
@@ -173,12 +174,62 @@ int fsl_edma_resume(struct dma_chan *chan) | |||
173 | } | 174 | } |
174 | EXPORT_SYMBOL_GPL(fsl_edma_resume); | 175 | EXPORT_SYMBOL_GPL(fsl_edma_resume); |
175 | 176 | ||
177 | static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan) | ||
178 | { | ||
179 | if (fsl_chan->dma_dir != DMA_NONE) | ||
180 | dma_unmap_resource(fsl_chan->vchan.chan.device->dev, | ||
181 | fsl_chan->dma_dev_addr, | ||
182 | fsl_chan->dma_dev_size, | ||
183 | fsl_chan->dma_dir, 0); | ||
184 | fsl_chan->dma_dir = DMA_NONE; | ||
185 | } | ||
186 | |||
187 | static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan, | ||
188 | enum dma_transfer_direction dir) | ||
189 | { | ||
190 | struct device *dev = fsl_chan->vchan.chan.device->dev; | ||
191 | enum dma_data_direction dma_dir; | ||
192 | phys_addr_t addr = 0; | ||
193 | u32 size = 0; | ||
194 | |||
195 | switch (dir) { | ||
196 | case DMA_MEM_TO_DEV: | ||
197 | dma_dir = DMA_FROM_DEVICE; | ||
198 | addr = fsl_chan->cfg.dst_addr; | ||
199 | size = fsl_chan->cfg.dst_maxburst; | ||
200 | break; | ||
201 | case DMA_DEV_TO_MEM: | ||
202 | dma_dir = DMA_TO_DEVICE; | ||
203 | addr = fsl_chan->cfg.src_addr; | ||
204 | size = fsl_chan->cfg.src_maxburst; | ||
205 | break; | ||
206 | default: | ||
207 | dma_dir = DMA_NONE; | ||
208 | break; | ||
209 | } | ||
210 | |||
211 | /* Already mapped for this config? */ | ||
212 | if (fsl_chan->dma_dir == dma_dir) | ||
213 | return true; | ||
214 | |||
215 | fsl_edma_unprep_slave_dma(fsl_chan); | ||
216 | |||
217 | fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0); | ||
218 | if (dma_mapping_error(dev, fsl_chan->dma_dev_addr)) | ||
219 | return false; | ||
220 | fsl_chan->dma_dev_size = size; | ||
221 | fsl_chan->dma_dir = dma_dir; | ||
222 | |||
223 | return true; | ||
224 | } | ||
225 | |||
176 | int fsl_edma_slave_config(struct dma_chan *chan, | 226 | int fsl_edma_slave_config(struct dma_chan *chan, |
177 | struct dma_slave_config *cfg) | 227 | struct dma_slave_config *cfg) |
178 | { | 228 | { |
179 | struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); | 229 | struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); |
180 | 230 | ||
181 | memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg)); | 231 | memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg)); |
232 | fsl_edma_unprep_slave_dma(fsl_chan); | ||
182 | 233 | ||
183 | return 0; | 234 | return 0; |
184 | } | 235 | } |
@@ -339,9 +390,7 @@ static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan, | |||
339 | struct fsl_edma_desc *fsl_desc; | 390 | struct fsl_edma_desc *fsl_desc; |
340 | int i; | 391 | int i; |
341 | 392 | ||
342 | fsl_desc = kzalloc(sizeof(*fsl_desc) + | 393 | fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT); |
343 | sizeof(struct fsl_edma_sw_tcd) * | ||
344 | sg_len, GFP_NOWAIT); | ||
345 | if (!fsl_desc) | 394 | if (!fsl_desc) |
346 | return NULL; | 395 | return NULL; |
347 | 396 | ||
@@ -378,6 +427,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic( | |||
378 | if (!is_slave_direction(direction)) | 427 | if (!is_slave_direction(direction)) |
379 | return NULL; | 428 | return NULL; |
380 | 429 | ||
430 | if (!fsl_edma_prep_slave_dma(fsl_chan, direction)) | ||
431 | return NULL; | ||
432 | |||
381 | sg_len = buf_len / period_len; | 433 | sg_len = buf_len / period_len; |
382 | fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len); | 434 | fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len); |
383 | if (!fsl_desc) | 435 | if (!fsl_desc) |
@@ -409,11 +461,11 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic( | |||
409 | 461 | ||
410 | if (direction == DMA_MEM_TO_DEV) { | 462 | if (direction == DMA_MEM_TO_DEV) { |
411 | src_addr = dma_buf_next; | 463 | src_addr = dma_buf_next; |
412 | dst_addr = fsl_chan->cfg.dst_addr; | 464 | dst_addr = fsl_chan->dma_dev_addr; |
413 | soff = fsl_chan->cfg.dst_addr_width; | 465 | soff = fsl_chan->cfg.dst_addr_width; |
414 | doff = 0; | 466 | doff = 0; |
415 | } else { | 467 | } else { |
416 | src_addr = fsl_chan->cfg.src_addr; | 468 | src_addr = fsl_chan->dma_dev_addr; |
417 | dst_addr = dma_buf_next; | 469 | dst_addr = dma_buf_next; |
418 | soff = 0; | 470 | soff = 0; |
419 | doff = fsl_chan->cfg.src_addr_width; | 471 | doff = fsl_chan->cfg.src_addr_width; |
@@ -444,6 +496,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg( | |||
444 | if (!is_slave_direction(direction)) | 496 | if (!is_slave_direction(direction)) |
445 | return NULL; | 497 | return NULL; |
446 | 498 | ||
499 | if (!fsl_edma_prep_slave_dma(fsl_chan, direction)) | ||
500 | return NULL; | ||
501 | |||
447 | fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len); | 502 | fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len); |
448 | if (!fsl_desc) | 503 | if (!fsl_desc) |
449 | return NULL; | 504 | return NULL; |
@@ -468,11 +523,11 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg( | |||
468 | 523 | ||
469 | if (direction == DMA_MEM_TO_DEV) { | 524 | if (direction == DMA_MEM_TO_DEV) { |
470 | src_addr = sg_dma_address(sg); | 525 | src_addr = sg_dma_address(sg); |
471 | dst_addr = fsl_chan->cfg.dst_addr; | 526 | dst_addr = fsl_chan->dma_dev_addr; |
472 | soff = fsl_chan->cfg.dst_addr_width; | 527 | soff = fsl_chan->cfg.dst_addr_width; |
473 | doff = 0; | 528 | doff = 0; |
474 | } else { | 529 | } else { |
475 | src_addr = fsl_chan->cfg.src_addr; | 530 | src_addr = fsl_chan->dma_dev_addr; |
476 | dst_addr = sg_dma_address(sg); | 531 | dst_addr = sg_dma_address(sg); |
477 | soff = 0; | 532 | soff = 0; |
478 | doff = fsl_chan->cfg.src_addr_width; | 533 | doff = fsl_chan->cfg.src_addr_width; |
@@ -555,6 +610,7 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan) | |||
555 | fsl_edma_chan_mux(fsl_chan, 0, false); | 610 | fsl_edma_chan_mux(fsl_chan, 0, false); |
556 | fsl_chan->edesc = NULL; | 611 | fsl_chan->edesc = NULL; |
557 | vchan_get_all_descriptors(&fsl_chan->vchan, &head); | 612 | vchan_get_all_descriptors(&fsl_chan->vchan, &head); |
613 | fsl_edma_unprep_slave_dma(fsl_chan); | ||
558 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); | 614 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); |
559 | 615 | ||
560 | vchan_dma_desc_free_list(&fsl_chan->vchan, &head); | 616 | vchan_dma_desc_free_list(&fsl_chan->vchan, &head); |
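The fsl_edma_alloc_desc() hunk above swaps the open-coded header-plus-array arithmetic for struct_size(), which computes the same flexible-array allocation size with overflow saturation. A user-space sketch of the pattern (struct_size() itself comes from <linux/overflow.h>, so a plain computation stands in for it, and the struct here is a simplified stand-in for fsl_edma_desc):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct sw_tcd_example {
	unsigned long bus_addr;
	void *vtcd;
};

/* Header followed by a flexible array, like fsl_edma_desc and its tcd[]. */
struct desc_example {
	unsigned int n_tcds;
	struct sw_tcd_example tcd[];
};

int main(void)
{
	unsigned int sg_len = 4;
	/* Open-coded equivalent of struct_size(desc, tcd, sg_len); the kernel
	 * macro additionally saturates instead of wrapping on overflow. */
	size_t bytes = sizeof(struct desc_example) +
		       sizeof(struct sw_tcd_example) * sg_len;
	struct desc_example *desc = calloc(1, bytes);

	if (!desc)
		return 1;
	desc->n_tcds = sg_len;
	printf("allocated %zu bytes for %u TCD entries\n", bytes, desc->n_tcds);
	free(desc);
	return 0;
}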
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index 8917e8865959..b435d8e1e3a1 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #ifndef _FSL_EDMA_COMMON_H_ | 6 | #ifndef _FSL_EDMA_COMMON_H_ |
7 | #define _FSL_EDMA_COMMON_H_ | 7 | #define _FSL_EDMA_COMMON_H_ |
8 | 8 | ||
9 | #include <linux/dma-direction.h> | ||
9 | #include "virt-dma.h" | 10 | #include "virt-dma.h" |
10 | 11 | ||
11 | #define EDMA_CR_EDBG BIT(1) | 12 | #define EDMA_CR_EDBG BIT(1) |
@@ -120,6 +121,9 @@ struct fsl_edma_chan { | |||
120 | struct dma_slave_config cfg; | 121 | struct dma_slave_config cfg; |
121 | u32 attr; | 122 | u32 attr; |
122 | struct dma_pool *tcd_pool; | 123 | struct dma_pool *tcd_pool; |
124 | dma_addr_t dma_dev_addr; | ||
125 | u32 dma_dev_size; | ||
126 | enum dma_data_direction dma_dir; | ||
123 | }; | 127 | }; |
124 | 128 | ||
125 | struct fsl_edma_desc { | 129 | struct fsl_edma_desc { |
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index 34d70112fcc9..75e8a7ba3a22 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c | |||
@@ -254,6 +254,7 @@ static int fsl_edma_probe(struct platform_device *pdev) | |||
254 | fsl_chan->pm_state = RUNNING; | 254 | fsl_chan->pm_state = RUNNING; |
255 | fsl_chan->slave_id = 0; | 255 | fsl_chan->slave_id = 0; |
256 | fsl_chan->idle = true; | 256 | fsl_chan->idle = true; |
257 | fsl_chan->dma_dir = DMA_NONE; | ||
257 | fsl_chan->vchan.desc_free = fsl_edma_free_desc; | 258 | fsl_chan->vchan.desc_free = fsl_edma_free_desc; |
258 | vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev); | 259 | vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev); |
259 | 260 | ||
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c new file mode 100644 index 000000000000..aa1d0ae3d207 --- /dev/null +++ b/drivers/dma/fsl-qdma.c | |||
@@ -0,0 +1,1259 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | // Copyright 2014-2015 Freescale | ||
3 | // Copyright 2018 NXP | ||
4 | |||
5 | /* | ||
6 | * Driver for NXP Layerscape Queue Direct Memory Access Controller | ||
7 | * | ||
8 | * Author: | ||
9 | * Wen He <wen.he_1@nxp.com> | ||
10 | * Jiaheng Fan <jiaheng.fan@nxp.com> | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/module.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/of_irq.h> | ||
17 | #include <linux/of_platform.h> | ||
18 | #include <linux/of_dma.h> | ||
19 | #include <linux/dma-mapping.h> | ||
20 | |||
21 | #include "virt-dma.h" | ||
22 | #include "fsldma.h" | ||
23 | |||
24 | /* Register related definition */ | ||
25 | #define FSL_QDMA_DMR 0x0 | ||
26 | #define FSL_QDMA_DSR 0x4 | ||
27 | #define FSL_QDMA_DEIER 0xe00 | ||
28 | #define FSL_QDMA_DEDR 0xe04 | ||
29 | #define FSL_QDMA_DECFDW0R 0xe10 | ||
30 | #define FSL_QDMA_DECFDW1R 0xe14 | ||
31 | #define FSL_QDMA_DECFDW2R 0xe18 | ||
32 | #define FSL_QDMA_DECFDW3R 0xe1c | ||
33 | #define FSL_QDMA_DECFQIDR 0xe30 | ||
34 | #define FSL_QDMA_DECBR 0xe34 | ||
35 | |||
36 | #define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x)) | ||
37 | #define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x)) | ||
38 | #define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x)) | ||
39 | #define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x)) | ||
40 | #define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x)) | ||
41 | #define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x)) | ||
42 | #define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x)) | ||
43 | #define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x)) | ||
44 | |||
45 | #define FSL_QDMA_SQDPAR 0x80c | ||
46 | #define FSL_QDMA_SQEPAR 0x814 | ||
47 | #define FSL_QDMA_BSQMR 0x800 | ||
48 | #define FSL_QDMA_BSQSR 0x804 | ||
49 | #define FSL_QDMA_BSQICR 0x828 | ||
50 | #define FSL_QDMA_CQMR 0xa00 | ||
51 | #define FSL_QDMA_CQDSCR1 0xa08 | ||
52 | #define FSL_QDMA_CQDSCR2 0xa0c | ||
53 | #define FSL_QDMA_CQIER 0xa10 | ||
54 | #define FSL_QDMA_CQEDR 0xa14 | ||
55 | #define FSL_QDMA_SQCCMR 0xa20 | ||
56 | |||
57 | /* Registers for bit and genmask */ | ||
58 | #define FSL_QDMA_CQIDR_SQT BIT(15) | ||
59 | #define QDMA_CCDF_FOTMAT BIT(29) | ||
60 | #define QDMA_CCDF_SER BIT(30) | ||
61 | #define QDMA_SG_FIN BIT(30) | ||
62 | #define QDMA_SG_LEN_MASK GENMASK(29, 0) | ||
63 | #define QDMA_CCDF_MASK GENMASK(28, 20) | ||
64 | |||
65 | #define FSL_QDMA_DEDR_CLEAR GENMASK(31, 0) | ||
66 | #define FSL_QDMA_BCQIDR_CLEAR GENMASK(31, 0) | ||
67 | #define FSL_QDMA_DEIER_CLEAR GENMASK(31, 0) | ||
68 | |||
69 | #define FSL_QDMA_BCQIER_CQTIE BIT(15) | ||
70 | #define FSL_QDMA_BCQIER_CQPEIE BIT(23) | ||
71 | #define FSL_QDMA_BSQICR_ICEN BIT(31) | ||
72 | |||
73 | #define FSL_QDMA_BSQICR_ICST(x) ((x) << 16) | ||
74 | #define FSL_QDMA_CQIER_MEIE BIT(31) | ||
75 | #define FSL_QDMA_CQIER_TEIE BIT(0) | ||
76 | #define FSL_QDMA_SQCCMR_ENTER_WM BIT(21) | ||
77 | |||
78 | #define FSL_QDMA_BCQMR_EN BIT(31) | ||
79 | #define FSL_QDMA_BCQMR_EI BIT(30) | ||
80 | #define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20) | ||
81 | #define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16) | ||
82 | |||
83 | #define FSL_QDMA_BCQSR_QF BIT(16) | ||
84 | #define FSL_QDMA_BCQSR_XOFF BIT(0) | ||
85 | |||
86 | #define FSL_QDMA_BSQMR_EN BIT(31) | ||
87 | #define FSL_QDMA_BSQMR_DI BIT(30) | ||
88 | #define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16) | ||
89 | |||
90 | #define FSL_QDMA_BSQSR_QE BIT(17) | ||
91 | |||
92 | #define FSL_QDMA_DMR_DQD BIT(30) | ||
93 | #define FSL_QDMA_DSR_DB BIT(31) | ||
94 | |||
95 | /* Size related definition */ | ||
96 | #define FSL_QDMA_QUEUE_MAX 8 | ||
97 | #define FSL_QDMA_COMMAND_BUFFER_SIZE 64 | ||
98 | #define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32 | ||
99 | #define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64 | ||
100 | #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384 | ||
101 | #define FSL_QDMA_QUEUE_NUM_MAX 8 | ||
102 | |||
103 | /* Field definition for CMD */ | ||
104 | #define FSL_QDMA_CMD_RWTTYPE 0x4 | ||
105 | #define FSL_QDMA_CMD_LWC 0x2 | ||
106 | #define FSL_QDMA_CMD_RWTTYPE_OFFSET 28 | ||
107 | #define FSL_QDMA_CMD_NS_OFFSET 27 | ||
108 | #define FSL_QDMA_CMD_DQOS_OFFSET 24 | ||
109 | #define FSL_QDMA_CMD_WTHROTL_OFFSET 20 | ||
110 | #define FSL_QDMA_CMD_DSEN_OFFSET 19 | ||
111 | #define FSL_QDMA_CMD_LWC_OFFSET 16 | ||
112 | |||
113 | /* Field definition for Descriptor offset */ | ||
114 | #define QDMA_CCDF_STATUS 20 | ||
115 | #define QDMA_CCDF_OFFSET 20 | ||
116 | |||
117 | /* Field definition for safe loop count */ | ||
118 | #define FSL_QDMA_HALT_COUNT 1500 | ||
119 | #define FSL_QDMA_MAX_SIZE 16385 | ||
120 | #define FSL_QDMA_COMP_TIMEOUT 1000 | ||
121 | #define FSL_COMMAND_QUEUE_OVERFLLOW 10 | ||
122 | |||
123 | #define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x) \ | ||
124 | (((fsl_qdma_engine)->block_offset) * (x)) | ||
125 | |||
126 | /** | ||
127 | * struct fsl_qdma_format - This is the struct describing the compound | ||
128 | * descriptor format used by qDMA. | ||
129 | * @status: Command status and enqueue status notification. | ||
130 | * @cfg: Frame offset and frame format. | ||
131 | * @addr_lo: Lower 32 bits of the compound descriptor's | ||
132 | * 40-bit address in memory. | ||
133 | * @addr_hi: Upper 8 bits of the same 40-bit | ||
134 | * address in memory. | ||
135 | * @__reserved1: Reserved field. | ||
136 | * @cfg8b_w1: Compound descriptor command queue origin produced | ||
137 | * by qDMA and dynamic debug field. | ||
138 | * @data: Pointer to the memory 40-bit address; describes the DMA | ||
139 | * source and destination information. | ||
140 | */ | ||
141 | struct fsl_qdma_format { | ||
142 | __le32 status; | ||
143 | __le32 cfg; | ||
144 | union { | ||
145 | struct { | ||
146 | __le32 addr_lo; | ||
147 | u8 addr_hi; | ||
148 | u8 __reserved1[2]; | ||
149 | u8 cfg8b_w1; | ||
150 | } __packed; | ||
151 | __le64 data; | ||
152 | }; | ||
153 | } __packed; | ||
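The kernel-doc above describes a 40-bit bus address split across a 32-bit low word and an 8-bit high byte; qdma_desc_addr_set64() and qdma_ccdf_addr_get64() further down pack and unpack it. A little-endian user-space sketch of that split, reduced to the address fields only (no byte swapping shown):

#include <stdio.h>
#include <stdint.h>

/* Only the address-carrying part of fsl_qdma_format, for illustration;
 * assumes a little-endian host so the le32/le64 conversions are omitted. */
struct addr40_example {
	uint32_t addr_lo;	/* low 32 bits of the 40-bit address */
	uint8_t  addr_hi;	/* high 8 bits of the 40-bit address */
};

static void addr40_set(struct addr40_example *f, uint64_t addr)
{
	f->addr_lo = (uint32_t)addr;
	f->addr_hi = (uint8_t)(addr >> 32);
}

static uint64_t addr40_get(const struct addr40_example *f)
{
	/* Equivalent to masking with U64_MAX >> 24: keep the low 40 bits. */
	return ((uint64_t)f->addr_hi << 32) | f->addr_lo;
}

int main(void)
{
	struct addr40_example f;
	uint64_t addr = 0x12ffffabcdULL;	/* a 40-bit bus address */

	addr40_set(&f, addr);
	printf("lo=0x%08x hi=0x%02x round-trip=0x%010llx\n",
	       (unsigned int)f.addr_lo, (unsigned int)f.addr_hi,
	       (unsigned long long)addr40_get(&f));
	return 0;
}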
154 | |||
155 | /* qDMA status notification pre information */ | ||
156 | struct fsl_pre_status { | ||
157 | u64 addr; | ||
158 | u8 queue; | ||
159 | }; | ||
160 | |||
161 | static DEFINE_PER_CPU(struct fsl_pre_status, pre); | ||
162 | |||
163 | struct fsl_qdma_chan { | ||
164 | struct virt_dma_chan vchan; | ||
165 | struct virt_dma_desc vdesc; | ||
166 | enum dma_status status; | ||
167 | struct fsl_qdma_engine *qdma; | ||
168 | struct fsl_qdma_queue *queue; | ||
169 | }; | ||
170 | |||
171 | struct fsl_qdma_queue { | ||
172 | struct fsl_qdma_format *virt_head; | ||
173 | struct fsl_qdma_format *virt_tail; | ||
174 | struct list_head comp_used; | ||
175 | struct list_head comp_free; | ||
176 | struct dma_pool *comp_pool; | ||
177 | struct dma_pool *desc_pool; | ||
178 | spinlock_t queue_lock; | ||
179 | dma_addr_t bus_addr; | ||
180 | u32 n_cq; | ||
181 | u32 id; | ||
182 | struct fsl_qdma_format *cq; | ||
183 | void __iomem *block_base; | ||
184 | }; | ||
185 | |||
186 | struct fsl_qdma_comp { | ||
187 | dma_addr_t bus_addr; | ||
188 | dma_addr_t desc_bus_addr; | ||
189 | struct fsl_qdma_format *virt_addr; | ||
190 | struct fsl_qdma_format *desc_virt_addr; | ||
191 | struct fsl_qdma_chan *qchan; | ||
192 | struct virt_dma_desc vdesc; | ||
193 | struct list_head list; | ||
194 | }; | ||
195 | |||
196 | struct fsl_qdma_engine { | ||
197 | struct dma_device dma_dev; | ||
198 | void __iomem *ctrl_base; | ||
199 | void __iomem *status_base; | ||
200 | void __iomem *block_base; | ||
201 | u32 n_chans; | ||
202 | u32 n_queues; | ||
203 | struct mutex fsl_qdma_mutex; | ||
204 | int error_irq; | ||
205 | int *queue_irq; | ||
206 | u32 feature; | ||
207 | struct fsl_qdma_queue *queue; | ||
208 | struct fsl_qdma_queue **status; | ||
209 | struct fsl_qdma_chan *chans; | ||
210 | int block_number; | ||
211 | int block_offset; | ||
212 | int irq_base; | ||
213 | int desc_allocated; | ||
214 | |||
215 | }; | ||
216 | |||
217 | static inline u64 | ||
218 | qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf) | ||
219 | { | ||
220 | return le64_to_cpu(ccdf->data) & (U64_MAX >> 24); | ||
221 | } | ||
222 | |||
223 | static inline void | ||
224 | qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr) | ||
225 | { | ||
226 | ccdf->addr_hi = upper_32_bits(addr); | ||
227 | ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr)); | ||
228 | } | ||
229 | |||
230 | static inline u8 | ||
231 | qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf) | ||
232 | { | ||
233 | return ccdf->cfg8b_w1 & U8_MAX; | ||
234 | } | ||
235 | |||
236 | static inline int | ||
237 | qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf) | ||
238 | { | ||
239 | return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET; | ||
240 | } | ||
241 | |||
242 | static inline void | ||
243 | qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset) | ||
244 | { | ||
245 | ccdf->cfg = cpu_to_le32(QDMA_CCDF_FOTMAT | offset); | ||
246 | } | ||
247 | |||
248 | static inline int | ||
249 | qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf) | ||
250 | { | ||
251 | return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS; | ||
252 | } | ||
253 | |||
254 | static inline void | ||
255 | qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status) | ||
256 | { | ||
257 | ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status); | ||
258 | } | ||
259 | |||
260 | static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len) | ||
261 | { | ||
262 | csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK); | ||
263 | } | ||
264 | |||
265 | static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len) | ||
266 | { | ||
267 | csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK)); | ||
268 | } | ||
269 | |||
270 | static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr) | ||
271 | { | ||
272 | return FSL_DMA_IN(qdma, addr, 32); | ||
273 | } | ||
274 | |||
275 | static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val, | ||
276 | void __iomem *addr) | ||
277 | { | ||
278 | FSL_DMA_OUT(qdma, addr, val, 32); | ||
279 | } | ||
280 | |||
281 | static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan) | ||
282 | { | ||
283 | return container_of(chan, struct fsl_qdma_chan, vchan.chan); | ||
284 | } | ||
285 | |||
286 | static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd) | ||
287 | { | ||
288 | return container_of(vd, struct fsl_qdma_comp, vdesc); | ||
289 | } | ||
290 | |||
291 | static void fsl_qdma_free_chan_resources(struct dma_chan *chan) | ||
292 | { | ||
293 | struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan); | ||
294 | struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; | ||
295 | struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma; | ||
296 | struct fsl_qdma_comp *comp_temp, *_comp_temp; | ||
297 | unsigned long flags; | ||
298 | LIST_HEAD(head); | ||
299 | |||
300 | spin_lock_irqsave(&fsl_chan->vchan.lock, flags); | ||
301 | vchan_get_all_descriptors(&fsl_chan->vchan, &head); | ||
302 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); | ||
303 | |||
304 | vchan_dma_desc_free_list(&fsl_chan->vchan, &head); | ||
305 | |||
306 | 	if (!fsl_queue->comp_pool && !fsl_queue->desc_pool) | ||
307 | return; | ||
308 | |||
309 | list_for_each_entry_safe(comp_temp, _comp_temp, | ||
310 | &fsl_queue->comp_used, list) { | ||
311 | dma_pool_free(fsl_queue->comp_pool, | ||
312 | comp_temp->virt_addr, | ||
313 | comp_temp->bus_addr); | ||
314 | dma_pool_free(fsl_queue->desc_pool, | ||
315 | comp_temp->desc_virt_addr, | ||
316 | comp_temp->desc_bus_addr); | ||
317 | list_del(&comp_temp->list); | ||
318 | kfree(comp_temp); | ||
319 | } | ||
320 | |||
321 | list_for_each_entry_safe(comp_temp, _comp_temp, | ||
322 | &fsl_queue->comp_free, list) { | ||
323 | dma_pool_free(fsl_queue->comp_pool, | ||
324 | comp_temp->virt_addr, | ||
325 | comp_temp->bus_addr); | ||
326 | dma_pool_free(fsl_queue->desc_pool, | ||
327 | comp_temp->desc_virt_addr, | ||
328 | comp_temp->desc_bus_addr); | ||
329 | list_del(&comp_temp->list); | ||
330 | kfree(comp_temp); | ||
331 | } | ||
332 | |||
333 | dma_pool_destroy(fsl_queue->comp_pool); | ||
334 | dma_pool_destroy(fsl_queue->desc_pool); | ||
335 | |||
336 | fsl_qdma->desc_allocated--; | ||
337 | fsl_queue->comp_pool = NULL; | ||
338 | fsl_queue->desc_pool = NULL; | ||
339 | } | ||
340 | |||
341 | static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp, | ||
342 | dma_addr_t dst, dma_addr_t src, u32 len) | ||
343 | { | ||
344 | struct fsl_qdma_format *sdf, *ddf; | ||
345 | struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest; | ||
346 | |||
347 | ccdf = fsl_comp->virt_addr; | ||
348 | csgf_desc = fsl_comp->virt_addr + 1; | ||
349 | csgf_src = fsl_comp->virt_addr + 2; | ||
350 | csgf_dest = fsl_comp->virt_addr + 3; | ||
351 | sdf = fsl_comp->desc_virt_addr; | ||
352 | ddf = fsl_comp->desc_virt_addr + 1; | ||
353 | |||
354 | memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE); | ||
355 | memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE); | ||
356 | 	/* Head Command Descriptor (Frame Descriptor) */ | ||
357 | qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16); | ||
358 | qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf)); | ||
359 | qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf)); | ||
360 | /* Status notification is enqueued to status queue. */ | ||
361 | 	/* Compound Command Descriptor (Frame List Table) */ | ||
362 | qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr); | ||
363 | 	/* The length must be 32 for a Compound S/G Descriptor. */ | ||
364 | qdma_csgf_set_len(csgf_desc, 32); | ||
365 | qdma_desc_addr_set64(csgf_src, src); | ||
366 | qdma_csgf_set_len(csgf_src, len); | ||
367 | qdma_desc_addr_set64(csgf_dest, dst); | ||
368 | qdma_csgf_set_len(csgf_dest, len); | ||
369 | /* This entry is the last entry. */ | ||
370 | qdma_csgf_set_f(csgf_dest, len); | ||
371 | /* Descriptor Buffer */ | ||
372 | sdf->data = | ||
373 | cpu_to_le64(FSL_QDMA_CMD_RWTTYPE << | ||
374 | FSL_QDMA_CMD_RWTTYPE_OFFSET); | ||
375 | ddf->data = | ||
376 | cpu_to_le64(FSL_QDMA_CMD_RWTTYPE << | ||
377 | FSL_QDMA_CMD_RWTTYPE_OFFSET); | ||
378 | ddf->data |= | ||
379 | cpu_to_le64(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET); | ||
380 | } | ||
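A short usage sketch of the helper above (the arguments are assumed placeholders); it records the compound-descriptor layout that fsl_qdma_comp_fill_memcpy() leaves behind:

	/*
	 * Usage sketch with assumed placeholder arguments.  After the call,
	 * the pre-allocated buffer at comp->virt_addr holds four entries:
	 *   [0] CCDF - frame descriptor, pointing at comp->bus_addr + 16
	 *   [1] CSGF - frame-list entry for the SD/DD buffer, length 32
	 *   [2] CSGF - source buffer, length 'len'
	 *   [3] CSGF - destination buffer, length 'len', final (F) bit set
	 */
	static void fsl_qdma_fill_sketch(struct fsl_qdma_comp *comp,
					 dma_addr_t dst, dma_addr_t src,
					 u32 len)
	{
		fsl_qdma_comp_fill_memcpy(comp, dst, src, len);
	}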
381 | |||
382 | /* | ||
383 | * Pre-request full command descriptor for enqueue. | ||
384 | */ | ||
385 | static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue) | ||
386 | { | ||
387 | int i; | ||
388 | struct fsl_qdma_comp *comp_temp, *_comp_temp; | ||
389 | |||
390 | for (i = 0; i < queue->n_cq + FSL_COMMAND_QUEUE_OVERFLLOW; i++) { | ||
391 | comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL); | ||
392 | if (!comp_temp) | ||
393 | goto err_alloc; | ||
394 | comp_temp->virt_addr = | ||
395 | dma_pool_alloc(queue->comp_pool, GFP_KERNEL, | ||
396 | &comp_temp->bus_addr); | ||
397 | if (!comp_temp->virt_addr) | ||
398 | goto err_dma_alloc; | ||
399 | |||
400 | comp_temp->desc_virt_addr = | ||
401 | dma_pool_alloc(queue->desc_pool, GFP_KERNEL, | ||
402 | &comp_temp->desc_bus_addr); | ||
403 | if (!comp_temp->desc_virt_addr) | ||
404 | goto err_desc_dma_alloc; | ||
405 | |||
406 | list_add_tail(&comp_temp->list, &queue->comp_free); | ||
407 | } | ||
408 | |||
409 | return 0; | ||
410 | |||
411 | err_desc_dma_alloc: | ||
412 | dma_pool_free(queue->comp_pool, comp_temp->virt_addr, | ||
413 | comp_temp->bus_addr); | ||
414 | |||
415 | err_dma_alloc: | ||
416 | kfree(comp_temp); | ||
417 | |||
418 | err_alloc: | ||
419 | list_for_each_entry_safe(comp_temp, _comp_temp, | ||
420 | &queue->comp_free, list) { | ||
421 | if (comp_temp->virt_addr) | ||
422 | dma_pool_free(queue->comp_pool, | ||
423 | comp_temp->virt_addr, | ||
424 | comp_temp->bus_addr); | ||
425 | if (comp_temp->desc_virt_addr) | ||
426 | dma_pool_free(queue->desc_pool, | ||
427 | comp_temp->desc_virt_addr, | ||
428 | comp_temp->desc_bus_addr); | ||
429 | |||
430 | list_del(&comp_temp->list); | ||
431 | kfree(comp_temp); | ||
432 | } | ||
433 | |||
434 | return -ENOMEM; | ||
435 | } | ||
436 | |||
437 | /* | ||
438 | * Request a command descriptor for enqueue. | ||
439 | */ | ||
440 | static struct fsl_qdma_comp | ||
441 | *fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan) | ||
442 | { | ||
443 | unsigned long flags; | ||
444 | struct fsl_qdma_comp *comp_temp; | ||
445 | int timeout = FSL_QDMA_COMP_TIMEOUT; | ||
446 | struct fsl_qdma_queue *queue = fsl_chan->queue; | ||
447 | |||
448 | while (timeout--) { | ||
449 | spin_lock_irqsave(&queue->queue_lock, flags); | ||
450 | if (!list_empty(&queue->comp_free)) { | ||
451 | comp_temp = list_first_entry(&queue->comp_free, | ||
452 | struct fsl_qdma_comp, | ||
453 | list); | ||
454 | list_del(&comp_temp->list); | ||
455 | |||
456 | spin_unlock_irqrestore(&queue->queue_lock, flags); | ||
457 | comp_temp->qchan = fsl_chan; | ||
458 | return comp_temp; | ||
459 | } | ||
460 | spin_unlock_irqrestore(&queue->queue_lock, flags); | ||
461 | udelay(1); | ||
462 | } | ||
463 | |||
464 | return NULL; | ||
465 | } | ||
466 | |||
467 | static struct fsl_qdma_queue | ||
468 | *fsl_qdma_alloc_queue_resources(struct platform_device *pdev, | ||
469 | struct fsl_qdma_engine *fsl_qdma) | ||
470 | { | ||
471 | int ret, len, i, j; | ||
472 | int queue_num, block_number; | ||
473 | unsigned int queue_size[FSL_QDMA_QUEUE_MAX]; | ||
474 | struct fsl_qdma_queue *queue_head, *queue_temp; | ||
475 | |||
476 | queue_num = fsl_qdma->n_queues; | ||
477 | block_number = fsl_qdma->block_number; | ||
478 | |||
479 | if (queue_num > FSL_QDMA_QUEUE_MAX) | ||
480 | queue_num = FSL_QDMA_QUEUE_MAX; | ||
481 | len = sizeof(*queue_head) * queue_num * block_number; | ||
482 | queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); | ||
483 | if (!queue_head) | ||
484 | return NULL; | ||
485 | |||
486 | ret = device_property_read_u32_array(&pdev->dev, "queue-sizes", | ||
487 | queue_size, queue_num); | ||
488 | if (ret) { | ||
489 | dev_err(&pdev->dev, "Can't get queue-sizes.\n"); | ||
490 | return NULL; | ||
491 | } | ||
492 | for (j = 0; j < block_number; j++) { | ||
493 | for (i = 0; i < queue_num; i++) { | ||
494 | if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX || | ||
495 | queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) { | ||
496 | dev_err(&pdev->dev, | ||
497 | 					"Invalid queue-sizes value.\n"); | ||
498 | return NULL; | ||
499 | } | ||
500 | queue_temp = queue_head + i + (j * queue_num); | ||
501 | |||
502 | queue_temp->cq = | ||
503 | dma_alloc_coherent(&pdev->dev, | ||
504 | sizeof(struct fsl_qdma_format) * | ||
505 | queue_size[i], | ||
506 | &queue_temp->bus_addr, | ||
507 | GFP_KERNEL); | ||
508 | if (!queue_temp->cq) | ||
509 | return NULL; | ||
510 | queue_temp->block_base = fsl_qdma->block_base + | ||
511 | FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j); | ||
512 | queue_temp->n_cq = queue_size[i]; | ||
513 | queue_temp->id = i; | ||
514 | queue_temp->virt_head = queue_temp->cq; | ||
515 | queue_temp->virt_tail = queue_temp->cq; | ||
516 | /* | ||
517 | * List for queue command buffer | ||
518 | */ | ||
519 | INIT_LIST_HEAD(&queue_temp->comp_used); | ||
520 | spin_lock_init(&queue_temp->queue_lock); | ||
521 | } | ||
522 | } | ||
523 | return queue_head; | ||
524 | } | ||
525 | |||
526 | static struct fsl_qdma_queue | ||
527 | *fsl_qdma_prep_status_queue(struct platform_device *pdev) | ||
528 | { | ||
529 | int ret; | ||
530 | unsigned int status_size; | ||
531 | struct fsl_qdma_queue *status_head; | ||
532 | struct device_node *np = pdev->dev.of_node; | ||
533 | |||
534 | ret = of_property_read_u32(np, "status-sizes", &status_size); | ||
535 | if (ret) { | ||
536 | dev_err(&pdev->dev, "Can't get status-sizes.\n"); | ||
537 | return NULL; | ||
538 | } | ||
539 | if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX || | ||
540 | status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) { | ||
541 | 		dev_err(&pdev->dev, "Invalid status-sizes value.\n"); | ||
542 | return NULL; | ||
543 | } | ||
544 | status_head = devm_kzalloc(&pdev->dev, | ||
545 | sizeof(*status_head), GFP_KERNEL); | ||
546 | if (!status_head) | ||
547 | return NULL; | ||
548 | |||
549 | /* | ||
550 | * Buffer for queue command | ||
551 | */ | ||
552 | status_head->cq = dma_alloc_coherent(&pdev->dev, | ||
553 | sizeof(struct fsl_qdma_format) * | ||
554 | status_size, | ||
555 | &status_head->bus_addr, | ||
556 | GFP_KERNEL); | ||
557 | if (!status_head->cq) { | ||
558 | devm_kfree(&pdev->dev, status_head); | ||
559 | return NULL; | ||
560 | } | ||
561 | status_head->n_cq = status_size; | ||
562 | status_head->virt_head = status_head->cq; | ||
563 | status_head->virt_tail = status_head->cq; | ||
564 | status_head->comp_pool = NULL; | ||
565 | |||
566 | return status_head; | ||
567 | } | ||
568 | |||
569 | static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma) | ||
570 | { | ||
571 | u32 reg; | ||
572 | int i, j, count = FSL_QDMA_HALT_COUNT; | ||
573 | void __iomem *block, *ctrl = fsl_qdma->ctrl_base; | ||
574 | |||
575 | /* Disable the command queue and wait for idle state. */ | ||
576 | reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR); | ||
577 | reg |= FSL_QDMA_DMR_DQD; | ||
578 | qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR); | ||
579 | for (j = 0; j < fsl_qdma->block_number; j++) { | ||
580 | block = fsl_qdma->block_base + | ||
581 | FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j); | ||
582 | for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++) | ||
583 | qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i)); | ||
584 | } | ||
585 | while (1) { | ||
586 | reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR); | ||
587 | if (!(reg & FSL_QDMA_DSR_DB)) | ||
588 | break; | ||
589 | if (count-- < 0) | ||
590 | return -EBUSY; | ||
591 | udelay(100); | ||
592 | } | ||
593 | |||
594 | for (j = 0; j < fsl_qdma->block_number; j++) { | ||
595 | block = fsl_qdma->block_base + | ||
596 | FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j); | ||
597 | |||
598 | /* Disable status queue. */ | ||
599 | qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR); | ||
600 | |||
601 | /* | ||
602 | * clear the command queue interrupt detect register for | ||
603 | * all queues. | ||
604 | */ | ||
605 | qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR, | ||
606 | block + FSL_QDMA_BCQIDR(0)); | ||
607 | } | ||
608 | |||
609 | return 0; | ||
610 | } | ||
611 | |||
612 | static int | ||
613 | fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma, | ||
614 | void *block, | ||
615 | int id) | ||
616 | { | ||
617 | bool duplicate; | ||
618 | u32 reg, i, count; | ||
619 | struct fsl_qdma_queue *temp_queue; | ||
620 | struct fsl_qdma_format *status_addr; | ||
621 | struct fsl_qdma_comp *fsl_comp = NULL; | ||
622 | struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue; | ||
623 | struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id]; | ||
624 | |||
625 | count = FSL_QDMA_MAX_SIZE; | ||
626 | |||
627 | while (count--) { | ||
628 | duplicate = 0; | ||
629 | reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR); | ||
630 | if (reg & FSL_QDMA_BSQSR_QE) | ||
631 | return 0; | ||
632 | |||
633 | status_addr = fsl_status->virt_head; | ||
634 | |||
635 | if (qdma_ccdf_get_queue(status_addr) == | ||
636 | __this_cpu_read(pre.queue) && | ||
637 | qdma_ccdf_addr_get64(status_addr) == | ||
638 | __this_cpu_read(pre.addr)) | ||
639 | duplicate = 1; | ||
640 | i = qdma_ccdf_get_queue(status_addr) + | ||
641 | id * fsl_qdma->n_queues; | ||
642 | __this_cpu_write(pre.addr, qdma_ccdf_addr_get64(status_addr)); | ||
643 | __this_cpu_write(pre.queue, qdma_ccdf_get_queue(status_addr)); | ||
644 | temp_queue = fsl_queue + i; | ||
645 | |||
646 | spin_lock(&temp_queue->queue_lock); | ||
647 | if (list_empty(&temp_queue->comp_used)) { | ||
648 | if (!duplicate) { | ||
649 | spin_unlock(&temp_queue->queue_lock); | ||
650 | return -EAGAIN; | ||
651 | } | ||
652 | } else { | ||
653 | fsl_comp = list_first_entry(&temp_queue->comp_used, | ||
654 | struct fsl_qdma_comp, list); | ||
655 | if (fsl_comp->bus_addr + 16 != | ||
656 | __this_cpu_read(pre.addr)) { | ||
657 | if (!duplicate) { | ||
658 | spin_unlock(&temp_queue->queue_lock); | ||
659 | return -EAGAIN; | ||
660 | } | ||
661 | } | ||
662 | } | ||
663 | |||
664 | if (duplicate) { | ||
665 | reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR); | ||
666 | reg |= FSL_QDMA_BSQMR_DI; | ||
667 | qdma_desc_addr_set64(status_addr, 0x0); | ||
668 | fsl_status->virt_head++; | ||
669 | if (fsl_status->virt_head == fsl_status->cq | ||
670 | + fsl_status->n_cq) | ||
671 | fsl_status->virt_head = fsl_status->cq; | ||
672 | qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR); | ||
673 | spin_unlock(&temp_queue->queue_lock); | ||
674 | continue; | ||
675 | } | ||
676 | list_del(&fsl_comp->list); | ||
677 | |||
678 | reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR); | ||
679 | reg |= FSL_QDMA_BSQMR_DI; | ||
680 | qdma_desc_addr_set64(status_addr, 0x0); | ||
681 | fsl_status->virt_head++; | ||
682 | if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq) | ||
683 | fsl_status->virt_head = fsl_status->cq; | ||
684 | qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR); | ||
685 | spin_unlock(&temp_queue->queue_lock); | ||
686 | |||
687 | spin_lock(&fsl_comp->qchan->vchan.lock); | ||
688 | vchan_cookie_complete(&fsl_comp->vdesc); | ||
689 | fsl_comp->qchan->status = DMA_COMPLETE; | ||
690 | spin_unlock(&fsl_comp->qchan->vchan.lock); | ||
691 | } | ||
692 | |||
693 | return 0; | ||
694 | } | ||
695 | |||
696 | static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id) | ||
697 | { | ||
698 | unsigned int intr; | ||
699 | struct fsl_qdma_engine *fsl_qdma = dev_id; | ||
700 | void __iomem *status = fsl_qdma->status_base; | ||
701 | |||
702 | intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR); | ||
703 | |||
704 | if (intr) { | ||
705 | dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n"); | ||
706 | return IRQ_NONE; | ||
707 | } | ||
708 | |||
709 | qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR); | ||
710 | return IRQ_HANDLED; | ||
711 | } | ||
712 | |||
713 | static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id) | ||
714 | { | ||
715 | int id; | ||
716 | unsigned int intr, reg; | ||
717 | struct fsl_qdma_engine *fsl_qdma = dev_id; | ||
718 | void __iomem *block, *ctrl = fsl_qdma->ctrl_base; | ||
719 | |||
720 | id = irq - fsl_qdma->irq_base; | ||
721 | 	if (id < 0 || id >= fsl_qdma->block_number) { | ||
722 | 		dev_err(fsl_qdma->dma_dev.dev, | ||
723 | 			"irq %d is wrong, irq_base is %d\n", | ||
724 | irq, fsl_qdma->irq_base); | ||
725 | } | ||
726 | |||
727 | block = fsl_qdma->block_base + | ||
728 | FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id); | ||
729 | |||
730 | intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0)); | ||
731 | |||
732 | if ((intr & FSL_QDMA_CQIDR_SQT) != 0) | ||
733 | intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id); | ||
734 | |||
735 | if (intr != 0) { | ||
736 | reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR); | ||
737 | reg |= FSL_QDMA_DMR_DQD; | ||
738 | qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR); | ||
739 | qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0)); | ||
740 | dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n"); | ||
741 | } | ||
742 | |||
743 | /* Clear all detected events and interrupts. */ | ||
744 | qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR, | ||
745 | block + FSL_QDMA_BCQIDR(0)); | ||
746 | |||
747 | return IRQ_HANDLED; | ||
748 | } | ||
749 | |||
750 | static int | ||
751 | fsl_qdma_irq_init(struct platform_device *pdev, | ||
752 | struct fsl_qdma_engine *fsl_qdma) | ||
753 | { | ||
754 | int i; | ||
755 | int cpu; | ||
756 | int ret; | ||
757 | char irq_name[20]; | ||
758 | |||
759 | fsl_qdma->error_irq = | ||
760 | platform_get_irq_byname(pdev, "qdma-error"); | ||
761 | if (fsl_qdma->error_irq < 0) { | ||
762 | dev_err(&pdev->dev, "Can't get qdma controller irq.\n"); | ||
763 | return fsl_qdma->error_irq; | ||
764 | } | ||
765 | |||
766 | ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq, | ||
767 | fsl_qdma_error_handler, 0, | ||
768 | "qDMA error", fsl_qdma); | ||
769 | if (ret) { | ||
770 | dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n"); | ||
771 | return ret; | ||
772 | } | ||
773 | |||
774 | for (i = 0; i < fsl_qdma->block_number; i++) { | ||
775 | sprintf(irq_name, "qdma-queue%d", i); | ||
776 | fsl_qdma->queue_irq[i] = | ||
777 | platform_get_irq_byname(pdev, irq_name); | ||
778 | |||
779 | if (fsl_qdma->queue_irq[i] < 0) { | ||
780 | dev_err(&pdev->dev, | ||
781 | "Can't get qdma queue %d irq.\n", i); | ||
782 | return fsl_qdma->queue_irq[i]; | ||
783 | } | ||
784 | |||
785 | ret = devm_request_irq(&pdev->dev, | ||
786 | fsl_qdma->queue_irq[i], | ||
787 | fsl_qdma_queue_handler, | ||
788 | 0, | ||
789 | "qDMA queue", | ||
790 | fsl_qdma); | ||
791 | if (ret) { | ||
792 | dev_err(&pdev->dev, | ||
793 | "Can't register qDMA queue IRQ.\n"); | ||
794 | return ret; | ||
795 | } | ||
796 | |||
797 | cpu = i % num_online_cpus(); | ||
798 | ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i], | ||
799 | get_cpu_mask(cpu)); | ||
800 | if (ret) { | ||
801 | dev_err(&pdev->dev, | ||
802 | "Can't set cpu %d affinity to IRQ %d.\n", | ||
803 | cpu, | ||
804 | fsl_qdma->queue_irq[i]); | ||
805 | return ret; | ||
806 | } | ||
807 | } | ||
808 | |||
809 | return 0; | ||
810 | } | ||
811 | |||
812 | static void fsl_qdma_irq_exit(struct platform_device *pdev, | ||
813 | struct fsl_qdma_engine *fsl_qdma) | ||
814 | { | ||
815 | int i; | ||
816 | |||
817 | devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma); | ||
818 | for (i = 0; i < fsl_qdma->block_number; i++) | ||
819 | devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[i], fsl_qdma); | ||
820 | } | ||
821 | |||
822 | static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma) | ||
823 | { | ||
824 | u32 reg; | ||
825 | int i, j, ret; | ||
826 | struct fsl_qdma_queue *temp; | ||
827 | void __iomem *status = fsl_qdma->status_base; | ||
828 | void __iomem *block, *ctrl = fsl_qdma->ctrl_base; | ||
829 | struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue; | ||
830 | |||
831 | /* Try to halt the qDMA engine first. */ | ||
832 | ret = fsl_qdma_halt(fsl_qdma); | ||
833 | if (ret) { | ||
834 | dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!"); | ||
835 | return ret; | ||
836 | } | ||
837 | |||
838 | for (i = 0; i < fsl_qdma->block_number; i++) { | ||
839 | /* | ||
840 | * Clear the command queue interrupt detect register for | ||
841 | * all queues. | ||
842 | */ | ||
843 | |||
844 | block = fsl_qdma->block_base + | ||
845 | FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i); | ||
846 | qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR, | ||
847 | block + FSL_QDMA_BCQIDR(0)); | ||
848 | } | ||
849 | |||
850 | for (j = 0; j < fsl_qdma->block_number; j++) { | ||
851 | block = fsl_qdma->block_base + | ||
852 | FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j); | ||
853 | for (i = 0; i < fsl_qdma->n_queues; i++) { | ||
854 | temp = fsl_queue + i + (j * fsl_qdma->n_queues); | ||
855 | /* | ||
856 | 			 * Initialize the Command Queue registers to | ||
857 | 			 * point to the first command descriptor | ||
858 | 			 * in memory: | ||
859 | 			 * Dequeue Pointer Address Register | ||
860 | 			 * Enqueue Pointer Address Register | ||
861 | */ | ||
862 | |||
863 | qdma_writel(fsl_qdma, temp->bus_addr, | ||
864 | block + FSL_QDMA_BCQDPA_SADDR(i)); | ||
865 | qdma_writel(fsl_qdma, temp->bus_addr, | ||
866 | block + FSL_QDMA_BCQEPA_SADDR(i)); | ||
867 | |||
868 | /* Initialize the queue mode. */ | ||
869 | reg = FSL_QDMA_BCQMR_EN; | ||
870 | reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4); | ||
871 | reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6); | ||
872 | qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i)); | ||
873 | } | ||
874 | |||
875 | /* | ||
876 | 		 * Workaround for erratum ERR010812: | ||
877 | 		 * XOFF must be enabled to prevent enqueue rejections, | ||
878 | 		 * so set SQCCMR ENTER_WM to 0x20. | ||
879 | */ | ||
880 | |||
881 | qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM, | ||
882 | block + FSL_QDMA_SQCCMR); | ||
883 | |||
884 | /* | ||
885 | * Initialize status queue registers to point to the first | ||
886 | * command descriptor in memory. | ||
887 | * Dequeue Pointer Address Registers | ||
888 | * Enqueue Pointer Address Registers | ||
889 | */ | ||
890 | |||
891 | qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr, | ||
892 | block + FSL_QDMA_SQEPAR); | ||
893 | qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr, | ||
894 | block + FSL_QDMA_SQDPAR); | ||
895 | /* Initialize status queue interrupt. */ | ||
896 | qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE, | ||
897 | block + FSL_QDMA_BCQIER(0)); | ||
898 | qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | | ||
899 | FSL_QDMA_BSQICR_ICST(5) | 0x8000, | ||
900 | block + FSL_QDMA_BSQICR); | ||
901 | qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | | ||
902 | FSL_QDMA_CQIER_TEIE, | ||
903 | block + FSL_QDMA_CQIER); | ||
904 | |||
905 | /* Initialize the status queue mode. */ | ||
906 | reg = FSL_QDMA_BSQMR_EN; | ||
907 | reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2 | ||
908 | (fsl_qdma->status[j]->n_cq) - 6); | ||
909 | |||
910 | qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR); | ||
911 | reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR); | ||
912 | } | ||
913 | |||
914 | /* Initialize controller interrupt register. */ | ||
915 | qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR); | ||
916 | qdma_writel(fsl_qdma, FSL_QDMA_DEIER_CLEAR, status + FSL_QDMA_DEIER); | ||
917 | |||
918 | reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR); | ||
919 | reg &= ~FSL_QDMA_DMR_DQD; | ||
920 | qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR); | ||
921 | |||
922 | return 0; | ||
923 | } | ||
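A worked example of the mode-register size encodings used in fsl_qdma_reg_init() above (a queue depth of 64 descriptors is assumed; real depths come from the queue-sizes property and are bounds-checked at allocation time):

	/*
	 * Worked example with an assumed n_cq of 64: ilog2(64) == 6, so the
	 * CQ_SIZE field becomes 6 - 6 = 0 and the CD_THLD field 6 - 4 = 2,
	 * i.e. the hardware stores log2(queue depth) biased by 6 and 4.
	 */
	static u32 fsl_qdma_bcqmr_sketch(void)
	{
		u32 n_cq = 64;
		u32 reg = FSL_QDMA_BCQMR_EN;

		reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(n_cq) - 4);	/* 2 */
		reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(n_cq) - 6);	/* 0 */

		return reg;
	}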
924 | |||
925 | static struct dma_async_tx_descriptor * | ||
926 | fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, | ||
927 | dma_addr_t src, size_t len, unsigned long flags) | ||
928 | { | ||
929 | struct fsl_qdma_comp *fsl_comp; | ||
930 | struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan); | ||
931 | |||
932 | fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan); | ||
933 | |||
934 | if (!fsl_comp) | ||
935 | return NULL; | ||
936 | |||
937 | fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len); | ||
938 | |||
939 | return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags); | ||
940 | } | ||
941 | |||
942 | static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan) | ||
943 | { | ||
944 | u32 reg; | ||
945 | struct virt_dma_desc *vdesc; | ||
946 | struct fsl_qdma_comp *fsl_comp; | ||
947 | struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; | ||
948 | void __iomem *block = fsl_queue->block_base; | ||
949 | |||
950 | reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id)); | ||
951 | if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF)) | ||
952 | return; | ||
953 | vdesc = vchan_next_desc(&fsl_chan->vchan); | ||
954 | if (!vdesc) | ||
955 | return; | ||
956 | list_del(&vdesc->node); | ||
957 | fsl_comp = to_fsl_qdma_comp(vdesc); | ||
958 | |||
959 | memcpy(fsl_queue->virt_head++, | ||
960 | fsl_comp->virt_addr, sizeof(struct fsl_qdma_format)); | ||
961 | if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq) | ||
962 | fsl_queue->virt_head = fsl_queue->cq; | ||
963 | |||
964 | list_add_tail(&fsl_comp->list, &fsl_queue->comp_used); | ||
965 | barrier(); | ||
966 | reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id)); | ||
967 | reg |= FSL_QDMA_BCQMR_EI; | ||
968 | qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id)); | ||
969 | fsl_chan->status = DMA_IN_PROGRESS; | ||
970 | } | ||
971 | |||
972 | static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc) | ||
973 | { | ||
974 | unsigned long flags; | ||
975 | struct fsl_qdma_comp *fsl_comp; | ||
976 | struct fsl_qdma_queue *fsl_queue; | ||
977 | |||
978 | fsl_comp = to_fsl_qdma_comp(vdesc); | ||
979 | fsl_queue = fsl_comp->qchan->queue; | ||
980 | |||
981 | spin_lock_irqsave(&fsl_queue->queue_lock, flags); | ||
982 | list_add_tail(&fsl_comp->list, &fsl_queue->comp_free); | ||
983 | spin_unlock_irqrestore(&fsl_queue->queue_lock, flags); | ||
984 | } | ||
985 | |||
986 | static void fsl_qdma_issue_pending(struct dma_chan *chan) | ||
987 | { | ||
988 | unsigned long flags; | ||
989 | struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan); | ||
990 | struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; | ||
991 | |||
992 | spin_lock_irqsave(&fsl_queue->queue_lock, flags); | ||
993 | spin_lock(&fsl_chan->vchan.lock); | ||
994 | if (vchan_issue_pending(&fsl_chan->vchan)) | ||
995 | fsl_qdma_enqueue_desc(fsl_chan); | ||
996 | spin_unlock(&fsl_chan->vchan.lock); | ||
997 | spin_unlock_irqrestore(&fsl_queue->queue_lock, flags); | ||
998 | } | ||
999 | |||
1000 | static void fsl_qdma_synchronize(struct dma_chan *chan) | ||
1001 | { | ||
1002 | struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan); | ||
1003 | |||
1004 | vchan_synchronize(&fsl_chan->vchan); | ||
1005 | } | ||
1006 | |||
1007 | static int fsl_qdma_terminate_all(struct dma_chan *chan) | ||
1008 | { | ||
1009 | LIST_HEAD(head); | ||
1010 | unsigned long flags; | ||
1011 | struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan); | ||
1012 | |||
1013 | spin_lock_irqsave(&fsl_chan->vchan.lock, flags); | ||
1014 | vchan_get_all_descriptors(&fsl_chan->vchan, &head); | ||
1015 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); | ||
1016 | vchan_dma_desc_free_list(&fsl_chan->vchan, &head); | ||
1017 | return 0; | ||
1018 | } | ||
1019 | |||
1020 | static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan) | ||
1021 | { | ||
1022 | int ret; | ||
1023 | struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan); | ||
1024 | struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma; | ||
1025 | struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; | ||
1026 | |||
1027 | if (fsl_queue->comp_pool && fsl_queue->desc_pool) | ||
1028 | return fsl_qdma->desc_allocated; | ||
1029 | |||
1030 | INIT_LIST_HEAD(&fsl_queue->comp_free); | ||
1031 | |||
1032 | /* | ||
1033 | * The dma pool for queue command buffer | ||
1034 | */ | ||
1035 | fsl_queue->comp_pool = | ||
1036 | dma_pool_create("comp_pool", | ||
1037 | chan->device->dev, | ||
1038 | FSL_QDMA_COMMAND_BUFFER_SIZE, | ||
1039 | 64, 0); | ||
1040 | if (!fsl_queue->comp_pool) | ||
1041 | return -ENOMEM; | ||
1042 | |||
1043 | /* | ||
1044 | * The dma pool for Descriptor(SD/DD) buffer | ||
1045 | */ | ||
1046 | fsl_queue->desc_pool = | ||
1047 | dma_pool_create("desc_pool", | ||
1048 | chan->device->dev, | ||
1049 | FSL_QDMA_DESCRIPTOR_BUFFER_SIZE, | ||
1050 | 32, 0); | ||
1051 | if (!fsl_queue->desc_pool) | ||
1052 | goto err_desc_pool; | ||
1053 | |||
1054 | ret = fsl_qdma_pre_request_enqueue_desc(fsl_queue); | ||
1055 | if (ret) { | ||
1056 | dev_err(chan->device->dev, | ||
1057 | "failed to alloc dma buffer for S/G descriptor\n"); | ||
1058 | goto err_mem; | ||
1059 | } | ||
1060 | |||
1061 | fsl_qdma->desc_allocated++; | ||
1062 | return fsl_qdma->desc_allocated; | ||
1063 | |||
1064 | err_mem: | ||
1065 | dma_pool_destroy(fsl_queue->desc_pool); | ||
1066 | err_desc_pool: | ||
1067 | dma_pool_destroy(fsl_queue->comp_pool); | ||
1068 | return -ENOMEM; | ||
1069 | } | ||
1070 | |||
1071 | static int fsl_qdma_probe(struct platform_device *pdev) | ||
1072 | { | ||
1073 | int ret, i; | ||
1074 | int blk_num, blk_off; | ||
1075 | u32 len, chans, queues; | ||
1076 | struct resource *res; | ||
1077 | struct fsl_qdma_chan *fsl_chan; | ||
1078 | struct fsl_qdma_engine *fsl_qdma; | ||
1079 | struct device_node *np = pdev->dev.of_node; | ||
1080 | |||
1081 | ret = of_property_read_u32(np, "dma-channels", &chans); | ||
1082 | if (ret) { | ||
1083 | dev_err(&pdev->dev, "Can't get dma-channels.\n"); | ||
1084 | return ret; | ||
1085 | } | ||
1086 | |||
1087 | ret = of_property_read_u32(np, "block-offset", &blk_off); | ||
1088 | if (ret) { | ||
1089 | dev_err(&pdev->dev, "Can't get block-offset.\n"); | ||
1090 | return ret; | ||
1091 | } | ||
1092 | |||
1093 | ret = of_property_read_u32(np, "block-number", &blk_num); | ||
1094 | if (ret) { | ||
1095 | dev_err(&pdev->dev, "Can't get block-number.\n"); | ||
1096 | return ret; | ||
1097 | } | ||
1098 | |||
1099 | blk_num = min_t(int, blk_num, num_online_cpus()); | ||
1100 | |||
1101 | len = sizeof(*fsl_qdma); | ||
1102 | fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); | ||
1103 | if (!fsl_qdma) | ||
1104 | return -ENOMEM; | ||
1105 | |||
1106 | len = sizeof(*fsl_chan) * chans; | ||
1107 | fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); | ||
1108 | if (!fsl_qdma->chans) | ||
1109 | return -ENOMEM; | ||
1110 | |||
1111 | len = sizeof(struct fsl_qdma_queue *) * blk_num; | ||
1112 | fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); | ||
1113 | if (!fsl_qdma->status) | ||
1114 | return -ENOMEM; | ||
1115 | |||
1116 | len = sizeof(int) * blk_num; | ||
1117 | fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); | ||
1118 | if (!fsl_qdma->queue_irq) | ||
1119 | return -ENOMEM; | ||
1120 | |||
1121 | ret = of_property_read_u32(np, "fsl,dma-queues", &queues); | ||
1122 | if (ret) { | ||
1123 | dev_err(&pdev->dev, "Can't get queues.\n"); | ||
1124 | return ret; | ||
1125 | } | ||
1126 | |||
1127 | fsl_qdma->desc_allocated = 0; | ||
1128 | fsl_qdma->n_chans = chans; | ||
1129 | fsl_qdma->n_queues = queues; | ||
1130 | fsl_qdma->block_number = blk_num; | ||
1131 | fsl_qdma->block_offset = blk_off; | ||
1132 | |||
1133 | mutex_init(&fsl_qdma->fsl_qdma_mutex); | ||
1134 | |||
1135 | for (i = 0; i < fsl_qdma->block_number; i++) { | ||
1136 | fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev); | ||
1137 | if (!fsl_qdma->status[i]) | ||
1138 | return -ENOMEM; | ||
1139 | } | ||
1140 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1141 | fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res); | ||
1142 | if (IS_ERR(fsl_qdma->ctrl_base)) | ||
1143 | return PTR_ERR(fsl_qdma->ctrl_base); | ||
1144 | |||
1145 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
1146 | fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res); | ||
1147 | if (IS_ERR(fsl_qdma->status_base)) | ||
1148 | return PTR_ERR(fsl_qdma->status_base); | ||
1149 | |||
1150 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); | ||
1151 | fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res); | ||
1152 | if (IS_ERR(fsl_qdma->block_base)) | ||
1153 | return PTR_ERR(fsl_qdma->block_base); | ||
1154 | fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma); | ||
1155 | if (!fsl_qdma->queue) | ||
1156 | return -ENOMEM; | ||
1157 | |||
1158 | ret = fsl_qdma_irq_init(pdev, fsl_qdma); | ||
1159 | if (ret) | ||
1160 | return ret; | ||
1161 | |||
1162 | fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0"); | ||
1163 | fsl_qdma->feature = of_property_read_bool(np, "big-endian"); | ||
1164 | INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels); | ||
1165 | |||
1166 | for (i = 0; i < fsl_qdma->n_chans; i++) { | ||
1167 | struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i]; | ||
1168 | |||
1169 | fsl_chan->qdma = fsl_qdma; | ||
1170 | fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues * | ||
1171 | fsl_qdma->block_number); | ||
1172 | fsl_chan->vchan.desc_free = fsl_qdma_free_desc; | ||
1173 | vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev); | ||
1174 | } | ||
1175 | |||
1176 | dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask); | ||
1177 | |||
1178 | fsl_qdma->dma_dev.dev = &pdev->dev; | ||
1179 | fsl_qdma->dma_dev.device_free_chan_resources = | ||
1180 | fsl_qdma_free_chan_resources; | ||
1181 | fsl_qdma->dma_dev.device_alloc_chan_resources = | ||
1182 | fsl_qdma_alloc_chan_resources; | ||
1183 | fsl_qdma->dma_dev.device_tx_status = dma_cookie_status; | ||
1184 | fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy; | ||
1185 | fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending; | ||
1186 | fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize; | ||
1187 | fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all; | ||
1188 | |||
1189 | dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); | ||
1190 | |||
1191 | platform_set_drvdata(pdev, fsl_qdma); | ||
1192 | |||
1193 | ret = dma_async_device_register(&fsl_qdma->dma_dev); | ||
1194 | if (ret) { | ||
1195 | dev_err(&pdev->dev, | ||
1196 | "Can't register NXP Layerscape qDMA engine.\n"); | ||
1197 | return ret; | ||
1198 | } | ||
1199 | |||
1200 | ret = fsl_qdma_reg_init(fsl_qdma); | ||
1201 | if (ret) { | ||
1202 | 		dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n"); | ||
1203 | return ret; | ||
1204 | } | ||
1205 | |||
1206 | return 0; | ||
1207 | } | ||
1208 | |||
1209 | static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev) | ||
1210 | { | ||
1211 | struct fsl_qdma_chan *chan, *_chan; | ||
1212 | |||
1213 | list_for_each_entry_safe(chan, _chan, | ||
1214 | &dmadev->channels, vchan.chan.device_node) { | ||
1215 | list_del(&chan->vchan.chan.device_node); | ||
1216 | tasklet_kill(&chan->vchan.task); | ||
1217 | } | ||
1218 | } | ||
1219 | |||
1220 | static int fsl_qdma_remove(struct platform_device *pdev) | ||
1221 | { | ||
1222 | int i; | ||
1223 | struct fsl_qdma_queue *status; | ||
1224 | struct device_node *np = pdev->dev.of_node; | ||
1225 | struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev); | ||
1226 | |||
1227 | fsl_qdma_irq_exit(pdev, fsl_qdma); | ||
1228 | fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev); | ||
1229 | of_dma_controller_free(np); | ||
1230 | dma_async_device_unregister(&fsl_qdma->dma_dev); | ||
1231 | |||
1232 | for (i = 0; i < fsl_qdma->block_number; i++) { | ||
1233 | status = fsl_qdma->status[i]; | ||
1234 | dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) * | ||
1235 | status->n_cq, status->cq, status->bus_addr); | ||
1236 | } | ||
1237 | return 0; | ||
1238 | } | ||
1239 | |||
1240 | static const struct of_device_id fsl_qdma_dt_ids[] = { | ||
1241 | { .compatible = "fsl,ls1021a-qdma", }, | ||
1242 | { /* sentinel */ } | ||
1243 | }; | ||
1244 | MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids); | ||
1245 | |||
1246 | static struct platform_driver fsl_qdma_driver = { | ||
1247 | .driver = { | ||
1248 | .name = "fsl-qdma", | ||
1249 | .of_match_table = fsl_qdma_dt_ids, | ||
1250 | }, | ||
1251 | .probe = fsl_qdma_probe, | ||
1252 | .remove = fsl_qdma_remove, | ||
1253 | }; | ||
1254 | |||
1255 | module_platform_driver(fsl_qdma_driver); | ||
1256 | |||
1257 | MODULE_ALIAS("platform:fsl-qdma"); | ||
1258 | MODULE_LICENSE("GPL v2"); | ||
1259 | MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver"); | ||
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 9d360a3fbae3..1e38e6b94006 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -53,42 +53,42 @@ static const char msg_ld_oom[] = "No free memory for link descriptor"; | |||
53 | 53 | ||
54 | static void set_sr(struct fsldma_chan *chan, u32 val) | 54 | static void set_sr(struct fsldma_chan *chan, u32 val) |
55 | { | 55 | { |
56 | DMA_OUT(chan, &chan->regs->sr, val, 32); | 56 | FSL_DMA_OUT(chan, &chan->regs->sr, val, 32); |
57 | } | 57 | } |
58 | 58 | ||
59 | static u32 get_sr(struct fsldma_chan *chan) | 59 | static u32 get_sr(struct fsldma_chan *chan) |
60 | { | 60 | { |
61 | return DMA_IN(chan, &chan->regs->sr, 32); | 61 | return FSL_DMA_IN(chan, &chan->regs->sr, 32); |
62 | } | 62 | } |
63 | 63 | ||
64 | static void set_mr(struct fsldma_chan *chan, u32 val) | 64 | static void set_mr(struct fsldma_chan *chan, u32 val) |
65 | { | 65 | { |
66 | DMA_OUT(chan, &chan->regs->mr, val, 32); | 66 | FSL_DMA_OUT(chan, &chan->regs->mr, val, 32); |
67 | } | 67 | } |
68 | 68 | ||
69 | static u32 get_mr(struct fsldma_chan *chan) | 69 | static u32 get_mr(struct fsldma_chan *chan) |
70 | { | 70 | { |
71 | return DMA_IN(chan, &chan->regs->mr, 32); | 71 | return FSL_DMA_IN(chan, &chan->regs->mr, 32); |
72 | } | 72 | } |
73 | 73 | ||
74 | static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) | 74 | static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) |
75 | { | 75 | { |
76 | DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); | 76 | FSL_DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); |
77 | } | 77 | } |
78 | 78 | ||
79 | static dma_addr_t get_cdar(struct fsldma_chan *chan) | 79 | static dma_addr_t get_cdar(struct fsldma_chan *chan) |
80 | { | 80 | { |
81 | return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; | 81 | return FSL_DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; |
82 | } | 82 | } |
83 | 83 | ||
84 | static void set_bcr(struct fsldma_chan *chan, u32 val) | 84 | static void set_bcr(struct fsldma_chan *chan, u32 val) |
85 | { | 85 | { |
86 | DMA_OUT(chan, &chan->regs->bcr, val, 32); | 86 | FSL_DMA_OUT(chan, &chan->regs->bcr, val, 32); |
87 | } | 87 | } |
88 | 88 | ||
89 | static u32 get_bcr(struct fsldma_chan *chan) | 89 | static u32 get_bcr(struct fsldma_chan *chan) |
90 | { | 90 | { |
91 | return DMA_IN(chan, &chan->regs->bcr, 32); | 91 | return FSL_DMA_IN(chan, &chan->regs->bcr, 32); |
92 | } | 92 | } |
93 | 93 | ||
94 | /* | 94 | /* |
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 4787d485dd76..a9b12f82b5c3 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h | |||
@@ -196,39 +196,67 @@ struct fsldma_chan { | |||
196 | #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node) | 196 | #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node) |
197 | #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx) | 197 | #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx) |
198 | 198 | ||
199 | #ifndef __powerpc64__ | 199 | #ifdef CONFIG_PPC |
200 | static u64 in_be64(const u64 __iomem *addr) | 200 | #define fsl_ioread32(p) in_le32(p) |
201 | #define fsl_ioread32be(p) in_be32(p) | ||
202 | #define fsl_iowrite32(v, p) out_le32(p, v) | ||
203 | #define fsl_iowrite32be(v, p) out_be32(p, v) | ||
204 | |||
205 | #ifdef __powerpc64__ | ||
206 | #define fsl_ioread64(p) in_le64(p) | ||
207 | #define fsl_ioread64be(p) in_be64(p) | ||
208 | #define fsl_iowrite64(v, p) out_le64(p, v) | ||
209 | #define fsl_iowrite64be(v, p) out_be64(p, v) | ||
210 | #else | ||
211 | static u64 fsl_ioread64(const u64 __iomem *addr) | ||
201 | { | 212 | { |
202 | return ((u64)in_be32((u32 __iomem *)addr) << 32) | | 213 | u32 fsl_addr = lower_32_bits(addr); |
203 | (in_be32((u32 __iomem *)addr + 1)); | 214 | u64 fsl_addr_hi = (u64)in_le32((u32 *)(fsl_addr + 1)) << 32; |
215 | |||
216 | return fsl_addr_hi | in_le32((u32 *)fsl_addr); | ||
204 | } | 217 | } |
205 | 218 | ||
206 | static void out_be64(u64 __iomem *addr, u64 val) | 219 | static void fsl_iowrite64(u64 val, u64 __iomem *addr) |
207 | { | 220 | { |
208 | out_be32((u32 __iomem *)addr, val >> 32); | 221 | out_le32((u32 __iomem *)addr + 1, val >> 32); |
209 | out_be32((u32 __iomem *)addr + 1, (u32)val); | 222 | out_le32((u32 __iomem *)addr, (u32)val); |
210 | } | 223 | } |
211 | 224 | ||
212 | /* There is no asm instructions for 64 bits reverse loads and stores */ | 225 | static u64 fsl_ioread64be(const u64 __iomem *addr) |
213 | static u64 in_le64(const u64 __iomem *addr) | ||
214 | { | 226 | { |
215 | return ((u64)in_le32((u32 __iomem *)addr + 1) << 32) | | 227 | u32 fsl_addr = lower_32_bits(addr); |
216 | (in_le32((u32 __iomem *)addr)); | 228 | u64 fsl_addr_hi = (u64)in_be32((u32 *)fsl_addr) << 32; |
229 | |||
230 | return fsl_addr_hi | in_be32((u32 *)(fsl_addr + 1)); | ||
217 | } | 231 | } |
218 | 232 | ||
219 | static void out_le64(u64 __iomem *addr, u64 val) | 233 | static void fsl_iowrite64be(u64 val, u64 __iomem *addr) |
220 | { | 234 | { |
221 | out_le32((u32 __iomem *)addr + 1, val >> 32); | 235 | out_be32((u32 __iomem *)addr, val >> 32); |
222 | out_le32((u32 __iomem *)addr, (u32)val); | 236 | out_be32((u32 __iomem *)addr + 1, (u32)val); |
223 | } | 237 | } |
224 | #endif | 238 | #endif |
239 | #endif | ||
225 | 240 | ||
226 | #define DMA_IN(fsl_chan, addr, width) \ | 241 | #if defined(CONFIG_ARM64) || defined(CONFIG_ARM) |
227 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | 242 | #define fsl_ioread32(p) ioread32(p) |
228 | in_be##width(addr) : in_le##width(addr)) | 243 | #define fsl_ioread32be(p) ioread32be(p) |
229 | #define DMA_OUT(fsl_chan, addr, val, width) \ | 244 | #define fsl_iowrite32(v, p) iowrite32(v, p) |
230 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | 245 | #define fsl_iowrite32be(v, p) iowrite32be(v, p) |
231 | out_be##width(addr, val) : out_le##width(addr, val)) | 246 | #define fsl_ioread64(p) ioread64(p) |
247 | #define fsl_ioread64be(p) ioread64be(p) | ||
248 | #define fsl_iowrite64(v, p) iowrite64(v, p) | ||
249 | #define fsl_iowrite64be(v, p) iowrite64be(v, p) | ||
250 | #endif | ||
251 | |||
252 | #define FSL_DMA_IN(fsl_dma, addr, width) \ | ||
253 | (((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
254 | fsl_ioread##width##be(addr) : fsl_ioread##width(addr)) | ||
255 | |||
256 | #define FSL_DMA_OUT(fsl_dma, addr, val, width) \ | ||
257 | (((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
258 | fsl_iowrite##width##be(val, addr) : fsl_iowrite \ | ||
259 | ##width(val, addr)) | ||
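For illustration, a sketch of what the dispatch macro above expands to for a 32-bit read on a big-endian-capable channel (using the sr register from the set_sr()/get_sr() helpers in fsldma.c):

	/* Sketch: FSL_DMA_IN(chan, &chan->regs->sr, 32) expands to */
	u32 sr = ((chan)->feature & FSL_DMA_BIG_ENDIAN) ?
			fsl_ioread32be(&chan->regs->sr) :
			fsl_ioread32(&chan->regs->sr);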
232 | 260 | ||
233 | #define DMA_TO_CPU(fsl_chan, d, width) \ | 261 | #define DMA_TO_CPU(fsl_chan, d, width) \ |
234 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | 262 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ |
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 4a09af3cd546..00a089e24150 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -278,14 +278,14 @@ static int imxdma_hw_chain(struct imxdma_channel *imxdmac) | |||
278 | /* | 278 | /* |
279 | * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation | 279 | * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation |
280 | */ | 280 | */ |
281 | static inline int imxdma_sg_next(struct imxdma_desc *d) | 281 | static inline void imxdma_sg_next(struct imxdma_desc *d) |
282 | { | 282 | { |
283 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); | 283 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); |
284 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 284 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
285 | struct scatterlist *sg = d->sg; | 285 | struct scatterlist *sg = d->sg; |
286 | unsigned long now; | 286 | size_t now; |
287 | 287 | ||
288 | now = min(d->len, sg_dma_len(sg)); | 288 | now = min_t(size_t, d->len, sg_dma_len(sg)); |
289 | if (d->len != IMX_DMA_LENGTH_LOOP) | 289 | if (d->len != IMX_DMA_LENGTH_LOOP) |
290 | d->len -= now; | 290 | d->len -= now; |
291 | 291 | ||
@@ -303,8 +303,6 @@ static inline int imxdma_sg_next(struct imxdma_desc *d) | |||
303 | imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)), | 303 | imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)), |
304 | imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)), | 304 | imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)), |
305 | imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel))); | 305 | imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel))); |
306 | |||
307 | return now; | ||
308 | } | 306 | } |
309 | 307 | ||
310 | static void imxdma_enable_hw(struct imxdma_desc *d) | 308 | static void imxdma_enable_hw(struct imxdma_desc *d) |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 86708fb9bda1..5f3c1378b90e 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -377,6 +377,7 @@ struct sdma_channel { | |||
377 | unsigned long watermark_level; | 377 | unsigned long watermark_level; |
378 | u32 shp_addr, per_addr; | 378 | u32 shp_addr, per_addr; |
379 | enum dma_status status; | 379 | enum dma_status status; |
380 | bool context_loaded; | ||
380 | struct imx_dma_data data; | 381 | struct imx_dma_data data; |
381 | struct work_struct terminate_worker; | 382 | struct work_struct terminate_worker; |
382 | }; | 383 | }; |
@@ -440,6 +441,8 @@ struct sdma_engine { | |||
440 | unsigned int irq; | 441 | unsigned int irq; |
441 | dma_addr_t bd0_phys; | 442 | dma_addr_t bd0_phys; |
442 | struct sdma_buffer_descriptor *bd0; | 443 | struct sdma_buffer_descriptor *bd0; |
444 | /* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/ | ||
445 | bool clk_ratio; | ||
443 | }; | 446 | }; |
444 | 447 | ||
445 | static int sdma_config_write(struct dma_chan *chan, | 448 | static int sdma_config_write(struct dma_chan *chan, |
@@ -662,8 +665,11 @@ static int sdma_run_channel0(struct sdma_engine *sdma) | |||
662 | dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); | 665 | dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); |
663 | 666 | ||
664 | /* Set bits of CONFIG register with dynamic context switching */ | 667 | /* Set bits of CONFIG register with dynamic context switching */ |
665 | if (readl(sdma->regs + SDMA_H_CONFIG) == 0) | 668 | reg = readl(sdma->regs + SDMA_H_CONFIG); |
666 | writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); | 669 | if ((reg & SDMA_H_CONFIG_CSM) == 0) { |
670 | reg |= SDMA_H_CONFIG_CSM; | ||
671 | writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG); | ||
672 | } | ||
667 | 673 | ||
668 | return ret; | 674 | return ret; |
669 | } | 675 | } |
@@ -677,7 +683,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, | |||
677 | int ret; | 683 | int ret; |
678 | unsigned long flags; | 684 | unsigned long flags; |
679 | 685 | ||
680 | buf_virt = dma_alloc_coherent(NULL, size, &buf_phys, GFP_KERNEL); | 686 | buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL); |
681 | if (!buf_virt) { | 687 | if (!buf_virt) { |
682 | return -ENOMEM; | 688 | return -ENOMEM; |
683 | } | 689 | } |
@@ -696,7 +702,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, | |||
696 | 702 | ||
697 | spin_unlock_irqrestore(&sdma->channel_0_lock, flags); | 703 | spin_unlock_irqrestore(&sdma->channel_0_lock, flags); |
698 | 704 | ||
699 | dma_free_coherent(NULL, size, buf_virt, buf_phys); | 705 | dma_free_coherent(sdma->dev, size, buf_virt, buf_phys); |
700 | 706 | ||
701 | return ret; | 707 | return ret; |
702 | } | 708 | } |
@@ -970,6 +976,9 @@ static int sdma_load_context(struct sdma_channel *sdmac) | |||
970 | int ret; | 976 | int ret; |
971 | unsigned long flags; | 977 | unsigned long flags; |
972 | 978 | ||
979 | if (sdmac->context_loaded) | ||
980 | return 0; | ||
981 | |||
973 | if (sdmac->direction == DMA_DEV_TO_MEM) | 982 | if (sdmac->direction == DMA_DEV_TO_MEM) |
974 | load_address = sdmac->pc_from_device; | 983 | load_address = sdmac->pc_from_device; |
975 | else if (sdmac->direction == DMA_DEV_TO_DEV) | 984 | else if (sdmac->direction == DMA_DEV_TO_DEV) |
@@ -1012,6 +1021,8 @@ static int sdma_load_context(struct sdma_channel *sdmac) | |||
1012 | 1021 | ||
1013 | spin_unlock_irqrestore(&sdma->channel_0_lock, flags); | 1022 | spin_unlock_irqrestore(&sdma->channel_0_lock, flags); |
1014 | 1023 | ||
1024 | sdmac->context_loaded = true; | ||
1025 | |||
1015 | return ret; | 1026 | return ret; |
1016 | } | 1027 | } |
1017 | 1028 | ||
@@ -1051,6 +1062,7 @@ static void sdma_channel_terminate_work(struct work_struct *work) | |||
1051 | sdmac->desc = NULL; | 1062 | sdmac->desc = NULL; |
1052 | spin_unlock_irqrestore(&sdmac->vc.lock, flags); | 1063 | spin_unlock_irqrestore(&sdmac->vc.lock, flags); |
1053 | vchan_dma_desc_free_list(&sdmac->vc, &head); | 1064 | vchan_dma_desc_free_list(&sdmac->vc, &head); |
1065 | sdmac->context_loaded = false; | ||
1054 | } | 1066 | } |
1055 | 1067 | ||
1056 | static int sdma_disable_channel_async(struct dma_chan *chan) | 1068 | static int sdma_disable_channel_async(struct dma_chan *chan) |
@@ -1182,8 +1194,8 @@ static int sdma_request_channel0(struct sdma_engine *sdma) | |||
1182 | { | 1194 | { |
1183 | int ret = -EBUSY; | 1195 | int ret = -EBUSY; |
1184 | 1196 | ||
1185 | sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, | 1197 | sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys, |
1186 | GFP_NOWAIT); | 1198 | GFP_NOWAIT); |
1187 | if (!sdma->bd0) { | 1199 | if (!sdma->bd0) { |
1188 | ret = -ENOMEM; | 1200 | ret = -ENOMEM; |
1189 | goto out; | 1201 | goto out; |
@@ -1205,8 +1217,8 @@ static int sdma_alloc_bd(struct sdma_desc *desc) | |||
1205 | u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); | 1217 | u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); |
1206 | int ret = 0; | 1218 | int ret = 0; |
1207 | 1219 | ||
1208 | desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys, | 1220 | desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size, |
1209 | GFP_NOWAIT); | 1221 | &desc->bd_phys, GFP_NOWAIT); |
1210 | if (!desc->bd) { | 1222 | if (!desc->bd) { |
1211 | ret = -ENOMEM; | 1223 | ret = -ENOMEM; |
1212 | goto out; | 1224 | goto out; |
@@ -1219,7 +1231,8 @@ static void sdma_free_bd(struct sdma_desc *desc) | |||
1219 | { | 1231 | { |
1220 | u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); | 1232 | u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); |
1221 | 1233 | ||
1222 | dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys); | 1234 | dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd, |
1235 | desc->bd_phys); | ||
1223 | } | 1236 | } |
1224 | 1237 | ||
1225 | static void sdma_desc_free(struct virt_dma_desc *vd) | 1238 | static void sdma_desc_free(struct virt_dma_desc *vd) |
@@ -1839,10 +1852,13 @@ static int sdma_init(struct sdma_engine *sdma) | |||
1839 | if (ret) | 1852 | if (ret) |
1840 | goto disable_clk_ipg; | 1853 | goto disable_clk_ipg; |
1841 | 1854 | ||
1855 | if (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)) | ||
1856 | sdma->clk_ratio = 1; | ||
1857 | |||
1842 | /* Be sure SDMA has not started yet */ | 1858 | /* Be sure SDMA has not started yet */ |
1843 | writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); | 1859 | writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); |
1844 | 1860 | ||
1845 | sdma->channel_control = dma_alloc_coherent(NULL, | 1861 | sdma->channel_control = dma_alloc_coherent(sdma->dev, |
1846 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + | 1862 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + |
1847 | sizeof(struct sdma_context_data), | 1863 | sizeof(struct sdma_context_data), |
1848 | &ccb_phys, GFP_KERNEL); | 1864 | &ccb_phys, GFP_KERNEL); |
@@ -1879,8 +1895,10 @@ static int sdma_init(struct sdma_engine *sdma) | |||
1879 | writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR); | 1895 | writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR); |
1880 | 1896 | ||
1881 | /* Set bits of CONFIG register but with static context switching */ | 1897 | /* Set bits of CONFIG register but with static context switching */ |
1882 | /* FIXME: Check whether to set ACR bit depending on clock ratios */ | 1898 | if (sdma->clk_ratio) |
1883 | writel_relaxed(0, sdma->regs + SDMA_H_CONFIG); | 1899 | writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG); |
1900 | else | ||
1901 | writel_relaxed(0, sdma->regs + SDMA_H_CONFIG); | ||
1884 | 1902 | ||
1885 | writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); | 1903 | writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); |
1886 | 1904 | ||
@@ -1903,11 +1921,16 @@ disable_clk_ipg: | |||
1903 | static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) | 1921 | static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) |
1904 | { | 1922 | { |
1905 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 1923 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
1924 | struct sdma_engine *sdma = sdmac->sdma; | ||
1906 | struct imx_dma_data *data = fn_param; | 1925 | struct imx_dma_data *data = fn_param; |
1907 | 1926 | ||
1908 | if (!imx_dma_is_general_purpose(chan)) | 1927 | if (!imx_dma_is_general_purpose(chan)) |
1909 | return false; | 1928 | return false; |
1910 | 1929 | ||
1930 | /* return false if it's not the right device */ | ||
1931 | if (sdma->dev->of_node != data->of_node) | ||
1932 | return false; | ||
1933 | |||
1911 | sdmac->data = *data; | 1934 | sdmac->data = *data; |
1912 | chan->private = &sdmac->data; | 1935 | chan->private = &sdmac->data; |
1913 | 1936 | ||
@@ -1935,6 +1958,7 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec, | |||
1935 | * be set to sdmac->event_id1. | 1958 | * be set to sdmac->event_id1. |
1936 | */ | 1959 | */ |
1937 | data.dma_request2 = 0; | 1960 | data.dma_request2 = 0; |
1961 | data.of_node = ofdma->of_node; | ||
1938 | 1962 | ||
1939 | return dma_request_channel(mask, sdma_filter_fn, &data); | 1963 | return dma_request_channel(mask, sdma_filter_fn, &data); |
1940 | } | 1964 | } |
@@ -2097,6 +2121,7 @@ static int sdma_probe(struct platform_device *pdev) | |||
2097 | sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy; | 2121 | sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy; |
2098 | sdma->dma_device.device_issue_pending = sdma_issue_pending; | 2122 | sdma->dma_device.device_issue_pending = sdma_issue_pending; |
2099 | sdma->dma_device.dev->dma_parms = &sdma->dma_parms; | 2123 | sdma->dma_device.dev->dma_parms = &sdma->dma_parms; |
2124 | sdma->dma_device.copy_align = 2; | ||
2100 | dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT); | 2125 | dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT); |
2101 | 2126 | ||
2102 | platform_set_drvdata(pdev, sdma); | 2127 | platform_set_drvdata(pdev, sdma); |
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 23fb2fa04000..f373a139e0c3 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
@@ -372,6 +372,7 @@ struct ioat_ring_ent ** | |||
372 | ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) | 372 | ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) |
373 | { | 373 | { |
374 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | 374 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); |
375 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; | ||
375 | struct ioat_ring_ent **ring; | 376 | struct ioat_ring_ent **ring; |
376 | int total_descs = 1 << order; | 377 | int total_descs = 1 << order; |
377 | int i, chunks; | 378 | int i, chunks; |
@@ -437,6 +438,17 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) | |||
437 | } | 438 | } |
438 | ring[i]->hw->next = ring[0]->txd.phys; | 439 | ring[i]->hw->next = ring[0]->txd.phys; |
439 | 440 | ||
441 | /* setup descriptor pre-fetching for v3.4 */ | ||
442 | if (ioat_dma->cap & IOAT_CAP_DPS) { | ||
443 | u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN; | ||
444 | |||
445 | if (chunks == 1) | ||
446 | drsctl |= IOAT_CHAN_DRS_AUTOWRAP; | ||
447 | |||
448 | writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET); | ||
449 | |||
450 | } | ||
451 | |||
440 | return ring; | 452 | return ring; |
441 | } | 453 | } |
442 | 454 | ||
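
On v3.4 hardware the ring allocation above also programs descriptor pre-fetching through the new DRSCTL register. A small sketch of how the control word is composed, using the bit values introduced in the registers.h hunk later in this diff:

#include <stdio.h>

/* Bit values taken from the registers.h hunk later in this diff. */
#define IOAT_CHAN_DRSZ_2MB	0x0009
#define IOAT_CHAN_DRS_EN	0x0100
#define IOAT_CHAN_DRS_AUTOWRAP	0x0200

/* Same composition as the ioat_alloc_ring() hunk: enable pre-fetching
 * over a 2 MB descriptor region, and let hardware auto-wrap only when
 * the whole ring fits in a single chunk. */
static unsigned int drsctl_for(int chunks)
{
	unsigned int drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;

	if (chunks == 1)
		drsctl |= IOAT_CHAN_DRS_AUTOWRAP;
	return drsctl;
}

int main(void)
{
	printf("1 chunk : DRSCTL = 0x%04x\n", drsctl_for(1));	/* 0x0309 */
	printf("4 chunks: DRSCTL = 0x%04x\n", drsctl_for(4));	/* 0x0109 */
	return 0;
}
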
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 1ab42ec2b7ff..aaafd0e882b5 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h | |||
@@ -27,7 +27,7 @@ | |||
27 | #include "registers.h" | 27 | #include "registers.h" |
28 | #include "hw.h" | 28 | #include "hw.h" |
29 | 29 | ||
30 | #define IOAT_DMA_VERSION "4.00" | 30 | #define IOAT_DMA_VERSION "5.00" |
31 | 31 | ||
32 | #define IOAT_DMA_DCA_ANY_CPU ~0 | 32 | #define IOAT_DMA_DCA_ANY_CPU ~0 |
33 | 33 | ||
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index abcc51b343ce..781c94de8e81 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h | |||
@@ -66,11 +66,14 @@ | |||
66 | 66 | ||
67 | #define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021 | 67 | #define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021 |
68 | 68 | ||
69 | #define PCI_DEVICE_ID_INTEL_IOAT_ICX 0x0b00 | ||
70 | |||
69 | #define IOAT_VER_1_2 0x12 /* Version 1.2 */ | 71 | #define IOAT_VER_1_2 0x12 /* Version 1.2 */ |
70 | #define IOAT_VER_2_0 0x20 /* Version 2.0 */ | 72 | #define IOAT_VER_2_0 0x20 /* Version 2.0 */ |
71 | #define IOAT_VER_3_0 0x30 /* Version 3.0 */ | 73 | #define IOAT_VER_3_0 0x30 /* Version 3.0 */ |
72 | #define IOAT_VER_3_2 0x32 /* Version 3.2 */ | 74 | #define IOAT_VER_3_2 0x32 /* Version 3.2 */ |
73 | #define IOAT_VER_3_3 0x33 /* Version 3.3 */ | 75 | #define IOAT_VER_3_3 0x33 /* Version 3.3 */ |
76 | #define IOAT_VER_3_4 0x34 /* Version 3.4 */ | ||
74 | 77 | ||
75 | 78 | ||
76 | int system_has_dca_enabled(struct pci_dev *pdev); | 79 | int system_has_dca_enabled(struct pci_dev *pdev); |
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 2d810dfcdc48..d41dc9a9ff68 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c | |||
@@ -119,6 +119,9 @@ static const struct pci_device_id ioat_pci_tbl[] = { | |||
119 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) }, | 119 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) }, |
120 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) }, | 120 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) }, |
121 | 121 | ||
122 | /* I/OAT v3.4 platforms */ | ||
123 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_ICX) }, | ||
124 | |||
122 | { 0, } | 125 | { 0, } |
123 | }; | 126 | }; |
124 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); | 127 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); |
@@ -135,10 +138,10 @@ static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma); | |||
135 | static int ioat_dca_enabled = 1; | 138 | static int ioat_dca_enabled = 1; |
136 | module_param(ioat_dca_enabled, int, 0644); | 139 | module_param(ioat_dca_enabled, int, 0644); |
137 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); | 140 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); |
138 | int ioat_pending_level = 4; | 141 | int ioat_pending_level = 7; |
139 | module_param(ioat_pending_level, int, 0644); | 142 | module_param(ioat_pending_level, int, 0644); |
140 | MODULE_PARM_DESC(ioat_pending_level, | 143 | MODULE_PARM_DESC(ioat_pending_level, |
141 | "high-water mark for pushing ioat descriptors (default: 4)"); | 144 | "high-water mark for pushing ioat descriptors (default: 7)"); |
142 | static char ioat_interrupt_style[32] = "msix"; | 145 | static char ioat_interrupt_style[32] = "msix"; |
143 | module_param_string(ioat_interrupt_style, ioat_interrupt_style, | 146 | module_param_string(ioat_interrupt_style, ioat_interrupt_style, |
144 | sizeof(ioat_interrupt_style), 0644); | 147 | sizeof(ioat_interrupt_style), 0644); |
@@ -635,6 +638,11 @@ static void ioat_free_chan_resources(struct dma_chan *c) | |||
635 | ioat_stop(ioat_chan); | 638 | ioat_stop(ioat_chan); |
636 | ioat_reset_hw(ioat_chan); | 639 | ioat_reset_hw(ioat_chan); |
637 | 640 | ||
641 | /* Put LTR to idle */ | ||
642 | if (ioat_dma->version >= IOAT_VER_3_4) | ||
643 | writeb(IOAT_CHAN_LTR_SWSEL_IDLE, | ||
644 | ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET); | ||
645 | |||
638 | spin_lock_bh(&ioat_chan->cleanup_lock); | 646 | spin_lock_bh(&ioat_chan->cleanup_lock); |
639 | spin_lock_bh(&ioat_chan->prep_lock); | 647 | spin_lock_bh(&ioat_chan->prep_lock); |
640 | descs = ioat_ring_space(ioat_chan); | 648 | descs = ioat_ring_space(ioat_chan); |
@@ -724,6 +732,28 @@ static int ioat_alloc_chan_resources(struct dma_chan *c) | |||
724 | spin_unlock_bh(&ioat_chan->prep_lock); | 732 | spin_unlock_bh(&ioat_chan->prep_lock); |
725 | spin_unlock_bh(&ioat_chan->cleanup_lock); | 733 | spin_unlock_bh(&ioat_chan->cleanup_lock); |
726 | 734 | ||
735 | /* Setting up LTR values for 3.4 or later */ | ||
736 | if (ioat_chan->ioat_dma->version >= IOAT_VER_3_4) { | ||
737 | u32 lat_val; | ||
738 | |||
739 | lat_val = IOAT_CHAN_LTR_ACTIVE_SNVAL | | ||
740 | IOAT_CHAN_LTR_ACTIVE_SNLATSCALE | | ||
741 | IOAT_CHAN_LTR_ACTIVE_SNREQMNT; | ||
742 | writel(lat_val, ioat_chan->reg_base + | ||
743 | IOAT_CHAN_LTR_ACTIVE_OFFSET); | ||
744 | |||
745 | lat_val = IOAT_CHAN_LTR_IDLE_SNVAL | | ||
746 | IOAT_CHAN_LTR_IDLE_SNLATSCALE | | ||
747 | IOAT_CHAN_LTR_IDLE_SNREQMNT; | ||
748 | writel(lat_val, ioat_chan->reg_base + | ||
749 | IOAT_CHAN_LTR_IDLE_OFFSET); | ||
750 | |||
751 | /* Select to active */ | ||
752 | writeb(IOAT_CHAN_LTR_SWSEL_ACTIVE, | ||
753 | ioat_chan->reg_base + | ||
754 | IOAT_CHAN_LTR_SWSEL_OFFSET); | ||
755 | } | ||
756 | |||
727 | ioat_start_null_desc(ioat_chan); | 757 | ioat_start_null_desc(ioat_chan); |
728 | 758 | ||
729 | /* check that we got off the ground */ | 759 | /* check that we got off the ground */ |
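
Channel allocation on v3.4 also programs the active and idle LTR (latency tolerance reporting) words before switching the channel to the active LTR set. The sketch below just composes the two 32-bit words from the encodings added in registers.h, so the resulting register values are easy to see:

#include <stdio.h>

/* Encodings copied from the registers.h hunk later in this diff. */
#define IOAT_CHAN_LTR_ACTIVE_SNVAL	0x0000	/* 0 us */
#define IOAT_CHAN_LTR_ACTIVE_SNLATSCALE	0x0800	/* 1 us scale */
#define IOAT_CHAN_LTR_ACTIVE_SNREQMNT	0x8000	/* snoop req enable */
#define IOAT_CHAN_LTR_IDLE_SNVAL	0x0258	/* 600 us */
#define IOAT_CHAN_LTR_IDLE_SNLATSCALE	0x0800	/* 1 us scale */
#define IOAT_CHAN_LTR_IDLE_SNREQMNT	0x8000	/* snoop req enable */

int main(void)
{
	/* active: 0 us latency tolerance; idle: 600 us; both snooped */
	unsigned int active = IOAT_CHAN_LTR_ACTIVE_SNVAL |
			      IOAT_CHAN_LTR_ACTIVE_SNLATSCALE |
			      IOAT_CHAN_LTR_ACTIVE_SNREQMNT;
	unsigned int idle = IOAT_CHAN_LTR_IDLE_SNVAL |
			    IOAT_CHAN_LTR_IDLE_SNLATSCALE |
			    IOAT_CHAN_LTR_IDLE_SNREQMNT;

	printf("LTR active word: 0x%08x\n", active);	/* 0x00008800 */
	printf("LTR idle word:   0x%08x\n", idle);	/* 0x00008a58 */
	return 0;
}
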
@@ -1185,6 +1215,10 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) | |||
1185 | if (err) | 1215 | if (err) |
1186 | return err; | 1216 | return err; |
1187 | 1217 | ||
1218 | if (ioat_dma->cap & IOAT_CAP_DPS) | ||
1219 | writeb(ioat_pending_level + 1, | ||
1220 | ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET); | ||
1221 | |||
1188 | return 0; | 1222 | return 0; |
1189 | } | 1223 | } |
1190 | 1224 | ||
@@ -1350,6 +1384,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1350 | pci_set_drvdata(pdev, device); | 1384 | pci_set_drvdata(pdev, device); |
1351 | 1385 | ||
1352 | device->version = readb(device->reg_base + IOAT_VER_OFFSET); | 1386 | device->version = readb(device->reg_base + IOAT_VER_OFFSET); |
1387 | if (device->version >= IOAT_VER_3_4) | ||
1388 | ioat_dca_enabled = 0; | ||
1353 | if (device->version >= IOAT_VER_3_0) { | 1389 | if (device->version >= IOAT_VER_3_0) { |
1354 | if (is_skx_ioat(pdev)) | 1390 | if (is_skx_ioat(pdev)) |
1355 | device->version = IOAT_VER_3_2; | 1391 | device->version = IOAT_VER_3_2; |
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index 2f3bbc88ff2a..99c1c24d465d 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h | |||
@@ -84,6 +84,9 @@ | |||
84 | #define IOAT_CAP_PQ 0x00000200 | 84 | #define IOAT_CAP_PQ 0x00000200 |
85 | #define IOAT_CAP_DWBES 0x00002000 | 85 | #define IOAT_CAP_DWBES 0x00002000 |
86 | #define IOAT_CAP_RAID16SS 0x00020000 | 86 | #define IOAT_CAP_RAID16SS 0x00020000 |
87 | #define IOAT_CAP_DPS 0x00800000 | ||
88 | |||
89 | #define IOAT_PREFETCH_LIMIT_OFFSET 0x4C /* CHWPREFLMT */ | ||
87 | 90 | ||
88 | #define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ | 91 | #define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ |
89 | 92 | ||
@@ -243,4 +246,25 @@ | |||
243 | 246 | ||
244 | #define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */ | 247 | #define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */ |
245 | 248 | ||
249 | #define IOAT_CHAN_DRSCTL_OFFSET 0xB6 | ||
250 | #define IOAT_CHAN_DRSZ_4KB 0x0000 | ||
251 | #define IOAT_CHAN_DRSZ_8KB 0x0001 | ||
252 | #define IOAT_CHAN_DRSZ_2MB 0x0009 | ||
253 | #define IOAT_CHAN_DRS_EN 0x0100 | ||
254 | #define IOAT_CHAN_DRS_AUTOWRAP 0x0200 | ||
255 | |||
256 | #define IOAT_CHAN_LTR_SWSEL_OFFSET 0xBC | ||
257 | #define IOAT_CHAN_LTR_SWSEL_ACTIVE 0x0 | ||
258 | #define IOAT_CHAN_LTR_SWSEL_IDLE 0x1 | ||
259 | |||
260 | #define IOAT_CHAN_LTR_ACTIVE_OFFSET 0xC0 | ||
261 | #define IOAT_CHAN_LTR_ACTIVE_SNVAL 0x0000 /* 0 us */ | ||
262 | #define IOAT_CHAN_LTR_ACTIVE_SNLATSCALE 0x0800 /* 1us scale */ | ||
263 | #define IOAT_CHAN_LTR_ACTIVE_SNREQMNT 0x8000 /* snoop req enable */ | ||
264 | |||
265 | #define IOAT_CHAN_LTR_IDLE_OFFSET 0xC4 | ||
266 | #define IOAT_CHAN_LTR_IDLE_SNVAL 0x0258 /* 600 us */ | ||
267 | #define IOAT_CHAN_LTR_IDLE_SNLATSCALE 0x0800 /* 1us scale */ | ||
268 | #define IOAT_CHAN_LTR_IDLE_SNREQMNT 0x8000 /* snoop req enable */ | ||
269 | |||
246 | #endif /* _IOAT_REGISTERS_H_ */ | 270 | #endif /* _IOAT_REGISTERS_H_ */ |
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index fdec2b6cfbb0..5737d92eaeeb 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c | |||
@@ -52,8 +52,6 @@ | |||
52 | #define CX_SRC 0x814 | 52 | #define CX_SRC 0x814 |
53 | #define CX_DST 0x818 | 53 | #define CX_DST 0x818 |
54 | #define CX_CFG 0x81c | 54 | #define CX_CFG 0x81c |
55 | #define AXI_CFG 0x820 | ||
56 | #define AXI_CFG_DEFAULT 0x201201 | ||
57 | 55 | ||
58 | #define CX_LLI_CHAIN_EN 0x2 | 56 | #define CX_LLI_CHAIN_EN 0x2 |
59 | #define CX_CFG_EN 0x1 | 57 | #define CX_CFG_EN 0x1 |
@@ -113,9 +111,18 @@ struct k3_dma_dev { | |||
113 | struct dma_pool *pool; | 111 | struct dma_pool *pool; |
114 | u32 dma_channels; | 112 | u32 dma_channels; |
115 | u32 dma_requests; | 113 | u32 dma_requests; |
114 | u32 dma_channel_mask; | ||
116 | unsigned int irq; | 115 | unsigned int irq; |
117 | }; | 116 | }; |
118 | 117 | ||
118 | |||
119 | #define K3_FLAG_NOCLK BIT(1) | ||
120 | |||
121 | struct k3dma_soc_data { | ||
122 | unsigned long flags; | ||
123 | }; | ||
124 | |||
125 | |||
119 | #define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave) | 126 | #define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave) |
120 | 127 | ||
121 | static int k3_dma_config_write(struct dma_chan *chan, | 128 | static int k3_dma_config_write(struct dma_chan *chan, |
@@ -161,7 +168,6 @@ static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw) | |||
161 | writel_relaxed(hw->count, phy->base + CX_CNT0); | 168 | writel_relaxed(hw->count, phy->base + CX_CNT0); |
162 | writel_relaxed(hw->saddr, phy->base + CX_SRC); | 169 | writel_relaxed(hw->saddr, phy->base + CX_SRC); |
163 | writel_relaxed(hw->daddr, phy->base + CX_DST); | 170 | writel_relaxed(hw->daddr, phy->base + CX_DST); |
164 | writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG); | ||
165 | writel_relaxed(hw->config, phy->base + CX_CFG); | 171 | writel_relaxed(hw->config, phy->base + CX_CFG); |
166 | } | 172 | } |
167 | 173 | ||
@@ -314,6 +320,9 @@ static void k3_dma_tasklet(unsigned long arg) | |||
314 | /* check new channel request in d->chan_pending */ | 320 | /* check new channel request in d->chan_pending */ |
315 | spin_lock_irq(&d->lock); | 321 | spin_lock_irq(&d->lock); |
316 | for (pch = 0; pch < d->dma_channels; pch++) { | 322 | for (pch = 0; pch < d->dma_channels; pch++) { |
323 | if (!(d->dma_channel_mask & (1 << pch))) | ||
324 | continue; | ||
325 | |||
317 | p = &d->phy[pch]; | 326 | p = &d->phy[pch]; |
318 | 327 | ||
319 | if (p->vchan == NULL && !list_empty(&d->chan_pending)) { | 328 | if (p->vchan == NULL && !list_empty(&d->chan_pending)) { |
@@ -331,6 +340,9 @@ static void k3_dma_tasklet(unsigned long arg) | |||
331 | spin_unlock_irq(&d->lock); | 340 | spin_unlock_irq(&d->lock); |
332 | 341 | ||
333 | for (pch = 0; pch < d->dma_channels; pch++) { | 342 | for (pch = 0; pch < d->dma_channels; pch++) { |
343 | if (!(d->dma_channel_mask & (1 << pch))) | ||
344 | continue; | ||
345 | |||
334 | if (pch_alloc & (1 << pch)) { | 346 | if (pch_alloc & (1 << pch)) { |
335 | p = &d->phy[pch]; | 347 | p = &d->phy[pch]; |
336 | c = p->vchan; | 348 | c = p->vchan; |
@@ -790,8 +802,21 @@ static int k3_dma_transfer_resume(struct dma_chan *chan) | |||
790 | return 0; | 802 | return 0; |
791 | } | 803 | } |
792 | 804 | ||
805 | static const struct k3dma_soc_data k3_v1_dma_data = { | ||
806 | .flags = 0, | ||
807 | }; | ||
808 | |||
809 | static const struct k3dma_soc_data asp_v1_dma_data = { | ||
810 | .flags = K3_FLAG_NOCLK, | ||
811 | }; | ||
812 | |||
793 | static const struct of_device_id k3_pdma_dt_ids[] = { | 813 | static const struct of_device_id k3_pdma_dt_ids[] = { |
794 | { .compatible = "hisilicon,k3-dma-1.0", }, | 814 | { .compatible = "hisilicon,k3-dma-1.0", |
815 | .data = &k3_v1_dma_data | ||
816 | }, | ||
817 | { .compatible = "hisilicon,hisi-pcm-asp-dma-1.0", | ||
818 | .data = &asp_v1_dma_data | ||
819 | }, | ||
795 | {} | 820 | {} |
796 | }; | 821 | }; |
797 | MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids); | 822 | MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids); |
@@ -810,6 +835,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec, | |||
810 | 835 | ||
811 | static int k3_dma_probe(struct platform_device *op) | 836 | static int k3_dma_probe(struct platform_device *op) |
812 | { | 837 | { |
838 | const struct k3dma_soc_data *soc_data; | ||
813 | struct k3_dma_dev *d; | 839 | struct k3_dma_dev *d; |
814 | const struct of_device_id *of_id; | 840 | const struct of_device_id *of_id; |
815 | struct resource *iores; | 841 | struct resource *iores; |
@@ -823,6 +849,10 @@ static int k3_dma_probe(struct platform_device *op) | |||
823 | if (!d) | 849 | if (!d) |
824 | return -ENOMEM; | 850 | return -ENOMEM; |
825 | 851 | ||
852 | soc_data = device_get_match_data(&op->dev); | ||
853 | if (!soc_data) | ||
854 | return -EINVAL; | ||
855 | |||
826 | d->base = devm_ioremap_resource(&op->dev, iores); | 856 | d->base = devm_ioremap_resource(&op->dev, iores); |
827 | if (IS_ERR(d->base)) | 857 | if (IS_ERR(d->base)) |
828 | return PTR_ERR(d->base); | 858 | return PTR_ERR(d->base); |
@@ -833,12 +863,21 @@ static int k3_dma_probe(struct platform_device *op) | |||
833 | "dma-channels", &d->dma_channels); | 863 | "dma-channels", &d->dma_channels); |
834 | of_property_read_u32((&op->dev)->of_node, | 864 | of_property_read_u32((&op->dev)->of_node, |
835 | "dma-requests", &d->dma_requests); | 865 | "dma-requests", &d->dma_requests); |
866 | ret = of_property_read_u32((&op->dev)->of_node, | ||
867 | "dma-channel-mask", &d->dma_channel_mask); | ||
868 | if (ret) { | ||
869 | dev_warn(&op->dev, | ||
870 | "dma-channel-mask doesn't exist, considering all as available.\n"); | ||
871 | d->dma_channel_mask = (u32)~0UL; | ||
872 | } | ||
836 | } | 873 | } |
837 | 874 | ||
838 | d->clk = devm_clk_get(&op->dev, NULL); | 875 | if (!(soc_data->flags & K3_FLAG_NOCLK)) { |
839 | if (IS_ERR(d->clk)) { | 876 | d->clk = devm_clk_get(&op->dev, NULL); |
840 | dev_err(&op->dev, "no dma clk\n"); | 877 | if (IS_ERR(d->clk)) { |
841 | return PTR_ERR(d->clk); | 878 | dev_err(&op->dev, "no dma clk\n"); |
879 | return PTR_ERR(d->clk); | ||
880 | } | ||
842 | } | 881 | } |
843 | 882 | ||
844 | irq = platform_get_irq(op, 0); | 883 | irq = platform_get_irq(op, 0); |
@@ -862,8 +901,12 @@ static int k3_dma_probe(struct platform_device *op) | |||
862 | return -ENOMEM; | 901 | return -ENOMEM; |
863 | 902 | ||
864 | for (i = 0; i < d->dma_channels; i++) { | 903 | for (i = 0; i < d->dma_channels; i++) { |
865 | struct k3_dma_phy *p = &d->phy[i]; | 904 | struct k3_dma_phy *p; |
905 | |||
906 | if (!(d->dma_channel_mask & BIT(i))) | ||
907 | continue; | ||
866 | 908 | ||
909 | p = &d->phy[i]; | ||
867 | p->idx = i; | 910 | p->idx = i; |
868 | p->base = d->base + i * 0x40; | 911 | p->base = d->base + i * 0x40; |
869 | } | 912 | } |
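
With the new dma-channel-mask support, channels whose bit is clear are treated as reserved by firmware: the probe loop above skips them, and so does the tasklet when it hands out physical channels. A minimal sketch of that gating, assuming a 16-channel controller where channel 0 is reserved:

#include <stdio.h>

#define BIT(n)	(1u << (n))

/* Channels whose mask bit is clear stay reserved for firmware; both the
 * probe loop and the scheduling tasklet skip them. */
static void init_channels(unsigned int dma_channels, unsigned int channel_mask)
{
	for (unsigned int i = 0; i < dma_channels; i++) {
		if (!(channel_mask & BIT(i)))
			continue;	/* reserved, not touched by the kernel */
		printf("channel %u available\n", i);
	}
}

int main(void)
{
	init_channels(16, 0xfffe);	/* bit 0 clear: channel 0 reserved */
	return 0;
}
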
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c index 5de1b07eddff..7de54b2fafdb 100644 --- a/drivers/dma/mcf-edma.c +++ b/drivers/dma/mcf-edma.c | |||
@@ -214,6 +214,7 @@ static int mcf_edma_probe(struct platform_device *pdev) | |||
214 | mcf_chan->edma = mcf_edma; | 214 | mcf_chan->edma = mcf_edma; |
215 | mcf_chan->slave_id = i; | 215 | mcf_chan->slave_id = i; |
216 | mcf_chan->idle = true; | 216 | mcf_chan->idle = true; |
217 | mcf_chan->dma_dir = DMA_NONE; | ||
217 | mcf_chan->vchan.desc_free = fsl_edma_free_desc; | 218 | mcf_chan->vchan.desc_free = fsl_edma_free_desc; |
218 | vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev); | 219 | vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev); |
219 | iowrite32(0x0, ®s->tcd[i].csr); | 220 | iowrite32(0x0, ®s->tcd[i].csr); |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 7f595355fb79..65af2e7fcb2c 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -1059,6 +1059,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1059 | mv_chan->op_in_desc = XOR_MODE_IN_DESC; | 1059 | mv_chan->op_in_desc = XOR_MODE_IN_DESC; |
1060 | 1060 | ||
1061 | dma_dev = &mv_chan->dmadev; | 1061 | dma_dev = &mv_chan->dmadev; |
1062 | dma_dev->dev = &pdev->dev; | ||
1062 | mv_chan->xordev = xordev; | 1063 | mv_chan->xordev = xordev; |
1063 | 1064 | ||
1064 | /* | 1065 | /* |
@@ -1091,7 +1092,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1091 | dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; | 1092 | dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; |
1092 | dma_dev->device_tx_status = mv_xor_status; | 1093 | dma_dev->device_tx_status = mv_xor_status; |
1093 | dma_dev->device_issue_pending = mv_xor_issue_pending; | 1094 | dma_dev->device_issue_pending = mv_xor_issue_pending; |
1094 | dma_dev->dev = &pdev->dev; | ||
1095 | 1095 | ||
1096 | /* set prep routines based on capability */ | 1096 | /* set prep routines based on capability */ |
1097 | if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) | 1097 | if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) |
@@ -1153,7 +1153,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1153 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", | 1153 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", |
1154 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); | 1154 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); |
1155 | 1155 | ||
1156 | dma_async_device_register(dma_dev); | 1156 | ret = dma_async_device_register(dma_dev); |
1157 | if (ret) | ||
1158 | goto err_free_irq; | ||
1159 | |||
1157 | return mv_chan; | 1160 | return mv_chan; |
1158 | 1161 | ||
1159 | err_free_irq: | 1162 | err_free_irq: |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index cff1b143fff5..eec79fdf27a5 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -2267,7 +2267,6 @@ static int pl330_terminate_all(struct dma_chan *chan) | |||
2267 | struct dma_pl330_desc *desc; | 2267 | struct dma_pl330_desc *desc; |
2268 | unsigned long flags; | 2268 | unsigned long flags; |
2269 | struct pl330_dmac *pl330 = pch->dmac; | 2269 | struct pl330_dmac *pl330 = pch->dmac; |
2270 | LIST_HEAD(list); | ||
2271 | bool power_down = false; | 2270 | bool power_down = false; |
2272 | 2271 | ||
2273 | pm_runtime_get_sync(pl330->ddma.dev); | 2272 | pm_runtime_get_sync(pl330->ddma.dev); |
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 1617715aa6e0..cb860cb53c27 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c | |||
@@ -636,8 +636,8 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, | |||
636 | num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE); | 636 | num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE); |
637 | 637 | ||
638 | /* allocate enough room to accomodate the number of entries */ | 638 | /* allocate enough room to accomodate the number of entries */ |
639 | async_desc = kzalloc(sizeof(*async_desc) + | 639 | async_desc = kzalloc(struct_size(async_desc, desc, num_alloc), |
640 | (num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT); | 640 | GFP_NOWAIT); |
641 | 641 | ||
642 | if (!async_desc) | 642 | if (!async_desc) |
643 | goto err_out; | 643 | goto err_out; |
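
The allocation above switches from an open-coded size computation to struct_size(), which accounts for the header plus a trailing flexible array of descriptors and, in the kernel, also saturates on overflow. A userspace sketch of the size it computes, with illustrative stand-in fields for bam_desc_hw:

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins; the real structures live in bam_dma.c. */
struct bam_desc_hw { unsigned int addr, size, flags; };
struct bam_async_desc {
	unsigned int num_desc;
	struct bam_desc_hw desc[];	/* flexible array member */
};

/* Simplified version of struct_size(): header plus n trailing elements
 * (the kernel helper additionally saturates on arithmetic overflow). */
#define struct_size_sketch(p, member, n) \
	(sizeof(*(p)) + sizeof((p)->member[0]) * (size_t)(n))

int main(void)
{
	struct bam_async_desc *ad = NULL;	/* only used inside sizeof */
	size_t num_alloc = 8;

	printf("allocation for %zu descriptors: %zu bytes\n",
	       num_alloc, struct_size_sketch(ad, desc, num_alloc));
	return 0;
}
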
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 43d4b00b8138..411f91fde734 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c | |||
@@ -138,24 +138,25 @@ static void hidma_process_completed(struct hidma_chan *mchan) | |||
138 | desc = &mdesc->desc; | 138 | desc = &mdesc->desc; |
139 | last_cookie = desc->cookie; | 139 | last_cookie = desc->cookie; |
140 | 140 | ||
141 | llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch); | ||
142 | |||
141 | spin_lock_irqsave(&mchan->lock, irqflags); | 143 | spin_lock_irqsave(&mchan->lock, irqflags); |
144 | if (llstat == DMA_COMPLETE) { | ||
145 | mchan->last_success = last_cookie; | ||
146 | result.result = DMA_TRANS_NOERROR; | ||
147 | } else { | ||
148 | result.result = DMA_TRANS_ABORTED; | ||
149 | } | ||
150 | |||
142 | dma_cookie_complete(desc); | 151 | dma_cookie_complete(desc); |
143 | spin_unlock_irqrestore(&mchan->lock, irqflags); | 152 | spin_unlock_irqrestore(&mchan->lock, irqflags); |
144 | 153 | ||
145 | llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch); | ||
146 | dmaengine_desc_get_callback(desc, &cb); | 154 | dmaengine_desc_get_callback(desc, &cb); |
147 | 155 | ||
148 | dma_run_dependencies(desc); | 156 | dma_run_dependencies(desc); |
149 | 157 | ||
150 | spin_lock_irqsave(&mchan->lock, irqflags); | 158 | spin_lock_irqsave(&mchan->lock, irqflags); |
151 | list_move(&mdesc->node, &mchan->free); | 159 | list_move(&mdesc->node, &mchan->free); |
152 | |||
153 | if (llstat == DMA_COMPLETE) { | ||
154 | mchan->last_success = last_cookie; | ||
155 | result.result = DMA_TRANS_NOERROR; | ||
156 | } else | ||
157 | result.result = DMA_TRANS_ABORTED; | ||
158 | |||
159 | spin_unlock_irqrestore(&mchan->lock, irqflags); | 160 | spin_unlock_irqrestore(&mchan->lock, irqflags); |
160 | 161 | ||
161 | dmaengine_desc_callback_invoke(&cb, &result); | 162 | dmaengine_desc_callback_invoke(&cb, &result); |
@@ -415,6 +416,7 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src, | |||
415 | if (!mdesc) | 416 | if (!mdesc) |
416 | return NULL; | 417 | return NULL; |
417 | 418 | ||
419 | mdesc->desc.flags = flags; | ||
418 | hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, | 420 | hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, |
419 | src, dest, len, flags, | 421 | src, dest, len, flags, |
420 | HIDMA_TRE_MEMCPY); | 422 | HIDMA_TRE_MEMCPY); |
@@ -447,6 +449,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value, | |||
447 | if (!mdesc) | 449 | if (!mdesc) |
448 | return NULL; | 450 | return NULL; |
449 | 451 | ||
452 | mdesc->desc.flags = flags; | ||
450 | hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, | 453 | hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, |
451 | value, dest, len, flags, | 454 | value, dest, len, flags, |
452 | HIDMA_TRE_MEMSET); | 455 | HIDMA_TRE_MEMSET); |
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c index d64edeb6771a..681de12f4c67 100644 --- a/drivers/dma/qcom/hidma_mgmt.c +++ b/drivers/dma/qcom/hidma_mgmt.c | |||
@@ -423,9 +423,8 @@ static int __init hidma_mgmt_init(void) | |||
423 | hidma_mgmt_of_populate_channels(child); | 423 | hidma_mgmt_of_populate_channels(child); |
424 | } | 424 | } |
425 | #endif | 425 | #endif |
426 | platform_driver_register(&hidma_mgmt_driver); | 426 | return platform_driver_register(&hidma_mgmt_driver); |
427 | 427 | ||
428 | return 0; | ||
429 | } | 428 | } |
430 | module_init(hidma_mgmt_init); | 429 | module_init(hidma_mgmt_init); |
431 | MODULE_LICENSE("GPL v2"); | 430 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 784d5f1a473b..3fae23768b47 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c | |||
@@ -705,7 +705,6 @@ static int sa11x0_dma_device_pause(struct dma_chan *chan) | |||
705 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | 705 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
706 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); | 706 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); |
707 | struct sa11x0_dma_phy *p; | 707 | struct sa11x0_dma_phy *p; |
708 | LIST_HEAD(head); | ||
709 | unsigned long flags; | 708 | unsigned long flags; |
710 | 709 | ||
711 | dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); | 710 | dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); |
@@ -732,7 +731,6 @@ static int sa11x0_dma_device_resume(struct dma_chan *chan) | |||
732 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | 731 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
733 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); | 732 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); |
734 | struct sa11x0_dma_phy *p; | 733 | struct sa11x0_dma_phy *p; |
735 | LIST_HEAD(head); | ||
736 | unsigned long flags; | 734 | unsigned long flags; |
737 | 735 | ||
738 | dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); | 736 | dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); |
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index 7f7184c3cf95..59403f6d008a 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c | |||
@@ -694,6 +694,8 @@ static int usb_dmac_runtime_resume(struct device *dev) | |||
694 | #endif /* CONFIG_PM */ | 694 | #endif /* CONFIG_PM */ |
695 | 695 | ||
696 | static const struct dev_pm_ops usb_dmac_pm = { | 696 | static const struct dev_pm_ops usb_dmac_pm = { |
697 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, | ||
698 | pm_runtime_force_resume) | ||
697 | SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume, | 699 | SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume, |
698 | NULL) | 700 | NULL) |
699 | }; | 701 | }; |
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index e2f016700fcc..48431e2da987 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c | |||
@@ -580,15 +580,7 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id) | |||
580 | 580 | ||
581 | static int sprd_dma_alloc_chan_resources(struct dma_chan *chan) | 581 | static int sprd_dma_alloc_chan_resources(struct dma_chan *chan) |
582 | { | 582 | { |
583 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); | 583 | return pm_runtime_get_sync(chan->device->dev); |
584 | int ret; | ||
585 | |||
586 | ret = pm_runtime_get_sync(chan->device->dev); | ||
587 | if (ret < 0) | ||
588 | return ret; | ||
589 | |||
590 | schan->dev_id = SPRD_DMA_SOFTWARE_UID; | ||
591 | return 0; | ||
592 | } | 584 | } |
593 | 585 | ||
594 | static void sprd_dma_free_chan_resources(struct dma_chan *chan) | 586 | static void sprd_dma_free_chan_resources(struct dma_chan *chan) |
@@ -1021,13 +1013,10 @@ static void sprd_dma_free_desc(struct virt_dma_desc *vd) | |||
1021 | static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param) | 1013 | static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param) |
1022 | { | 1014 | { |
1023 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); | 1015 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); |
1024 | struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan); | 1016 | u32 slave_id = *(u32 *)param; |
1025 | u32 req = *(u32 *)param; | ||
1026 | 1017 | ||
1027 | if (req < sdev->total_chns) | 1018 | schan->dev_id = slave_id; |
1028 | return req == schan->chn_num + 1; | 1019 | return true; |
1029 | else | ||
1030 | return false; | ||
1031 | } | 1020 | } |
1032 | 1021 | ||
1033 | static int sprd_dma_probe(struct platform_device *pdev) | 1022 | static int sprd_dma_probe(struct platform_device *pdev) |
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c index 07c20aa2e955..bc7a1de3f29b 100644 --- a/drivers/dma/st_fdma.c +++ b/drivers/dma/st_fdma.c | |||
@@ -243,8 +243,7 @@ static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan, | |||
243 | struct st_fdma_desc *fdesc; | 243 | struct st_fdma_desc *fdesc; |
244 | int i; | 244 | int i; |
245 | 245 | ||
246 | fdesc = kzalloc(sizeof(*fdesc) + | 246 | fdesc = kzalloc(struct_size(fdesc, node, sg_len), GFP_NOWAIT); |
247 | sizeof(struct st_fdma_sw_node) * sg_len, GFP_NOWAIT); | ||
248 | if (!fdesc) | 247 | if (!fdesc) |
249 | return NULL; | 248 | return NULL; |
250 | 249 | ||
@@ -294,8 +293,6 @@ static void st_fdma_free_chan_res(struct dma_chan *chan) | |||
294 | struct rproc *rproc = fchan->fdev->slim_rproc->rproc; | 293 | struct rproc *rproc = fchan->fdev->slim_rproc->rproc; |
295 | unsigned long flags; | 294 | unsigned long flags; |
296 | 295 | ||
297 | LIST_HEAD(head); | ||
298 | |||
299 | dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n", | 296 | dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n", |
300 | __func__, fchan->vchan.chan.chan_id); | 297 | __func__, fchan->vchan.chan.chan_id); |
301 | 298 | ||
@@ -626,7 +623,6 @@ static void st_fdma_issue_pending(struct dma_chan *chan) | |||
626 | static int st_fdma_pause(struct dma_chan *chan) | 623 | static int st_fdma_pause(struct dma_chan *chan) |
627 | { | 624 | { |
628 | unsigned long flags; | 625 | unsigned long flags; |
629 | LIST_HEAD(head); | ||
630 | struct st_fdma_chan *fchan = to_st_fdma_chan(chan); | 626 | struct st_fdma_chan *fchan = to_st_fdma_chan(chan); |
631 | int ch_id = fchan->vchan.chan.chan_id; | 627 | int ch_id = fchan->vchan.chan.chan_id; |
632 | unsigned long cmd = FDMA_CMD_PAUSE(ch_id); | 628 | unsigned long cmd = FDMA_CMD_PAUSE(ch_id); |
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 4903a408fc14..ba239b529fa9 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/of_device.h> | 23 | #include <linux/of_device.h> |
24 | #include <linux/of_dma.h> | 24 | #include <linux/of_dma.h> |
25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
26 | #include <linux/pm_runtime.h> | ||
26 | #include <linux/reset.h> | 27 | #include <linux/reset.h> |
27 | #include <linux/sched.h> | 28 | #include <linux/sched.h> |
28 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
@@ -641,12 +642,13 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid) | |||
641 | { | 642 | { |
642 | struct stm32_dma_chan *chan = devid; | 643 | struct stm32_dma_chan *chan = devid; |
643 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); | 644 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); |
644 | u32 status, scr; | 645 | u32 status, scr, sfcr; |
645 | 646 | ||
646 | spin_lock(&chan->vchan.lock); | 647 | spin_lock(&chan->vchan.lock); |
647 | 648 | ||
648 | status = stm32_dma_irq_status(chan); | 649 | status = stm32_dma_irq_status(chan); |
649 | scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)); | 650 | scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)); |
651 | sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id)); | ||
650 | 652 | ||
651 | if (status & STM32_DMA_TCI) { | 653 | if (status & STM32_DMA_TCI) { |
652 | stm32_dma_irq_clear(chan, STM32_DMA_TCI); | 654 | stm32_dma_irq_clear(chan, STM32_DMA_TCI); |
@@ -661,10 +663,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid) | |||
661 | if (status & STM32_DMA_FEI) { | 663 | if (status & STM32_DMA_FEI) { |
662 | stm32_dma_irq_clear(chan, STM32_DMA_FEI); | 664 | stm32_dma_irq_clear(chan, STM32_DMA_FEI); |
663 | status &= ~STM32_DMA_FEI; | 665 | status &= ~STM32_DMA_FEI; |
664 | if (!(scr & STM32_DMA_SCR_EN)) | 666 | if (sfcr & STM32_DMA_SFCR_FEIE) { |
665 | dev_err(chan2dev(chan), "FIFO Error\n"); | 667 | if (!(scr & STM32_DMA_SCR_EN)) |
666 | else | 668 | dev_err(chan2dev(chan), "FIFO Error\n"); |
667 | dev_dbg(chan2dev(chan), "FIFO over/underrun\n"); | 669 | else |
670 | dev_dbg(chan2dev(chan), "FIFO over/underrun\n"); | ||
671 | } | ||
668 | } | 672 | } |
669 | if (status) { | 673 | if (status) { |
670 | stm32_dma_irq_clear(chan, status); | 674 | stm32_dma_irq_clear(chan, status); |
@@ -1112,15 +1116,14 @@ static int stm32_dma_alloc_chan_resources(struct dma_chan *c) | |||
1112 | int ret; | 1116 | int ret; |
1113 | 1117 | ||
1114 | chan->config_init = false; | 1118 | chan->config_init = false; |
1115 | ret = clk_prepare_enable(dmadev->clk); | 1119 | |
1116 | if (ret < 0) { | 1120 | ret = pm_runtime_get_sync(dmadev->ddev.dev); |
1117 | dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret); | 1121 | if (ret < 0) |
1118 | return ret; | 1122 | return ret; |
1119 | } | ||
1120 | 1123 | ||
1121 | ret = stm32_dma_disable_chan(chan); | 1124 | ret = stm32_dma_disable_chan(chan); |
1122 | if (ret < 0) | 1125 | if (ret < 0) |
1123 | clk_disable_unprepare(dmadev->clk); | 1126 | pm_runtime_put(dmadev->ddev.dev); |
1124 | 1127 | ||
1125 | return ret; | 1128 | return ret; |
1126 | } | 1129 | } |
@@ -1140,7 +1143,7 @@ static void stm32_dma_free_chan_resources(struct dma_chan *c) | |||
1140 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | 1143 | spin_unlock_irqrestore(&chan->vchan.lock, flags); |
1141 | } | 1144 | } |
1142 | 1145 | ||
1143 | clk_disable_unprepare(dmadev->clk); | 1146 | pm_runtime_put(dmadev->ddev.dev); |
1144 | 1147 | ||
1145 | vchan_free_chan_resources(to_virt_chan(c)); | 1148 | vchan_free_chan_resources(to_virt_chan(c)); |
1146 | } | 1149 | } |
@@ -1240,6 +1243,12 @@ static int stm32_dma_probe(struct platform_device *pdev) | |||
1240 | return PTR_ERR(dmadev->clk); | 1243 | return PTR_ERR(dmadev->clk); |
1241 | } | 1244 | } |
1242 | 1245 | ||
1246 | ret = clk_prepare_enable(dmadev->clk); | ||
1247 | if (ret < 0) { | ||
1248 | dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret); | ||
1249 | return ret; | ||
1250 | } | ||
1251 | |||
1243 | dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node, | 1252 | dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node, |
1244 | "st,mem2mem"); | 1253 | "st,mem2mem"); |
1245 | 1254 | ||
@@ -1289,7 +1298,7 @@ static int stm32_dma_probe(struct platform_device *pdev) | |||
1289 | 1298 | ||
1290 | ret = dma_async_device_register(dd); | 1299 | ret = dma_async_device_register(dd); |
1291 | if (ret) | 1300 | if (ret) |
1292 | return ret; | 1301 | goto clk_free; |
1293 | 1302 | ||
1294 | for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) { | 1303 | for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) { |
1295 | chan = &dmadev->chan[i]; | 1304 | chan = &dmadev->chan[i]; |
@@ -1321,20 +1330,58 @@ static int stm32_dma_probe(struct platform_device *pdev) | |||
1321 | 1330 | ||
1322 | platform_set_drvdata(pdev, dmadev); | 1331 | platform_set_drvdata(pdev, dmadev); |
1323 | 1332 | ||
1333 | pm_runtime_set_active(&pdev->dev); | ||
1334 | pm_runtime_enable(&pdev->dev); | ||
1335 | pm_runtime_get_noresume(&pdev->dev); | ||
1336 | pm_runtime_put(&pdev->dev); | ||
1337 | |||
1324 | dev_info(&pdev->dev, "STM32 DMA driver registered\n"); | 1338 | dev_info(&pdev->dev, "STM32 DMA driver registered\n"); |
1325 | 1339 | ||
1326 | return 0; | 1340 | return 0; |
1327 | 1341 | ||
1328 | err_unregister: | 1342 | err_unregister: |
1329 | dma_async_device_unregister(dd); | 1343 | dma_async_device_unregister(dd); |
1344 | clk_free: | ||
1345 | clk_disable_unprepare(dmadev->clk); | ||
1330 | 1346 | ||
1331 | return ret; | 1347 | return ret; |
1332 | } | 1348 | } |
1333 | 1349 | ||
1350 | #ifdef CONFIG_PM | ||
1351 | static int stm32_dma_runtime_suspend(struct device *dev) | ||
1352 | { | ||
1353 | struct stm32_dma_device *dmadev = dev_get_drvdata(dev); | ||
1354 | |||
1355 | clk_disable_unprepare(dmadev->clk); | ||
1356 | |||
1357 | return 0; | ||
1358 | } | ||
1359 | |||
1360 | static int stm32_dma_runtime_resume(struct device *dev) | ||
1361 | { | ||
1362 | struct stm32_dma_device *dmadev = dev_get_drvdata(dev); | ||
1363 | int ret; | ||
1364 | |||
1365 | ret = clk_prepare_enable(dmadev->clk); | ||
1366 | if (ret) { | ||
1367 | dev_err(dev, "failed to prepare_enable clock\n"); | ||
1368 | return ret; | ||
1369 | } | ||
1370 | |||
1371 | return 0; | ||
1372 | } | ||
1373 | #endif | ||
1374 | |||
1375 | static const struct dev_pm_ops stm32_dma_pm_ops = { | ||
1376 | SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend, | ||
1377 | stm32_dma_runtime_resume, NULL) | ||
1378 | }; | ||
1379 | |||
1334 | static struct platform_driver stm32_dma_driver = { | 1380 | static struct platform_driver stm32_dma_driver = { |
1335 | .driver = { | 1381 | .driver = { |
1336 | .name = "stm32-dma", | 1382 | .name = "stm32-dma", |
1337 | .of_match_table = stm32_dma_of_match, | 1383 | .of_match_table = stm32_dma_of_match, |
1384 | .pm = &stm32_dma_pm_ops, | ||
1338 | }, | 1385 | }, |
1339 | }; | 1386 | }; |
1340 | 1387 | ||
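
Taken together, the stm32-dma hunks move clock handling from explicit clk_prepare_enable()/clk_disable_unprepare() calls in the channel hooks to runtime PM: the clock is enabled once at probe, and afterwards the runtime-PM usage count decides when the suspend/resume callbacks gate it. A toy refcounting model of that behaviour; the helper names below are illustrative, not the kernel API:

#include <stdio.h>

/* Illustrative helpers only; not the kernel's runtime-PM API. */
static int usage_count;

static void runtime_resume(void)  { puts("clk_prepare_enable()"); }
static void runtime_suspend(void) { puts("clk_disable_unprepare()"); }

static void pm_get(void) { if (usage_count++ == 0) runtime_resume(); }
static void pm_put(void) { if (--usage_count == 0) runtime_suspend(); }

int main(void)
{
	pm_get();	/* alloc_chan_resources() on channel A */
	pm_get();	/* alloc_chan_resources() on channel B */
	pm_put();	/* free_chan_resources() on A: clock stays on */
	pm_put();	/* free_chan_resources() on B: clock is gated */
	return 0;
}
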
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index b922db90939a..a67119199c45 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/of_device.h> | 29 | #include <linux/of_device.h> |
30 | #include <linux/of_dma.h> | 30 | #include <linux/of_dma.h> |
31 | #include <linux/pm_runtime.h> | ||
31 | #include <linux/reset.h> | 32 | #include <linux/reset.h> |
32 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
33 | #include <linux/spinlock.h> | 34 | #include <linux/spinlock.h> |
@@ -79,8 +80,7 @@ static void stm32_dmamux_free(struct device *dev, void *route_data) | |||
79 | stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0); | 80 | stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0); |
80 | clear_bit(mux->chan_id, dmamux->dma_inuse); | 81 | clear_bit(mux->chan_id, dmamux->dma_inuse); |
81 | 82 | ||
82 | if (!IS_ERR(dmamux->clk)) | 83 | pm_runtime_put_sync(dev); |
83 | clk_disable(dmamux->clk); | ||
84 | 84 | ||
85 | spin_unlock_irqrestore(&dmamux->lock, flags); | 85 | spin_unlock_irqrestore(&dmamux->lock, flags); |
86 | 86 | ||
@@ -146,13 +146,10 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec, | |||
146 | 146 | ||
147 | /* Set dma request */ | 147 | /* Set dma request */ |
148 | spin_lock_irqsave(&dmamux->lock, flags); | 148 | spin_lock_irqsave(&dmamux->lock, flags); |
149 | if (!IS_ERR(dmamux->clk)) { | 149 | ret = pm_runtime_get_sync(&pdev->dev); |
150 | ret = clk_enable(dmamux->clk); | 150 | if (ret < 0) { |
151 | if (ret < 0) { | 151 | spin_unlock_irqrestore(&dmamux->lock, flags); |
152 | spin_unlock_irqrestore(&dmamux->lock, flags); | 152 | goto error; |
153 | dev_err(&pdev->dev, "clk_prep_enable issue: %d\n", ret); | ||
154 | goto error; | ||
155 | } | ||
156 | } | 153 | } |
157 | spin_unlock_irqrestore(&dmamux->lock, flags); | 154 | spin_unlock_irqrestore(&dmamux->lock, flags); |
158 | 155 | ||
@@ -254,6 +251,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev) | |||
254 | dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n", | 251 | dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n", |
255 | stm32_dmamux->dmamux_requests); | 252 | stm32_dmamux->dmamux_requests); |
256 | } | 253 | } |
254 | pm_runtime_get_noresume(&pdev->dev); | ||
257 | 255 | ||
258 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 256 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
259 | iomem = devm_ioremap_resource(&pdev->dev, res); | 257 | iomem = devm_ioremap_resource(&pdev->dev, res); |
@@ -282,6 +280,8 @@ static int stm32_dmamux_probe(struct platform_device *pdev) | |||
282 | stm32_dmamux->dmarouter.route_free = stm32_dmamux_free; | 280 | stm32_dmamux->dmarouter.route_free = stm32_dmamux_free; |
283 | 281 | ||
284 | platform_set_drvdata(pdev, stm32_dmamux); | 282 | platform_set_drvdata(pdev, stm32_dmamux); |
283 | pm_runtime_set_active(&pdev->dev); | ||
284 | pm_runtime_enable(&pdev->dev); | ||
285 | 285 | ||
286 | if (!IS_ERR(stm32_dmamux->clk)) { | 286 | if (!IS_ERR(stm32_dmamux->clk)) { |
287 | ret = clk_prepare_enable(stm32_dmamux->clk); | 287 | ret = clk_prepare_enable(stm32_dmamux->clk); |
@@ -291,17 +291,52 @@ static int stm32_dmamux_probe(struct platform_device *pdev) | |||
291 | } | 291 | } |
292 | } | 292 | } |
293 | 293 | ||
294 | pm_runtime_get_noresume(&pdev->dev); | ||
295 | |||
294 | /* Reset the dmamux */ | 296 | /* Reset the dmamux */ |
295 | for (i = 0; i < stm32_dmamux->dma_requests; i++) | 297 | for (i = 0; i < stm32_dmamux->dma_requests; i++) |
296 | stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0); | 298 | stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0); |
297 | 299 | ||
298 | if (!IS_ERR(stm32_dmamux->clk)) | 300 | pm_runtime_put(&pdev->dev); |
299 | clk_disable(stm32_dmamux->clk); | ||
300 | 301 | ||
301 | return of_dma_router_register(node, stm32_dmamux_route_allocate, | 302 | return of_dma_router_register(node, stm32_dmamux_route_allocate, |
302 | &stm32_dmamux->dmarouter); | 303 | &stm32_dmamux->dmarouter); |
303 | } | 304 | } |
304 | 305 | ||
306 | #ifdef CONFIG_PM | ||
307 | static int stm32_dmamux_runtime_suspend(struct device *dev) | ||
308 | { | ||
309 | struct platform_device *pdev = | ||
310 | container_of(dev, struct platform_device, dev); | ||
311 | struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev); | ||
312 | |||
313 | clk_disable_unprepare(stm32_dmamux->clk); | ||
314 | |||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | static int stm32_dmamux_runtime_resume(struct device *dev) | ||
319 | { | ||
320 | struct platform_device *pdev = | ||
321 | container_of(dev, struct platform_device, dev); | ||
322 | struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev); | ||
323 | int ret; | ||
324 | |||
325 | ret = clk_prepare_enable(stm32_dmamux->clk); | ||
326 | if (ret) { | ||
327 | dev_err(&pdev->dev, "failed to prepare_enable clock\n"); | ||
328 | return ret; | ||
329 | } | ||
330 | |||
331 | return 0; | ||
332 | } | ||
333 | #endif | ||
334 | |||
335 | static const struct dev_pm_ops stm32_dmamux_pm_ops = { | ||
336 | SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend, | ||
337 | stm32_dmamux_runtime_resume, NULL) | ||
338 | }; | ||
339 | |||
305 | static const struct of_device_id stm32_dmamux_match[] = { | 340 | static const struct of_device_id stm32_dmamux_match[] = { |
306 | { .compatible = "st,stm32h7-dmamux" }, | 341 | { .compatible = "st,stm32h7-dmamux" }, |
307 | {}, | 342 | {}, |
@@ -312,6 +347,7 @@ static struct platform_driver stm32_dmamux_driver = { | |||
312 | .driver = { | 347 | .driver = { |
313 | .name = "stm32-dmamux", | 348 | .name = "stm32-dmamux", |
314 | .of_match_table = stm32_dmamux_match, | 349 | .of_match_table = stm32_dmamux_match, |
350 | .pm = &stm32_dmamux_pm_ops, | ||
315 | }, | 351 | }, |
316 | }; | 352 | }; |
317 | 353 | ||
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 390e4cae0e1a..4e0eede599a8 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/of_device.h> | 37 | #include <linux/of_device.h> |
38 | #include <linux/of_dma.h> | 38 | #include <linux/of_dma.h> |
39 | #include <linux/platform_device.h> | 39 | #include <linux/platform_device.h> |
40 | #include <linux/pm_runtime.h> | ||
40 | #include <linux/reset.h> | 41 | #include <linux/reset.h> |
41 | #include <linux/slab.h> | 42 | #include <linux/slab.h> |
42 | 43 | ||
@@ -1456,15 +1457,13 @@ static int stm32_mdma_alloc_chan_resources(struct dma_chan *c) | |||
1456 | return -ENOMEM; | 1457 | return -ENOMEM; |
1457 | } | 1458 | } |
1458 | 1459 | ||
1459 | ret = clk_prepare_enable(dmadev->clk); | 1460 | ret = pm_runtime_get_sync(dmadev->ddev.dev); |
1460 | if (ret < 0) { | 1461 | if (ret < 0) |
1461 | dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret); | ||
1462 | return ret; | 1462 | return ret; |
1463 | } | ||
1464 | 1463 | ||
1465 | ret = stm32_mdma_disable_chan(chan); | 1464 | ret = stm32_mdma_disable_chan(chan); |
1466 | if (ret < 0) | 1465 | if (ret < 0) |
1467 | clk_disable_unprepare(dmadev->clk); | 1466 | pm_runtime_put(dmadev->ddev.dev); |
1468 | 1467 | ||
1469 | return ret; | 1468 | return ret; |
1470 | } | 1469 | } |
@@ -1484,7 +1483,7 @@ static void stm32_mdma_free_chan_resources(struct dma_chan *c) | |||
1484 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | 1483 | spin_unlock_irqrestore(&chan->vchan.lock, flags); |
1485 | } | 1484 | } |
1486 | 1485 | ||
1487 | clk_disable_unprepare(dmadev->clk); | 1486 | pm_runtime_put(dmadev->ddev.dev); |
1488 | vchan_free_chan_resources(to_virt_chan(c)); | 1487 | vchan_free_chan_resources(to_virt_chan(c)); |
1489 | dmam_pool_destroy(chan->desc_pool); | 1488 | dmam_pool_destroy(chan->desc_pool); |
1490 | chan->desc_pool = NULL; | 1489 | chan->desc_pool = NULL; |
@@ -1579,9 +1578,11 @@ static int stm32_mdma_probe(struct platform_device *pdev) | |||
1579 | 1578 | ||
1580 | dmadev->nr_channels = nr_channels; | 1579 | dmadev->nr_channels = nr_channels; |
1581 | dmadev->nr_requests = nr_requests; | 1580 | dmadev->nr_requests = nr_requests; |
1582 | device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks", | 1581 | ret = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks", |
1583 | dmadev->ahb_addr_masks, | 1582 | dmadev->ahb_addr_masks, |
1584 | count); | 1583 | count); |
1584 | if (ret) | ||
1585 | return ret; | ||
1585 | dmadev->nr_ahb_addr_masks = count; | 1586 | dmadev->nr_ahb_addr_masks = count; |
1586 | 1587 | ||
1587 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1588 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -1597,6 +1598,12 @@ static int stm32_mdma_probe(struct platform_device *pdev) | |||
1597 | return ret; | 1598 | return ret; |
1598 | } | 1599 | } |
1599 | 1600 | ||
1601 | ret = clk_prepare_enable(dmadev->clk); | ||
1602 | if (ret < 0) { | ||
1603 | dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret); | ||
1604 | return ret; | ||
1605 | } | ||
1606 | |||
1600 | dmadev->rst = devm_reset_control_get(&pdev->dev, NULL); | 1607 | dmadev->rst = devm_reset_control_get(&pdev->dev, NULL); |
1601 | if (!IS_ERR(dmadev->rst)) { | 1608 | if (!IS_ERR(dmadev->rst)) { |
1602 | reset_control_assert(dmadev->rst); | 1609 | reset_control_assert(dmadev->rst); |
@@ -1668,6 +1675,10 @@ static int stm32_mdma_probe(struct platform_device *pdev) | |||
1668 | } | 1675 | } |
1669 | 1676 | ||
1670 | platform_set_drvdata(pdev, dmadev); | 1677 | platform_set_drvdata(pdev, dmadev); |
1678 | pm_runtime_set_active(&pdev->dev); | ||
1679 | pm_runtime_enable(&pdev->dev); | ||
1680 | pm_runtime_get_noresume(&pdev->dev); | ||
1681 | pm_runtime_put(&pdev->dev); | ||
1671 | 1682 | ||
1672 | dev_info(&pdev->dev, "STM32 MDMA driver registered\n"); | 1683 | dev_info(&pdev->dev, "STM32 MDMA driver registered\n"); |
1673 | 1684 | ||
@@ -1677,11 +1688,42 @@ err_unregister: | |||
1677 | return ret; | 1688 | return ret; |
1678 | } | 1689 | } |
1679 | 1690 | ||
1691 | #ifdef CONFIG_PM | ||
1692 | static int stm32_mdma_runtime_suspend(struct device *dev) | ||
1693 | { | ||
1694 | struct stm32_mdma_device *dmadev = dev_get_drvdata(dev); | ||
1695 | |||
1696 | clk_disable_unprepare(dmadev->clk); | ||
1697 | |||
1698 | return 0; | ||
1699 | } | ||
1700 | |||
1701 | static int stm32_mdma_runtime_resume(struct device *dev) | ||
1702 | { | ||
1703 | struct stm32_mdma_device *dmadev = dev_get_drvdata(dev); | ||
1704 | int ret; | ||
1705 | |||
1706 | ret = clk_prepare_enable(dmadev->clk); | ||
1707 | if (ret) { | ||
1708 | dev_err(dev, "failed to prepare_enable clock\n"); | ||
1709 | return ret; | ||
1710 | } | ||
1711 | |||
1712 | return 0; | ||
1713 | } | ||
1714 | #endif | ||
1715 | |||
1716 | static const struct dev_pm_ops stm32_mdma_pm_ops = { | ||
1717 | SET_RUNTIME_PM_OPS(stm32_mdma_runtime_suspend, | ||
1718 | stm32_mdma_runtime_resume, NULL) | ||
1719 | }; | ||
1720 | |||
1680 | static struct platform_driver stm32_mdma_driver = { | 1721 | static struct platform_driver stm32_mdma_driver = { |
1681 | .probe = stm32_mdma_probe, | 1722 | .probe = stm32_mdma_probe, |
1682 | .driver = { | 1723 | .driver = { |
1683 | .name = "stm32-mdma", | 1724 | .name = "stm32-mdma", |
1684 | .of_match_table = stm32_mdma_of_match, | 1725 | .of_match_table = stm32_mdma_of_match, |
1726 | .pm = &stm32_mdma_pm_ops, | ||
1685 | }, | 1727 | }, |
1686 | }; | 1728 | }; |
1687 | 1729 | ||
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 9a558e30c461..cf462b1abc0b 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c | |||
@@ -38,6 +38,9 @@ | |||
38 | 38 | ||
39 | #include "dmaengine.h" | 39 | #include "dmaengine.h" |
40 | 40 | ||
41 | #define CREATE_TRACE_POINTS | ||
42 | #include <trace/events/tegra_apb_dma.h> | ||
43 | |||
41 | #define TEGRA_APBDMA_GENERAL 0x0 | 44 | #define TEGRA_APBDMA_GENERAL 0x0 |
42 | #define TEGRA_APBDMA_GENERAL_ENABLE BIT(31) | 45 | #define TEGRA_APBDMA_GENERAL_ENABLE BIT(31) |
43 | 46 | ||
@@ -146,7 +149,7 @@ struct tegra_dma_channel_regs { | |||
146 | }; | 149 | }; |
147 | 150 | ||
148 | /* | 151 | /* |
149 | * tegra_dma_sg_req: Dma request details to configure hardware. This | 152 | * tegra_dma_sg_req: DMA request details to configure hardware. This |
150 | * contains the details for one transfer to configure DMA hw. | 153 | * contains the details for one transfer to configure DMA hw. |
151 | * The client's request for data transfer can be broken into multiple | 154 | * The client's request for data transfer can be broken into multiple |
152 | * sub-transfer as per requester details and hw support. | 155 | * sub-transfer as per requester details and hw support. |
@@ -155,7 +158,7 @@ struct tegra_dma_channel_regs { | |||
155 | */ | 158 | */ |
156 | struct tegra_dma_sg_req { | 159 | struct tegra_dma_sg_req { |
157 | struct tegra_dma_channel_regs ch_regs; | 160 | struct tegra_dma_channel_regs ch_regs; |
158 | int req_len; | 161 | unsigned int req_len; |
159 | bool configured; | 162 | bool configured; |
160 | bool last_sg; | 163 | bool last_sg; |
161 | struct list_head node; | 164 | struct list_head node; |
@@ -169,8 +172,8 @@ struct tegra_dma_sg_req { | |||
169 | */ | 172 | */ |
170 | struct tegra_dma_desc { | 173 | struct tegra_dma_desc { |
171 | struct dma_async_tx_descriptor txd; | 174 | struct dma_async_tx_descriptor txd; |
172 | int bytes_requested; | 175 | unsigned int bytes_requested; |
173 | int bytes_transferred; | 176 | unsigned int bytes_transferred; |
174 | enum dma_status dma_status; | 177 | enum dma_status dma_status; |
175 | struct list_head node; | 178 | struct list_head node; |
176 | struct list_head tx_list; | 179 | struct list_head tx_list; |
@@ -186,7 +189,7 @@ typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc, | |||
186 | /* tegra_dma_channel: Channel specific information */ | 189 | /* tegra_dma_channel: Channel specific information */ |
187 | struct tegra_dma_channel { | 190 | struct tegra_dma_channel { |
188 | struct dma_chan dma_chan; | 191 | struct dma_chan dma_chan; |
189 | char name[30]; | 192 | char name[12]; |
190 | bool config_init; | 193 | bool config_init; |
191 | int id; | 194 | int id; |
192 | int irq; | 195 | int irq; |
@@ -574,7 +577,7 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc, | |||
574 | struct tegra_dma_sg_req *hsgreq = NULL; | 577 | struct tegra_dma_sg_req *hsgreq = NULL; |
575 | 578 | ||
576 | if (list_empty(&tdc->pending_sg_req)) { | 579 | if (list_empty(&tdc->pending_sg_req)) { |
577 | dev_err(tdc2dev(tdc), "Dma is running without req\n"); | 580 | dev_err(tdc2dev(tdc), "DMA is running without req\n"); |
578 | tegra_dma_stop(tdc); | 581 | tegra_dma_stop(tdc); |
579 | return false; | 582 | return false; |
580 | } | 583 | } |
@@ -587,7 +590,7 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc, | |||
587 | hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); | 590 | hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); |
588 | if (!hsgreq->configured) { | 591 | if (!hsgreq->configured) { |
589 | tegra_dma_stop(tdc); | 592 | tegra_dma_stop(tdc); |
590 | dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n"); | 593 | dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n"); |
591 | tegra_dma_abort_all(tdc); | 594 | tegra_dma_abort_all(tdc); |
592 | return false; | 595 | return false; |
593 | } | 596 | } |
@@ -636,7 +639,10 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc, | |||
636 | 639 | ||
637 | sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); | 640 | sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); |
638 | dma_desc = sgreq->dma_desc; | 641 | dma_desc = sgreq->dma_desc; |
639 | dma_desc->bytes_transferred += sgreq->req_len; | 642 | /* if we dma for long enough the transfer count will wrap */ |
643 | dma_desc->bytes_transferred = | ||
644 | (dma_desc->bytes_transferred + sgreq->req_len) % | ||
645 | dma_desc->bytes_requested; | ||
640 | 646 | ||
641 | /* Callback need to be call */ | 647 | /* Callback need to be call */ |
642 | if (!dma_desc->cb_count) | 648 | if (!dma_desc->cb_count) |
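
For cyclic transfers the running byte counter now wraps at bytes_requested, so a long-running transfer no longer overflows the counter and residue reporting stays within the buffer. A short sketch of the wrap-around arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int bytes_requested   = 4096;	/* cyclic buffer size */
	unsigned int req_len           = 1024;	/* one period per IRQ */
	unsigned int bytes_transferred = 0;

	for (int period = 1; period <= 6; period++) {
		/* same wrap as handle_cont_sngl_cycle_dma_done() */
		bytes_transferred = (bytes_transferred + req_len) %
				    bytes_requested;
		printf("after period %d: offset %u in the buffer\n",
		       period, bytes_transferred);
	}
	return 0;
}
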
@@ -669,6 +675,8 @@ static void tegra_dma_tasklet(unsigned long data) | |||
669 | dmaengine_desc_get_callback(&dma_desc->txd, &cb); | 675 | dmaengine_desc_get_callback(&dma_desc->txd, &cb); |
670 | cb_count = dma_desc->cb_count; | 676 | cb_count = dma_desc->cb_count; |
671 | dma_desc->cb_count = 0; | 677 | dma_desc->cb_count = 0; |
678 | trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count, | ||
679 | cb.callback); | ||
672 | spin_unlock_irqrestore(&tdc->lock, flags); | 680 | spin_unlock_irqrestore(&tdc->lock, flags); |
673 | while (cb_count--) | 681 | while (cb_count--) |
674 | dmaengine_desc_callback_invoke(&cb, NULL); | 682 | dmaengine_desc_callback_invoke(&cb, NULL); |
@@ -685,6 +693,7 @@ static irqreturn_t tegra_dma_isr(int irq, void *dev_id) | |||
685 | 693 | ||
686 | spin_lock_irqsave(&tdc->lock, flags); | 694 | spin_lock_irqsave(&tdc->lock, flags); |
687 | 695 | ||
696 | trace_tegra_dma_isr(&tdc->dma_chan, irq); | ||
688 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | 697 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); |
689 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { | 698 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { |
690 | tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status); | 699 | tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status); |
@@ -843,6 +852,7 @@ found: | |||
843 | dma_set_residue(txstate, residual); | 852 | dma_set_residue(txstate, residual); |
844 | } | 853 | } |
845 | 854 | ||
855 | trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate); | ||
846 | spin_unlock_irqrestore(&tdc->lock, flags); | 856 | spin_unlock_irqrestore(&tdc->lock, flags); |
847 | return ret; | 857 | return ret; |
848 | } | 858 | } |
@@ -919,7 +929,7 @@ static int get_transfer_param(struct tegra_dma_channel *tdc, | |||
919 | return 0; | 929 | return 0; |
920 | 930 | ||
921 | default: | 931 | default: |
922 | dev_err(tdc2dev(tdc), "Dma direction is not supported\n"); | 932 | dev_err(tdc2dev(tdc), "DMA direction is not supported\n"); |
923 | return -EINVAL; | 933 | return -EINVAL; |
924 | } | 934 | } |
925 | return -EINVAL; | 935 | return -EINVAL; |
@@ -952,7 +962,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( | |||
952 | enum dma_slave_buswidth slave_bw; | 962 | enum dma_slave_buswidth slave_bw; |
953 | 963 | ||
954 | if (!tdc->config_init) { | 964 | if (!tdc->config_init) { |
955 | dev_err(tdc2dev(tdc), "dma channel is not configured\n"); | 965 | dev_err(tdc2dev(tdc), "DMA channel is not configured\n"); |
956 | return NULL; | 966 | return NULL; |
957 | } | 967 | } |
958 | if (sg_len < 1) { | 968 | if (sg_len < 1) { |
@@ -985,7 +995,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( | |||
985 | 995 | ||
986 | dma_desc = tegra_dma_desc_get(tdc); | 996 | dma_desc = tegra_dma_desc_get(tdc); |
987 | if (!dma_desc) { | 997 | if (!dma_desc) { |
988 | dev_err(tdc2dev(tdc), "Dma descriptors not available\n"); | 998 | dev_err(tdc2dev(tdc), "DMA descriptors not available\n"); |
989 | return NULL; | 999 | return NULL; |
990 | } | 1000 | } |
991 | INIT_LIST_HEAD(&dma_desc->tx_list); | 1001 | INIT_LIST_HEAD(&dma_desc->tx_list); |
@@ -1005,14 +1015,14 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( | |||
1005 | if ((len & 3) || (mem & 3) || | 1015 | if ((len & 3) || (mem & 3) || |
1006 | (len > tdc->tdma->chip_data->max_dma_count)) { | 1016 | (len > tdc->tdma->chip_data->max_dma_count)) { |
1007 | dev_err(tdc2dev(tdc), | 1017 | dev_err(tdc2dev(tdc), |
1008 | "Dma length/memory address is not supported\n"); | 1018 | "DMA length/memory address is not supported\n"); |
1009 | tegra_dma_desc_put(tdc, dma_desc); | 1019 | tegra_dma_desc_put(tdc, dma_desc); |
1010 | return NULL; | 1020 | return NULL; |
1011 | } | 1021 | } |
1012 | 1022 | ||
1013 | sg_req = tegra_dma_sg_req_get(tdc); | 1023 | sg_req = tegra_dma_sg_req_get(tdc); |
1014 | if (!sg_req) { | 1024 | if (!sg_req) { |
1015 | dev_err(tdc2dev(tdc), "Dma sg-req not available\n"); | 1025 | dev_err(tdc2dev(tdc), "DMA sg-req not available\n"); |
1016 | tegra_dma_desc_put(tdc, dma_desc); | 1026 | tegra_dma_desc_put(tdc, dma_desc); |
1017 | return NULL; | 1027 | return NULL; |
1018 | } | 1028 | } |
@@ -1087,7 +1097,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( | |||
1087 | * terminating the DMA. | 1097 | * terminating the DMA. |
1088 | */ | 1098 | */ |
1089 | if (tdc->busy) { | 1099 | if (tdc->busy) { |
1090 | dev_err(tdc2dev(tdc), "Request not allowed when dma running\n"); | 1100 | dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n"); |
1091 | return NULL; | 1101 | return NULL; |
1092 | } | 1102 | } |
1093 | 1103 | ||
@@ -1144,7 +1154,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( | |||
1144 | while (remain_len) { | 1154 | while (remain_len) { |
1145 | sg_req = tegra_dma_sg_req_get(tdc); | 1155 | sg_req = tegra_dma_sg_req_get(tdc); |
1146 | if (!sg_req) { | 1156 | if (!sg_req) { |
1147 | dev_err(tdc2dev(tdc), "Dma sg-req not available\n"); | 1157 | dev_err(tdc2dev(tdc), "DMA sg-req not available\n"); |
1148 | tegra_dma_desc_put(tdc, dma_desc); | 1158 | tegra_dma_desc_put(tdc, dma_desc); |
1149 | return NULL; | 1159 | return NULL; |
1150 | } | 1160 | } |
@@ -1319,8 +1329,9 @@ static int tegra_dma_probe(struct platform_device *pdev) | |||
1319 | return -ENODEV; | 1329 | return -ENODEV; |
1320 | } | 1330 | } |
1321 | 1331 | ||
1322 | tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * | 1332 | tdma = devm_kzalloc(&pdev->dev, |
1323 | sizeof(struct tegra_dma_channel), GFP_KERNEL); | 1333 | struct_size(tdma, channels, cdata->nr_channels), |
1334 | GFP_KERNEL); | ||
1324 | if (!tdma) | 1335 | if (!tdma) |
1325 | return -ENOMEM; | 1336 | return -ENOMEM; |
1326 | 1337 | ||
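This allocation and the matching ones in tegra210-adma.c and timb_dma.c below replace open-coded sizeof arithmetic with struct_size() from <linux/overflow.h>, which sizes a structure ending in a flexible array and saturates to SIZE_MAX if the multiplication overflows, so the allocation fails cleanly instead of coming back too small. A sketch of the idiom, with hypothetical names:

    #include <linux/device.h>
    #include <linux/overflow.h>
    #include <linux/slab.h>

    /* Hypothetical container with a flexible array of channels at the end. */
    struct demo_dma {
            unsigned int            nr_channels;
            struct demo_chan {
                    int             id;
            }                       channels[];
    };

    static struct demo_dma *demo_alloc(struct device *dev, unsigned int nr)
    {
            struct demo_dma *d;

            /* struct_size(d, channels, nr) == sizeof(*d) + nr * sizeof(d->channels[0]),
             * with overflow checking; 'd' is only used for its type here. */
            d = devm_kzalloc(dev, struct_size(d, channels, nr), GFP_KERNEL);
            if (d)
                    d->nr_channels = nr;
            return d;
    }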
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index b26256f23d67..5ec0dd97b397 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c | |||
@@ -678,8 +678,9 @@ static int tegra_adma_probe(struct platform_device *pdev) | |||
678 | return -ENODEV; | 678 | return -ENODEV; |
679 | } | 679 | } |
680 | 680 | ||
681 | tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * | 681 | tdma = devm_kzalloc(&pdev->dev, |
682 | sizeof(struct tegra_adma_chan), GFP_KERNEL); | 682 | struct_size(tdma, channels, cdata->nr_channels), |
683 | GFP_KERNEL); | ||
683 | if (!tdma) | 684 | if (!tdma) |
684 | return -ENOMEM; | 685 | return -ENOMEM; |
685 | 686 | ||
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index fc0f9c8766a8..afbb1c95b721 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c | |||
@@ -643,8 +643,8 @@ static int td_probe(struct platform_device *pdev) | |||
643 | DRIVER_NAME)) | 643 | DRIVER_NAME)) |
644 | return -EBUSY; | 644 | return -EBUSY; |
645 | 645 | ||
646 | td = kzalloc(sizeof(struct timb_dma) + | 646 | td = kzalloc(struct_size(td, channels, pdata->nr_channels), |
647 | sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL); | 647 | GFP_KERNEL); |
648 | if (!td) { | 648 | if (!td) { |
649 | err = -ENOMEM; | 649 | err = -ENOMEM; |
650 | goto err_release_region; | 650 | goto err_release_region; |
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index cb20b411493e..c43c1a154604 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c | |||
@@ -86,6 +86,7 @@ | |||
86 | #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6) | 86 | #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6) |
87 | #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5) | 87 | #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5) |
88 | #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4) | 88 | #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4) |
89 | #define XILINX_DMA_DMASR_SG_MASK BIT(3) | ||
89 | #define XILINX_DMA_DMASR_IDLE BIT(1) | 90 | #define XILINX_DMA_DMASR_IDLE BIT(1) |
90 | #define XILINX_DMA_DMASR_HALTED BIT(0) | 91 | #define XILINX_DMA_DMASR_HALTED BIT(0) |
91 | #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24) | 92 | #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24) |
@@ -161,7 +162,9 @@ | |||
161 | #define XILINX_DMA_REG_BTT 0x28 | 162 | #define XILINX_DMA_REG_BTT 0x28 |
162 | 163 | ||
163 | /* AXI DMA Specific Masks/Bit fields */ | 164 | /* AXI DMA Specific Masks/Bit fields */ |
164 | #define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0) | 165 | #define XILINX_DMA_MAX_TRANS_LEN_MIN 8 |
166 | #define XILINX_DMA_MAX_TRANS_LEN_MAX 23 | ||
167 | #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26 | ||
165 | #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) | 168 | #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) |
166 | #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) | 169 | #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) |
167 | #define XILINX_DMA_CR_COALESCE_SHIFT 16 | 170 | #define XILINX_DMA_CR_COALESCE_SHIFT 16 |
@@ -412,7 +415,6 @@ struct xilinx_dma_config { | |||
412 | * @dev: Device Structure | 415 | * @dev: Device Structure |
413 | * @common: DMA device structure | 416 | * @common: DMA device structure |
414 | * @chan: Driver specific DMA channel | 417 | * @chan: Driver specific DMA channel |
415 | * @has_sg: Specifies whether Scatter-Gather is present or not | ||
416 | * @mcdma: Specifies whether Multi-Channel is present or not | 418 | * @mcdma: Specifies whether Multi-Channel is present or not |
417 | * @flush_on_fsync: Flush on frame sync | 419 | * @flush_on_fsync: Flush on frame sync |
418 | * @ext_addr: Indicates 64 bit addressing is supported by dma device | 420 | * @ext_addr: Indicates 64 bit addressing is supported by dma device |
@@ -425,13 +427,13 @@ struct xilinx_dma_config { | |||
425 | * @rxs_clk: DMA s2mm stream clock | 427 | * @rxs_clk: DMA s2mm stream clock |
426 | * @nr_channels: Number of channels DMA device supports | 428 | * @nr_channels: Number of channels DMA device supports |
427 | * @chan_id: DMA channel identifier | 429 | * @chan_id: DMA channel identifier |
430 | * @max_buffer_len: Max buffer length | ||
428 | */ | 431 | */ |
429 | struct xilinx_dma_device { | 432 | struct xilinx_dma_device { |
430 | void __iomem *regs; | 433 | void __iomem *regs; |
431 | struct device *dev; | 434 | struct device *dev; |
432 | struct dma_device common; | 435 | struct dma_device common; |
433 | struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; | 436 | struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; |
434 | bool has_sg; | ||
435 | bool mcdma; | 437 | bool mcdma; |
436 | u32 flush_on_fsync; | 438 | u32 flush_on_fsync; |
437 | bool ext_addr; | 439 | bool ext_addr; |
@@ -444,6 +446,7 @@ struct xilinx_dma_device { | |||
444 | struct clk *rxs_clk; | 446 | struct clk *rxs_clk; |
445 | u32 nr_channels; | 447 | u32 nr_channels; |
446 | u32 chan_id; | 448 | u32 chan_id; |
449 | u32 max_buffer_len; | ||
447 | }; | 450 | }; |
448 | 451 | ||
449 | /* Macros */ | 452 | /* Macros */ |
@@ -960,6 +963,34 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) | |||
960 | } | 963 | } |
961 | 964 | ||
962 | /** | 965 | /** |
966 | * xilinx_dma_calc_copysize - Calculate the amount of data to copy | ||
967 | * @chan: Driver specific DMA channel | ||
968 | * @size: Total data that needs to be copied | ||
969 | * @done: Amount of data that has been already copied | ||
970 | * | ||
971 | * Return: Amount of data that has to be copied | ||
972 | */ | ||
973 | static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan, | ||
974 | int size, int done) | ||
975 | { | ||
976 | size_t copy; | ||
977 | |||
978 | copy = min_t(size_t, size - done, | ||
979 | chan->xdev->max_buffer_len); | ||
980 | |||
981 | if ((copy + done < size) && | ||
982 | chan->xdev->common.copy_align) { | ||
983 | /* | ||
984 | * If this is not the last descriptor, make sure | ||
985 | * the next one will be properly aligned | ||
986 | */ | ||
987 | copy = rounddown(copy, | ||
988 | (1 << chan->xdev->common.copy_align)); | ||
989 | } | ||
990 | return copy; | ||
991 | } | ||
992 | |||
993 | /** | ||
963 | * xilinx_dma_tx_status - Get DMA transaction status | 994 | * xilinx_dma_tx_status - Get DMA transaction status |
964 | * @dchan: DMA channel | 995 | * @dchan: DMA channel |
965 | * @cookie: Transaction identifier | 996 | * @cookie: Transaction identifier |
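xilinx_dma_calc_copysize() becomes the single place where a transfer is sliced against the runtime max_buffer_len (rather than the old fixed 23-bit XILINX_DMA_MAX_TRANS_LEN) while keeping every descriptor except the last aligned to the engine's copy_align. A rough worked example of that slicing, with illustrative constants (23-bit length field, 64-byte alignment):

    #include <stddef.h>
    #include <stdio.h>

    #define ALIGN_EXP   6                      /* 2^6 = 64-byte alignment */
    #define MAX_BUF_LEN ((1u << 23) - 1)       /* 23-bit length field */

    /* Clamp to the HW limit; keep every non-final chunk aligned. */
    static size_t calc_copysize(size_t size, size_t done)
    {
            size_t copy = size - done;

            if (copy > MAX_BUF_LEN)
                    copy = MAX_BUF_LEN;
            if (done + copy < size)            /* not the last descriptor */
                    copy &= ~(((size_t)1 << ALIGN_EXP) - 1);
            return copy;
    }

    int main(void)
    {
            size_t size = 20u << 20, done = 0; /* 20 MiB transfer */

            while (done < size) {
                    size_t copy = calc_copysize(size, done);

                    printf("descriptor: offset %zu, len %zu\n", done, copy);
                    done += copy;
            }
            return 0;
    }

The 20 MiB buffer ends up as two 8388544-byte descriptors (the 23-bit maximum rounded down to a 64-byte boundary) plus one final descriptor carrying the remaining 4194432 bytes.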
@@ -992,7 +1023,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, | |||
992 | list_for_each_entry(segment, &desc->segments, node) { | 1023 | list_for_each_entry(segment, &desc->segments, node) { |
993 | hw = &segment->hw; | 1024 | hw = &segment->hw; |
994 | residue += (hw->control - hw->status) & | 1025 | residue += (hw->control - hw->status) & |
995 | XILINX_DMA_MAX_TRANS_LEN; | 1026 | chan->xdev->max_buffer_len; |
996 | } | 1027 | } |
997 | } | 1028 | } |
998 | spin_unlock_irqrestore(&chan->lock, flags); | 1029 | spin_unlock_irqrestore(&chan->lock, flags); |
@@ -1070,7 +1101,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) | |||
1070 | struct xilinx_vdma_config *config = &chan->config; | 1101 | struct xilinx_vdma_config *config = &chan->config; |
1071 | struct xilinx_dma_tx_descriptor *desc, *tail_desc; | 1102 | struct xilinx_dma_tx_descriptor *desc, *tail_desc; |
1072 | u32 reg, j; | 1103 | u32 reg, j; |
1073 | struct xilinx_vdma_tx_segment *tail_segment; | 1104 | struct xilinx_vdma_tx_segment *segment, *last = NULL; |
1105 | int i = 0; | ||
1074 | 1106 | ||
1075 | /* This function was invoked with lock held */ | 1107 | /* This function was invoked with lock held */ |
1076 | if (chan->err) | 1108 | if (chan->err) |
@@ -1087,17 +1119,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) | |||
1087 | tail_desc = list_last_entry(&chan->pending_list, | 1119 | tail_desc = list_last_entry(&chan->pending_list, |
1088 | struct xilinx_dma_tx_descriptor, node); | 1120 | struct xilinx_dma_tx_descriptor, node); |
1089 | 1121 | ||
1090 | tail_segment = list_last_entry(&tail_desc->segments, | ||
1091 | struct xilinx_vdma_tx_segment, node); | ||
1092 | |||
1093 | /* | ||
1094 | * If hardware is idle, then all descriptors on the running lists are | ||
1095 | * done, start new transfers | ||
1096 | */ | ||
1097 | if (chan->has_sg) | ||
1098 | dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, | ||
1099 | desc->async_tx.phys); | ||
1100 | |||
1101 | /* Configure the hardware using info in the config structure */ | 1122 | /* Configure the hardware using info in the config structure */ |
1102 | if (chan->has_vflip) { | 1123 | if (chan->has_vflip) { |
1103 | reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP); | 1124 | reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP); |
@@ -1114,15 +1135,11 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) | |||
1114 | else | 1135 | else |
1115 | reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; | 1136 | reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; |
1116 | 1137 | ||
1117 | /* | 1138 | /* If not parking, enable circular mode */ |
1118 | * With SG, start with circular mode, so that BDs can be fetched. | ||
1119 | * In direct register mode, if not parking, enable circular mode | ||
1120 | */ | ||
1121 | if (chan->has_sg || !config->park) | ||
1122 | reg |= XILINX_DMA_DMACR_CIRC_EN; | ||
1123 | |||
1124 | if (config->park) | 1139 | if (config->park) |
1125 | reg &= ~XILINX_DMA_DMACR_CIRC_EN; | 1140 | reg &= ~XILINX_DMA_DMACR_CIRC_EN; |
1141 | else | ||
1142 | reg |= XILINX_DMA_DMACR_CIRC_EN; | ||
1126 | 1143 | ||
1127 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); | 1144 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); |
1128 | 1145 | ||
@@ -1144,48 +1161,38 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) | |||
1144 | return; | 1161 | return; |
1145 | 1162 | ||
1146 | /* Start the transfer */ | 1163 | /* Start the transfer */ |
1147 | if (chan->has_sg) { | 1164 | if (chan->desc_submitcount < chan->num_frms) |
1148 | dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, | 1165 | i = chan->desc_submitcount; |
1149 | tail_segment->phys); | 1166 | |
1150 | list_splice_tail_init(&chan->pending_list, &chan->active_list); | 1167 | list_for_each_entry(segment, &desc->segments, node) { |
1151 | chan->desc_pendingcount = 0; | 1168 | if (chan->ext_addr) |
1152 | } else { | 1169 | vdma_desc_write_64(chan, |
1153 | struct xilinx_vdma_tx_segment *segment, *last = NULL; | 1170 | XILINX_VDMA_REG_START_ADDRESS_64(i++), |
1154 | int i = 0; | 1171 | segment->hw.buf_addr, |
1155 | 1172 | segment->hw.buf_addr_msb); | |
1156 | if (chan->desc_submitcount < chan->num_frms) | 1173 | else |
1157 | i = chan->desc_submitcount; | 1174 | vdma_desc_write(chan, |
1158 | |||
1159 | list_for_each_entry(segment, &desc->segments, node) { | ||
1160 | if (chan->ext_addr) | ||
1161 | vdma_desc_write_64(chan, | ||
1162 | XILINX_VDMA_REG_START_ADDRESS_64(i++), | ||
1163 | segment->hw.buf_addr, | ||
1164 | segment->hw.buf_addr_msb); | ||
1165 | else | ||
1166 | vdma_desc_write(chan, | ||
1167 | XILINX_VDMA_REG_START_ADDRESS(i++), | 1175 | XILINX_VDMA_REG_START_ADDRESS(i++), |
1168 | segment->hw.buf_addr); | 1176 | segment->hw.buf_addr); |
1169 | 1177 | ||
1170 | last = segment; | 1178 | last = segment; |
1171 | } | 1179 | } |
1172 | 1180 | ||
1173 | if (!last) | 1181 | if (!last) |
1174 | return; | 1182 | return; |
1175 | 1183 | ||
1176 | /* HW expects these parameters to be same for one transaction */ | 1184 | /* HW expects these parameters to be same for one transaction */ |
1177 | vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); | 1185 | vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); |
1178 | vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, | 1186 | vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, |
1179 | last->hw.stride); | 1187 | last->hw.stride); |
1180 | vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); | 1188 | vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); |
1181 | 1189 | ||
1182 | chan->desc_submitcount++; | 1190 | chan->desc_submitcount++; |
1183 | chan->desc_pendingcount--; | 1191 | chan->desc_pendingcount--; |
1184 | list_del(&desc->node); | 1192 | list_del(&desc->node); |
1185 | list_add_tail(&desc->node, &chan->active_list); | 1193 | list_add_tail(&desc->node, &chan->active_list); |
1186 | if (chan->desc_submitcount == chan->num_frms) | 1194 | if (chan->desc_submitcount == chan->num_frms) |
1187 | chan->desc_submitcount = 0; | 1195 | chan->desc_submitcount = 0; |
1188 | } | ||
1189 | 1196 | ||
1190 | chan->idle = false; | 1197 | chan->idle = false; |
1191 | } | 1198 | } |
@@ -1254,7 +1261,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) | |||
1254 | 1261 | ||
1255 | /* Start the transfer */ | 1262 | /* Start the transfer */ |
1256 | dma_ctrl_write(chan, XILINX_DMA_REG_BTT, | 1263 | dma_ctrl_write(chan, XILINX_DMA_REG_BTT, |
1257 | hw->control & XILINX_DMA_MAX_TRANS_LEN); | 1264 | hw->control & chan->xdev->max_buffer_len); |
1258 | } | 1265 | } |
1259 | 1266 | ||
1260 | list_splice_tail_init(&chan->pending_list, &chan->active_list); | 1267 | list_splice_tail_init(&chan->pending_list, &chan->active_list); |
@@ -1357,7 +1364,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) | |||
1357 | 1364 | ||
1358 | /* Start the transfer */ | 1365 | /* Start the transfer */ |
1359 | dma_ctrl_write(chan, XILINX_DMA_REG_BTT, | 1366 | dma_ctrl_write(chan, XILINX_DMA_REG_BTT, |
1360 | hw->control & XILINX_DMA_MAX_TRANS_LEN); | 1367 | hw->control & chan->xdev->max_buffer_len); |
1361 | } | 1368 | } |
1362 | 1369 | ||
1363 | list_splice_tail_init(&chan->pending_list, &chan->active_list); | 1370 | list_splice_tail_init(&chan->pending_list, &chan->active_list); |
@@ -1718,7 +1725,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, | |||
1718 | struct xilinx_cdma_tx_segment *segment; | 1725 | struct xilinx_cdma_tx_segment *segment; |
1719 | struct xilinx_cdma_desc_hw *hw; | 1726 | struct xilinx_cdma_desc_hw *hw; |
1720 | 1727 | ||
1721 | if (!len || len > XILINX_DMA_MAX_TRANS_LEN) | 1728 | if (!len || len > chan->xdev->max_buffer_len) |
1722 | return NULL; | 1729 | return NULL; |
1723 | 1730 | ||
1724 | desc = xilinx_dma_alloc_tx_descriptor(chan); | 1731 | desc = xilinx_dma_alloc_tx_descriptor(chan); |
@@ -1808,8 +1815,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( | |||
1808 | * Calculate the maximum number of bytes to transfer, | 1815 | * Calculate the maximum number of bytes to transfer, |
1809 | * making sure it is less than the hw limit | 1816 | * making sure it is less than the hw limit |
1810 | */ | 1817 | */ |
1811 | copy = min_t(size_t, sg_dma_len(sg) - sg_used, | 1818 | copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg), |
1812 | XILINX_DMA_MAX_TRANS_LEN); | 1819 | sg_used); |
1813 | hw = &segment->hw; | 1820 | hw = &segment->hw; |
1814 | 1821 | ||
1815 | /* Fill in the descriptor */ | 1822 | /* Fill in the descriptor */ |
@@ -1913,8 +1920,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( | |||
1913 | * Calculate the maximum number of bytes to transfer, | 1920 | * Calculate the maximum number of bytes to transfer, |
1914 | * making sure it is less than the hw limit | 1921 | * making sure it is less than the hw limit |
1915 | */ | 1922 | */ |
1916 | copy = min_t(size_t, period_len - sg_used, | 1923 | copy = xilinx_dma_calc_copysize(chan, period_len, |
1917 | XILINX_DMA_MAX_TRANS_LEN); | 1924 | sg_used); |
1918 | hw = &segment->hw; | 1925 | hw = &segment->hw; |
1919 | xilinx_axidma_buf(chan, hw, buf_addr, sg_used, | 1926 | xilinx_axidma_buf(chan, hw, buf_addr, sg_used, |
1920 | period_len * i); | 1927 | period_len * i); |
@@ -2389,7 +2396,6 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, | |||
2389 | 2396 | ||
2390 | chan->dev = xdev->dev; | 2397 | chan->dev = xdev->dev; |
2391 | chan->xdev = xdev; | 2398 | chan->xdev = xdev; |
2392 | chan->has_sg = xdev->has_sg; | ||
2393 | chan->desc_pendingcount = 0x0; | 2399 | chan->desc_pendingcount = 0x0; |
2394 | chan->ext_addr = xdev->ext_addr; | 2400 | chan->ext_addr = xdev->ext_addr; |
2395 | /* This variable ensures that descriptors are not | 2401 | /* This variable ensures that descriptors are not |
@@ -2489,6 +2495,15 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, | |||
2489 | chan->stop_transfer = xilinx_dma_stop_transfer; | 2495 | chan->stop_transfer = xilinx_dma_stop_transfer; |
2490 | } | 2496 | } |
2491 | 2497 | ||
2498 | /* check if SG is enabled (only for AXIDMA and CDMA) */ | ||
2499 | if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) { | ||
2500 | if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & | ||
2501 | XILINX_DMA_DMASR_SG_MASK) | ||
2502 | chan->has_sg = true; | ||
2503 | dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id, | ||
2504 | chan->has_sg ? "enabled" : "disabled"); | ||
2505 | } | ||
2506 | |||
2492 | /* Initialize the tasklet */ | 2507 | /* Initialize the tasklet */ |
2493 | tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, | 2508 | tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, |
2494 | (unsigned long)chan); | 2509 | (unsigned long)chan); |
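Scatter-Gather support is now detected from the hardware itself: bit 3 of the channel status register (DMASR) reports whether the AXI DMA/CDMA instance was synthesized with the SG engine, which makes the old xlnx,include-sg devicetree property (dropped later in this patch) redundant and immune to stale DT data. A sketch of the check, with the register offset and names as illustrative assumptions:

    #include <linux/bitops.h>
    #include <linux/io.h>

    #define DEMO_DMASR_OFFSET  0x04            /* per-channel status register */
    #define DEMO_DMASR_SG_MASK BIT(3)          /* set when SG is included in the IP */

    /* Ask the IP, not the devicetree, whether it has a Scatter-Gather engine. */
    static bool demo_has_sg(void __iomem *chan_regs)
    {
            return readl(chan_regs + DEMO_DMASR_OFFSET) & DEMO_DMASR_SG_MASK;
    }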
@@ -2596,7 +2611,7 @@ static int xilinx_dma_probe(struct platform_device *pdev) | |||
2596 | struct xilinx_dma_device *xdev; | 2611 | struct xilinx_dma_device *xdev; |
2597 | struct device_node *child, *np = pdev->dev.of_node; | 2612 | struct device_node *child, *np = pdev->dev.of_node; |
2598 | struct resource *io; | 2613 | struct resource *io; |
2599 | u32 num_frames, addr_width; | 2614 | u32 num_frames, addr_width, len_width; |
2600 | int i, err; | 2615 | int i, err; |
2601 | 2616 | ||
2602 | /* Allocate and initialize the DMA engine structure */ | 2617 | /* Allocate and initialize the DMA engine structure */ |
@@ -2627,9 +2642,24 @@ static int xilinx_dma_probe(struct platform_device *pdev) | |||
2627 | return PTR_ERR(xdev->regs); | 2642 | return PTR_ERR(xdev->regs); |
2628 | 2643 | ||
2629 | /* Retrieve the DMA engine properties from the device tree */ | 2644 | /* Retrieve the DMA engine properties from the device tree */ |
2630 | xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); | 2645 | xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0); |
2631 | if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) | 2646 | |
2647 | if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { | ||
2632 | xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); | 2648 | xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); |
2649 | if (!of_property_read_u32(node, "xlnx,sg-length-width", | ||
2650 | &len_width)) { | ||
2651 | if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN || | ||
2652 | len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) { | ||
2653 | dev_warn(xdev->dev, | ||
2654 | "invalid xlnx,sg-length-width property value. Using default width\n"); | ||
2655 | } else { | ||
2656 | if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX) | ||
2657 | dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n"); | ||
2658 | xdev->max_buffer_len = | ||
2659 | GENMASK(len_width - 1, 0); | ||
2660 | } | ||
2661 | } | ||
2662 | } | ||
2633 | 2663 | ||
2634 | if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { | 2664 | if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
2635 | err = of_property_read_u32(node, "xlnx,num-fstores", | 2665 | err = of_property_read_u32(node, "xlnx,num-fstores", |
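With the buffer-length mask now runtime-configurable, a width of n bits from the optional xlnx,sg-length-width property gives max_buffer_len = GENMASK(n - 1, 0), i.e. 2^n - 1 bytes per descriptor; out-of-range values fall back to the 23-bit default, and widths above 23 only trigger a warning since older IP truncates the field. A small sketch of the same clamping, with hypothetical names:

    #include <linux/bits.h>
    #include <linux/types.h>

    #define DEMO_LEN_WIDTH_MIN      8
    #define DEMO_LEN_WIDTH_DEFAULT  23
    #define DEMO_LEN_WIDTH_MAX      26

    /* Turn a "xlnx,sg-length-width" value into the usable length mask. */
    static u32 demo_len_mask(u32 len_width)
    {
            if (len_width < DEMO_LEN_WIDTH_MIN || len_width > DEMO_LEN_WIDTH_MAX)
                    len_width = DEMO_LEN_WIDTH_DEFAULT;
            return GENMASK(len_width - 1, 0);  /* 2^len_width - 1 */
    }

For example, an IP configured with a 26-bit length field yields a 0x3FFFFFF mask, letting each descriptor move up to 64 MiB - 1 instead of 8 MiB - 1.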
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c index 98dbc796353f..53ca9ba6ab4b 100644 --- a/drivers/tty/serial/8250/8250_lpss.c +++ b/drivers/tty/serial/8250/8250_lpss.c | |||
@@ -153,7 +153,6 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port) | |||
153 | #ifdef CONFIG_SERIAL_8250_DMA | 153 | #ifdef CONFIG_SERIAL_8250_DMA |
154 | static const struct dw_dma_platform_data qrk_serial_dma_pdata = { | 154 | static const struct dw_dma_platform_data qrk_serial_dma_pdata = { |
155 | .nr_channels = 2, | 155 | .nr_channels = 2, |
156 | .is_private = true, | ||
157 | .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, | 156 | .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, |
158 | .chan_priority = CHAN_PRIORITY_ASCENDING, | 157 | .chan_priority = CHAN_PRIORITY_ASCENDING, |
159 | .block_size = 4095, | 158 | .block_size = 4095, |
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h index e166cac8e870..9752f3745f76 100644 --- a/include/linux/dma/dw.h +++ b/include/linux/dma/dw.h | |||
@@ -1,13 +1,10 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* | 2 | /* |
2 | * Driver for the Synopsys DesignWare DMA Controller | 3 | * Driver for the Synopsys DesignWare DMA Controller |
3 | * | 4 | * |
4 | * Copyright (C) 2007 Atmel Corporation | 5 | * Copyright (C) 2007 Atmel Corporation |
5 | * Copyright (C) 2010-2011 ST Microelectronics | 6 | * Copyright (C) 2010-2011 ST Microelectronics |
6 | * Copyright (C) 2014 Intel Corporation | 7 | * Copyright (C) 2014 Intel Corporation |
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | 8 | */ |
12 | #ifndef _DMA_DW_H | 9 | #ifndef _DMA_DW_H |
13 | #define _DMA_DW_H | 10 | #define _DMA_DW_H |
@@ -45,9 +42,13 @@ struct dw_dma_chip { | |||
45 | #if IS_ENABLED(CONFIG_DW_DMAC_CORE) | 42 | #if IS_ENABLED(CONFIG_DW_DMAC_CORE) |
46 | int dw_dma_probe(struct dw_dma_chip *chip); | 43 | int dw_dma_probe(struct dw_dma_chip *chip); |
47 | int dw_dma_remove(struct dw_dma_chip *chip); | 44 | int dw_dma_remove(struct dw_dma_chip *chip); |
45 | int idma32_dma_probe(struct dw_dma_chip *chip); | ||
46 | int idma32_dma_remove(struct dw_dma_chip *chip); | ||
48 | #else | 47 | #else |
49 | static inline int dw_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; } | 48 | static inline int dw_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; } |
50 | static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; } | 49 | static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; } |
50 | static inline int idma32_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; } | ||
51 | static inline int idma32_dma_remove(struct dw_dma_chip *chip) { return 0; } | ||
51 | #endif /* CONFIG_DW_DMAC_CORE */ | 52 | #endif /* CONFIG_DW_DMAC_CORE */ |
52 | 53 | ||
53 | #endif /* _DMA_DW_H */ | 54 | #endif /* _DMA_DW_H */ |
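The header now exposes idma32_dma_probe()/idma32_dma_remove() alongside the dw_dma_* entry points, with the usual static inline -ENODEV/0 stubs when CONFIG_DW_DMAC_CORE is off so glue drivers keep linking. A hedged sketch of how a glue driver might select between the two cores; the variant flag and function name are illustrative, not part of the header:

    #include <linux/dma/dw.h>

    /* Pick the probe routine for the detected controller flavour. */
    static int demo_dw_core_probe(struct dw_dma_chip *chip, bool is_idma32)
    {
            /* Either call collapses to a -ENODEV stub if the core is disabled. */
            return is_idma32 ? idma32_dma_probe(chip) : dw_dma_probe(chip);
    }

This lets callers make the iDMA32 choice explicitly, instead of via the is_idma32 flag removed from dw_dma_platform_data below.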
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 1a1d58ebffbf..f3eaf9ec00a1 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h | |||
@@ -1,12 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* | 2 | /* |
2 | * Driver for the Synopsys DesignWare DMA Controller | 3 | * Driver for the Synopsys DesignWare DMA Controller |
3 | * | 4 | * |
4 | * Copyright (C) 2007 Atmel Corporation | 5 | * Copyright (C) 2007 Atmel Corporation |
5 | * Copyright (C) 2010-2011 ST Microelectronics | 6 | * Copyright (C) 2010-2011 ST Microelectronics |
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | 7 | */ |
11 | #ifndef _PLATFORM_DATA_DMA_DW_H | 8 | #ifndef _PLATFORM_DATA_DMA_DW_H |
12 | #define _PLATFORM_DATA_DMA_DW_H | 9 | #define _PLATFORM_DATA_DMA_DW_H |
@@ -38,10 +35,6 @@ struct dw_dma_slave { | |||
38 | /** | 35 | /** |
39 | * struct dw_dma_platform_data - Controller configuration parameters | 36 | * struct dw_dma_platform_data - Controller configuration parameters |
40 | * @nr_channels: Number of channels supported by hardware (max 8) | 37 | * @nr_channels: Number of channels supported by hardware (max 8) |
41 | * @is_private: The device channels should be marked as private and not for | ||
42 | * by the general purpose DMA channel allocator. | ||
43 | * @is_memcpy: The device channels do support memory-to-memory transfers. | ||
44 | * @is_idma32: The type of the DMA controller is iDMA32 | ||
45 | * @chan_allocation_order: Allocate channels starting from 0 or 7 | 38 | * @chan_allocation_order: Allocate channels starting from 0 or 7 |
46 | * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. | 39 | * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. |
47 | * @block_size: Maximum block size supported by the controller | 40 | * @block_size: Maximum block size supported by the controller |
@@ -53,9 +46,6 @@ struct dw_dma_slave { | |||
53 | */ | 46 | */ |
54 | struct dw_dma_platform_data { | 47 | struct dw_dma_platform_data { |
55 | unsigned int nr_channels; | 48 | unsigned int nr_channels; |
56 | bool is_private; | ||
57 | bool is_memcpy; | ||
58 | bool is_idma32; | ||
59 | #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ | 49 | #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ |
60 | #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ | 50 | #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ |
61 | unsigned char chan_allocation_order; | 51 | unsigned char chan_allocation_order; |
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index 7d964e787299..9daea8d42a10 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h | |||
@@ -55,6 +55,7 @@ struct imx_dma_data { | |||
55 | int dma_request2; /* secondary DMA request line */ | 55 | int dma_request2; /* secondary DMA request line */ |
56 | enum sdma_peripheral_type peripheral_type; | 56 | enum sdma_peripheral_type peripheral_type; |
57 | int priority; | 57 | int priority; |
58 | struct device_node *of_node; | ||
58 | }; | 59 | }; |
59 | 60 | ||
60 | static inline int imx_dma_is_ipu(struct dma_chan *chan) | 61 | static inline int imx_dma_is_ipu(struct dma_chan *chan) |
diff --git a/include/trace/events/tegra_apb_dma.h b/include/trace/events/tegra_apb_dma.h new file mode 100644 index 000000000000..0818f6286110 --- /dev/null +++ b/include/trace/events/tegra_apb_dma.h | |||
@@ -0,0 +1,61 @@ | |||
1 | #if !defined(_TRACE_TEGRA_APB_DMA_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _TRACE_TEGRA_APB_DMA_H | ||
3 | |||
4 | #include <linux/tracepoint.h> | ||
5 | #include <linux/dmaengine.h> | ||
6 | |||
7 | #undef TRACE_SYSTEM | ||
8 | #define TRACE_SYSTEM tegra_apb_dma | ||
9 | |||
10 | TRACE_EVENT(tegra_dma_tx_status, | ||
11 | TP_PROTO(struct dma_chan *dc, dma_cookie_t cookie, struct dma_tx_state *state), | ||
12 | TP_ARGS(dc, cookie, state), | ||
13 | TP_STRUCT__entry( | ||
14 | __string(chan, dev_name(&dc->dev->device)) | ||
15 | __field(dma_cookie_t, cookie) | ||
16 | __field(__u32, residue) | ||
17 | ), | ||
18 | TP_fast_assign( | ||
19 | __assign_str(chan, dev_name(&dc->dev->device)); | ||
20 | __entry->cookie = cookie; | ||
21 | __entry->residue = state ? state->residue : (u32)-1; | ||
22 | ), | ||
23 | TP_printk("channel %s: dma cookie %d, residue %u", | ||
24 | __get_str(chan), __entry->cookie, __entry->residue) | ||
25 | ); | ||
26 | |||
27 | TRACE_EVENT(tegra_dma_complete_cb, | ||
28 | TP_PROTO(struct dma_chan *dc, int count, void *ptr), | ||
29 | TP_ARGS(dc, count, ptr), | ||
30 | TP_STRUCT__entry( | ||
31 | __string(chan, dev_name(&dc->dev->device)) | ||
32 | __field(int, count) | ||
33 | __field(void *, ptr) | ||
34 | ), | ||
35 | TP_fast_assign( | ||
36 | __assign_str(chan, dev_name(&dc->dev->device)); | ||
37 | __entry->count = count; | ||
38 | __entry->ptr = ptr; | ||
39 | ), | ||
40 | TP_printk("channel %s: done %d, ptr %p", | ||
41 | __get_str(chan), __entry->count, __entry->ptr) | ||
42 | ); | ||
43 | |||
44 | TRACE_EVENT(tegra_dma_isr, | ||
45 | TP_PROTO(struct dma_chan *dc, int irq), | ||
46 | TP_ARGS(dc, irq), | ||
47 | TP_STRUCT__entry( | ||
48 | __string(chan, dev_name(&dc->dev->device)) | ||
49 | __field(int, irq) | ||
50 | ), | ||
51 | TP_fast_assign( | ||
52 | __assign_str(chan, dev_name(&dc->dev->device)); | ||
53 | __entry->irq = irq; | ||
54 | ), | ||
55 | TP_printk("%s: irq %d", __get_str(chan), __entry->irq) | ||
56 | ); | ||
57 | |||
58 | #endif /* _TRACE_TEGRA_APB_DMA_H */ | ||
59 | |||
60 | /* This part must be outside protection */ | ||
61 | #include <trace/define_trace.h> | ||
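The new trace header follows the standard layout for trace/events/ headers: exactly one translation unit in the driver defines CREATE_TRACE_POINTS before including it (which instantiates the events), every other user just includes it, and the generated trace_* calls compile to static-branch no-ops while the events are disabled. A minimal sketch of the consumer side, with the wrapper name as an illustrative assumption:

    /* In one .c file of the driver that emits these events: */
    #define CREATE_TRACE_POINTS
    #include <trace/events/tegra_apb_dma.h>

    /* Anywhere after that include, the helpers are plain function calls. */
    static void demo_report_irq(struct dma_chan *chan, int irq)
    {
            trace_tegra_dma_isr(chan, irq);
    }

The events can then be enabled at runtime under tracefs, e.g. /sys/kernel/debug/tracing/events/tegra_apb_dma/.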