40 files changed, 1728 insertions, 1206 deletions
diff --git a/Documentation/ABI/stable/sysfs-driver-dma-ioatdma b/Documentation/ABI/stable/sysfs-driver-dma-ioatdma
new file mode 100644
index 000000000000..420c1d09e42f
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-driver-dma-ioatdma
@@ -0,0 +1,30 @@ | |||
1 | What: /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/cap | ||
2 | Date: December 3, 2009 | ||
3 | KernelVersion: 2.6.32 | ||
4 | Contact: dmaengine@vger.kernel.org | ||
5 | Description: Capabilities the DMA supports. Currently there are DMA_PQ, DMA_PQ_VAL, | ||
6 | DMA_XOR, DMA_XOR_VAL, DMA_INTERRUPT. | ||
7 | |||
8 | What: /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/ring_active | ||
9 | Date: December 3, 2009 | ||
10 | KernelVersion: 2.6.32 | ||
11 | Contact: dmaengine@vger.kernel.org | ||
12 | Description: The number of descriptors active in the ring. | ||
13 | |||
14 | What: /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/ring_size | ||
15 | Date: December 3, 2009 | ||
16 | KernelVersion: 2.6.32 | ||
17 | Contact: dmaengine@vger.kernel.org | ||
18 | Description: Descriptor ring size, total number of descriptors available. | ||
19 | |||
20 | What: /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/version | ||
21 | Date: December 3, 2009 | ||
22 | KernelVersion: 2.6.32 | ||
23 | Contact: dmaengine@vger.kernel.org | ||
24 | Description: Version of ioatdma device. | ||
25 | |||
26 | What: /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/intr_coalesce | ||
27 | Date: August 8, 2017 | ||
28 | KernelVersion: 4.14 | ||
29 | Contact: dmaengine@vger.kernel.org | ||
30 | Description: Tunable interrupt delay value, settable on a per-channel basis. | ||
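For illustration, a minimal userspace sketch of reading one of the attributes above; the PCI address and channel number in the path are hypothetical placeholders, not values taken from this patch.

    /* Hedged sketch: read the ring_size attribute of an ioatdma channel. */
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical device path; substitute a real ioatdma channel. */
            const char *path =
                    "/sys/devices/pci0000:00/0000:00:04.0/dma/dma0chan0/quickdata/ring_size";
            char buf[64];
            FILE *f = fopen(path, "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("ring_size: %s", buf);
            fclose(f);
            return 0;
    }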
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index 79a204d50234..891db41e9420 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -25,6 +25,7 @@ Required Properties: | |||
25 | - "renesas,dmac-r8a7794" (R-Car E2) | 25 | - "renesas,dmac-r8a7794" (R-Car E2) |
26 | - "renesas,dmac-r8a7795" (R-Car H3) | 26 | - "renesas,dmac-r8a7795" (R-Car H3) |
27 | - "renesas,dmac-r8a7796" (R-Car M3-W) | 27 | - "renesas,dmac-r8a7796" (R-Car M3-W) |
28 | - "renesas,dmac-r8a77970" (R-Car V3M) | ||
28 | 29 | ||
29 | - reg: base address and length of the registers block for the DMAC | 30 | - reg: base address and length of the registers block for the DMAC |
30 | 31 | ||
diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
index e7780a186a36..1be6941ac1e5 100644
--- a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
@@ -8,6 +8,7 @@ Required Properties: | |||
8 | - "renesas,r8a7793-usb-dmac" (R-Car M2-N) | 8 | - "renesas,r8a7793-usb-dmac" (R-Car M2-N) |
9 | - "renesas,r8a7794-usb-dmac" (R-Car E2) | 9 | - "renesas,r8a7794-usb-dmac" (R-Car E2) |
10 | - "renesas,r8a7795-usb-dmac" (R-Car H3) | 10 | - "renesas,r8a7795-usb-dmac" (R-Car H3) |
11 | - "renesas,r8a7796-usb-dmac" (R-Car M3-W) | ||
11 | - reg: base address and length of the registers block for the DMAC | 12 | - reg: base address and length of the registers block for the DMAC |
12 | - interrupts: interrupt specifiers for the DMAC, one for each entry in | 13 | - interrupts: interrupt specifiers for the DMAC, one for each entry in |
13 | interrupt-names. | 14 | interrupt-names. |
diff --git a/Documentation/devicetree/bindings/dma/sun6i-dma.txt b/Documentation/devicetree/bindings/dma/sun6i-dma.txt
index 6b267045f522..98fbe1a5c6dd 100644
--- a/Documentation/devicetree/bindings/dma/sun6i-dma.txt
+++ b/Documentation/devicetree/bindings/dma/sun6i-dma.txt
@@ -9,6 +9,7 @@ Required properties: | |||
9 | "allwinner,sun8i-a23-dma" | 9 | "allwinner,sun8i-a23-dma" |
10 | "allwinner,sun8i-a83t-dma" | 10 | "allwinner,sun8i-a83t-dma" |
11 | "allwinner,sun8i-h3-dma" | 11 | "allwinner,sun8i-h3-dma" |
12 | "allwinner,sun8i-v3s-dma" | ||
12 | - reg: Should contain the registers base address and length | 13 | - reg: Should contain the registers base address and length |
13 | - interrupts: Should contain a reference to the interrupt used by this device | 14 | - interrupts: Should contain a reference to the interrupt used by this device |
14 | - clocks: Should contain a reference to the parent AHB clock | 15 | - clocks: Should contain a reference to the parent AHB clock |
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
index e33bc1c8ed2c..5dbe054a40ad 100644
--- a/Documentation/dmaengine/provider.txt
+++ b/Documentation/dmaengine/provider.txt
@@ -181,13 +181,6 @@ Currently, the types available are: | |||
181 | - Used by the client drivers to register a callback that will be | 181 | - Used by the client drivers to register a callback that will be |
182 | called on a regular basis through the DMA controller interrupt | 182 | called on a regular basis through the DMA controller interrupt |
183 | 183 | ||
184 | * DMA_SG | ||
185 | - The device supports memory to memory scatter-gather | ||
186 | transfers. | ||
187 | - Even though a plain memcpy can look like a particular case of a | ||
188 | scatter-gather transfer, with a single chunk to transfer, it's a | ||
189 | distinct transaction type in the mem2mem transfers case | ||
190 | |||
191 | * DMA_PRIVATE | 184 | * DMA_PRIVATE |
192 | - The devices only supports slave transfers, and as such isn't | 185 | - The devices only supports slave transfers, and as such isn't |
193 | available for async transfers. | 186 | available for async transfers. |
@@ -395,6 +388,13 @@ where to put them) | |||
395 | when DMA_CTRL_REUSE is already set | 388 | when DMA_CTRL_REUSE is already set |
396 | - Terminating the channel | 389 | - Terminating the channel |
397 | 390 | ||
391 | * DMA_PREP_CMD | ||
392 | - If set, the client driver tells the DMA controller that the data | ||
393 | passed via the DMA API is command data. | ||
394 | - Interpretation of command data is DMA controller specific. It can be | ||
395 | used for issuing commands to other peripherals, register reads or | ||
396 | register writes for which the descriptor should be in a different | ||
397 | format from normal data descriptors. | ||
398 | 398 | ||
399 | General Design Notes | 399 | General Design Notes |
400 | -------------------- | 400 | -------------------- |
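As a rough sketch of how a client driver might use the new DMA_PREP_CMD flag documented above; the helper name and buffer parameters are illustrative placeholders, not code from this patch.

    #include <linux/dmaengine.h>

    /*
     * Hypothetical client helper: push a controller-specific command
     * buffer (already DMA-mapped) through a slave channel.
     */
    static int send_cmd_descriptor(struct dma_chan *chan,
                                   dma_addr_t cmd_buf, size_t cmd_len)
    {
            struct dma_async_tx_descriptor *desc;

            desc = dmaengine_prep_slave_single(chan, cmd_buf, cmd_len,
                                               DMA_MEM_TO_DEV,
                                               DMA_PREP_CMD | DMA_CTRL_ACK);
            if (!desc)
                    return -EIO;

            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
            return 0;
    }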
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 901343dd513e..d608043c0280 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -502,27 +502,6 @@ static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy( | |||
502 | return &desc->tx_desc; | 502 | return &desc->tx_desc; |
503 | } | 503 | } |
504 | 504 | ||
505 | static struct dma_async_tx_descriptor *ccp_prep_dma_sg( | ||
506 | struct dma_chan *dma_chan, struct scatterlist *dst_sg, | ||
507 | unsigned int dst_nents, struct scatterlist *src_sg, | ||
508 | unsigned int src_nents, unsigned long flags) | ||
509 | { | ||
510 | struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, | ||
511 | dma_chan); | ||
512 | struct ccp_dma_desc *desc; | ||
513 | |||
514 | dev_dbg(chan->ccp->dev, | ||
515 | "%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n", | ||
516 | __func__, src_sg, src_nents, dst_sg, dst_nents, flags); | ||
517 | |||
518 | desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents, | ||
519 | flags); | ||
520 | if (!desc) | ||
521 | return NULL; | ||
522 | |||
523 | return &desc->tx_desc; | ||
524 | } | ||
525 | |||
526 | static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt( | 505 | static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt( |
527 | struct dma_chan *dma_chan, unsigned long flags) | 506 | struct dma_chan *dma_chan, unsigned long flags) |
528 | { | 507 | { |
@@ -704,7 +683,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp) | |||
704 | dma_dev->directions = DMA_MEM_TO_MEM; | 683 | dma_dev->directions = DMA_MEM_TO_MEM; |
705 | dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; | 684 | dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; |
706 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | 685 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); |
707 | dma_cap_set(DMA_SG, dma_dev->cap_mask); | ||
708 | dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); | 686 | dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); |
709 | 687 | ||
710 | /* The DMA channels for this device can be set to public or private, | 688 | /* The DMA channels for this device can be set to public or private, |
@@ -740,7 +718,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp) | |||
740 | 718 | ||
741 | dma_dev->device_free_chan_resources = ccp_free_chan_resources; | 719 | dma_dev->device_free_chan_resources = ccp_free_chan_resources; |
742 | dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy; | 720 | dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy; |
743 | dma_dev->device_prep_dma_sg = ccp_prep_dma_sg; | ||
744 | dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt; | 721 | dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt; |
745 | dma_dev->device_issue_pending = ccp_issue_pending; | 722 | dma_dev->device_issue_pending = ccp_issue_pending; |
746 | dma_dev->device_tx_status = ccp_tx_status; | 723 | dma_dev->device_tx_status = ccp_tx_status; |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index fa8f9c07ce73..fadc4d8783bd 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -56,6 +56,12 @@ config DMA_OF | |||
56 | select DMA_ENGINE | 56 | select DMA_ENGINE |
57 | 57 | ||
58 | #devices | 58 | #devices |
59 | config ALTERA_MSGDMA | ||
60 | tristate "Altera / Intel mSGDMA Engine" | ||
61 | select DMA_ENGINE | ||
62 | help | ||
63 | Enable support for the Altera / Intel mSGDMA controller. | ||
64 | |||
59 | config AMBA_PL08X | 65 | config AMBA_PL08X |
60 | bool "ARM PrimeCell PL080 or PL081 support" | 66 | bool "ARM PrimeCell PL080 or PL081 support" |
61 | depends on ARM_AMBA | 67 | depends on ARM_AMBA |
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index d12ab2985ed1..f08f8de1b567 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_DMA_OF) += of-dma.o | |||
12 | obj-$(CONFIG_DMATEST) += dmatest.o | 12 | obj-$(CONFIG_DMATEST) += dmatest.o |
13 | 13 | ||
14 | #devices | 14 | #devices |
15 | obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o | ||
15 | obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o | 16 | obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o |
16 | obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ | 17 | obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ |
17 | obj-$(CONFIG_AT_HDMAC) += at_hdmac.o | 18 | obj-$(CONFIG_AT_HDMAC) += at_hdmac.o |
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
new file mode 100644
index 000000000000..32905d5606ac
--- /dev/null
+++ b/drivers/dma/altera-msgdma.c
@@ -0,0 +1,927 @@ | |||
1 | /* | ||
2 | * DMA driver for Altera mSGDMA IP core | ||
3 | * | ||
4 | * Copyright (C) 2017 Stefan Roese <sr@denx.de> | ||
5 | * | ||
6 | * Based on drivers/dma/xilinx/zynqmp_dma.c, which is: | ||
7 | * Copyright (C) 2016 Xilinx, Inc. All rights reserved. | ||
8 | * | ||
9 | * This program is free software: you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation, either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | */ | ||
14 | |||
15 | #include <linux/bitops.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/dma-mapping.h> | ||
18 | #include <linux/dmapool.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/io.h> | ||
22 | #include <linux/iopoll.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/slab.h> | ||
26 | |||
27 | #include "dmaengine.h" | ||
28 | |||
29 | #define MSGDMA_MAX_TRANS_LEN U32_MAX | ||
30 | #define MSGDMA_DESC_NUM 1024 | ||
31 | |||
32 | /** | ||
33 | * struct msgdma_extended_desc - implements an extended descriptor | ||
34 | * @read_addr_lo: data buffer source address low bits | ||
35 | * @write_addr_lo: data buffer destination address low bits | ||
36 | * @len: the number of bytes to transfer per descriptor | ||
37 | * @burst_seq_num: bit 31:24 write burst | ||
38 | * bit 23:16 read burst | ||
39 | * bit 15:00 sequence number | ||
40 | * @stride: bit 31:16 write stride | ||
41 | * bit 15:00 read stride | ||
42 | * @read_addr_hi: data buffer source address high bits | ||
43 | * @write_addr_hi: data buffer destination address high bits | ||
44 | * @control: characteristics of the transfer | ||
45 | */ | ||
46 | struct msgdma_extended_desc { | ||
47 | u32 read_addr_lo; | ||
48 | u32 write_addr_lo; | ||
49 | u32 len; | ||
50 | u32 burst_seq_num; | ||
51 | u32 stride; | ||
52 | u32 read_addr_hi; | ||
53 | u32 write_addr_hi; | ||
54 | u32 control; | ||
55 | }; | ||
56 | |||
57 | /* mSGDMA descriptor control field bit definitions */ | ||
58 | #define MSGDMA_DESC_CTL_SET_CH(x) ((x) & 0xff) | ||
59 | #define MSGDMA_DESC_CTL_GEN_SOP BIT(8) | ||
60 | #define MSGDMA_DESC_CTL_GEN_EOP BIT(9) | ||
61 | #define MSGDMA_DESC_CTL_PARK_READS BIT(10) | ||
62 | #define MSGDMA_DESC_CTL_PARK_WRITES BIT(11) | ||
63 | #define MSGDMA_DESC_CTL_END_ON_EOP BIT(12) | ||
64 | #define MSGDMA_DESC_CTL_END_ON_LEN BIT(13) | ||
65 | #define MSGDMA_DESC_CTL_TR_COMP_IRQ BIT(14) | ||
66 | #define MSGDMA_DESC_CTL_EARLY_IRQ BIT(15) | ||
67 | #define MSGDMA_DESC_CTL_TR_ERR_IRQ GENMASK(23, 16) | ||
68 | #define MSGDMA_DESC_CTL_EARLY_DONE BIT(24) | ||
69 | |||
70 | /* | ||
71 | * Writing "1" the "go" bit commits the entire descriptor into the | ||
72 | * descriptor FIFO(s) | ||
73 | */ | ||
74 | #define MSGDMA_DESC_CTL_GO BIT(31) | ||
75 | |||
76 | /* Tx buffer control flags */ | ||
77 | #define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \ | ||
78 | MSGDMA_DESC_CTL_TR_ERR_IRQ | \ | ||
79 | MSGDMA_DESC_CTL_GO) | ||
80 | |||
81 | #define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \ | ||
82 | MSGDMA_DESC_CTL_GO) | ||
83 | |||
84 | #define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \ | ||
85 | MSGDMA_DESC_CTL_TR_COMP_IRQ | \ | ||
86 | MSGDMA_DESC_CTL_TR_ERR_IRQ | \ | ||
87 | MSGDMA_DESC_CTL_GO) | ||
88 | |||
89 | #define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \ | ||
90 | MSGDMA_DESC_CTL_GEN_EOP | \ | ||
91 | MSGDMA_DESC_CTL_TR_COMP_IRQ | \ | ||
92 | MSGDMA_DESC_CTL_TR_ERR_IRQ | \ | ||
93 | MSGDMA_DESC_CTL_GO) | ||
94 | |||
95 | #define MSGDMA_DESC_CTL_RX_SINGLE (MSGDMA_DESC_CTL_END_ON_EOP | \ | ||
96 | MSGDMA_DESC_CTL_END_ON_LEN | \ | ||
97 | MSGDMA_DESC_CTL_TR_COMP_IRQ | \ | ||
98 | MSGDMA_DESC_CTL_EARLY_IRQ | \ | ||
99 | MSGDMA_DESC_CTL_TR_ERR_IRQ | \ | ||
100 | MSGDMA_DESC_CTL_GO) | ||
101 | |||
102 | /* mSGDMA extended descriptor stride definitions */ | ||
103 | #define MSGDMA_DESC_STRIDE_RD 0x00000001 | ||
104 | #define MSGDMA_DESC_STRIDE_WR 0x00010000 | ||
105 | #define MSGDMA_DESC_STRIDE_RW 0x00010001 | ||
106 | |||
107 | /* mSGDMA dispatcher control and status register map */ | ||
108 | #define MSGDMA_CSR_STATUS 0x00 /* Read / Clear */ | ||
109 | #define MSGDMA_CSR_CONTROL 0x04 /* Read / Write */ | ||
110 | #define MSGDMA_CSR_RW_FILL_LEVEL 0x08 /* 31:16 - write fill level */ | ||
111 | /* 15:00 - read fill level */ | ||
112 | #define MSGDMA_CSR_RESP_FILL_LEVEL 0x0c /* response FIFO fill level */ | ||
113 | #define MSGDMA_CSR_RW_SEQ_NUM 0x10 /* 31:16 - write seq number */ | ||
114 | /* 15:00 - read seq number */ | ||
115 | |||
116 | /* mSGDMA CSR status register bit definitions */ | ||
117 | #define MSGDMA_CSR_STAT_BUSY BIT(0) | ||
118 | #define MSGDMA_CSR_STAT_DESC_BUF_EMPTY BIT(1) | ||
119 | #define MSGDMA_CSR_STAT_DESC_BUF_FULL BIT(2) | ||
120 | #define MSGDMA_CSR_STAT_RESP_BUF_EMPTY BIT(3) | ||
121 | #define MSGDMA_CSR_STAT_RESP_BUF_FULL BIT(4) | ||
122 | #define MSGDMA_CSR_STAT_STOPPED BIT(5) | ||
123 | #define MSGDMA_CSR_STAT_RESETTING BIT(6) | ||
124 | #define MSGDMA_CSR_STAT_STOPPED_ON_ERR BIT(7) | ||
125 | #define MSGDMA_CSR_STAT_STOPPED_ON_EARLY BIT(8) | ||
126 | #define MSGDMA_CSR_STAT_IRQ BIT(9) | ||
127 | #define MSGDMA_CSR_STAT_MASK GENMASK(9, 0) | ||
128 | #define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ GENMASK(8, 0) | ||
129 | |||
130 | #define DESC_EMPTY (MSGDMA_CSR_STAT_DESC_BUF_EMPTY | \ | ||
131 | MSGDMA_CSR_STAT_RESP_BUF_EMPTY) | ||
132 | |||
133 | /* mSGDMA CSR control register bit definitions */ | ||
134 | #define MSGDMA_CSR_CTL_STOP BIT(0) | ||
135 | #define MSGDMA_CSR_CTL_RESET BIT(1) | ||
136 | #define MSGDMA_CSR_CTL_STOP_ON_ERR BIT(2) | ||
137 | #define MSGDMA_CSR_CTL_STOP_ON_EARLY BIT(3) | ||
138 | #define MSGDMA_CSR_CTL_GLOBAL_INTR BIT(4) | ||
139 | #define MSGDMA_CSR_CTL_STOP_DESCS BIT(5) | ||
140 | |||
141 | /* mSGDMA CSR fill level bits */ | ||
142 | #define MSGDMA_CSR_WR_FILL_LEVEL_GET(v) (((v) & 0xffff0000) >> 16) | ||
143 | #define MSGDMA_CSR_RD_FILL_LEVEL_GET(v) ((v) & 0x0000ffff) | ||
144 | #define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v) ((v) & 0x0000ffff) | ||
145 | |||
146 | #define MSGDMA_CSR_SEQ_NUM_GET(v) (((v) & 0xffff0000) >> 16) | ||
147 | |||
148 | /* mSGDMA response register map */ | ||
149 | #define MSGDMA_RESP_BYTES_TRANSFERRED 0x00 | ||
150 | #define MSGDMA_RESP_STATUS 0x04 | ||
151 | |||
152 | /* mSGDMA response register bit definitions */ | ||
153 | #define MSGDMA_RESP_EARLY_TERM BIT(8) | ||
154 | #define MSGDMA_RESP_ERR_MASK 0xff | ||
155 | |||
156 | /** | ||
157 | * struct msgdma_sw_desc - implements a sw descriptor | ||
158 | * @async_tx: support for the async_tx api | ||
159 | * @hw_desc: associated HW descriptor | ||
160 | * @node: node of the free SW descriptors list | ||
161 | */ | ||
162 | struct msgdma_sw_desc { | ||
163 | struct dma_async_tx_descriptor async_tx; | ||
164 | struct msgdma_extended_desc hw_desc; | ||
165 | struct list_head node; | ||
166 | struct list_head tx_list; | ||
167 | }; | ||
168 | |||
169 | /** | ||
170 | * struct msgdma_device - DMA device structure | ||
171 | */ | ||
172 | struct msgdma_device { | ||
173 | spinlock_t lock; | ||
174 | struct device *dev; | ||
175 | struct tasklet_struct irq_tasklet; | ||
176 | struct list_head pending_list; | ||
177 | struct list_head free_list; | ||
178 | struct list_head active_list; | ||
179 | struct list_head done_list; | ||
180 | u32 desc_free_cnt; | ||
181 | bool idle; | ||
182 | |||
183 | struct dma_device dmadev; | ||
184 | struct dma_chan dmachan; | ||
185 | dma_addr_t hw_desq; | ||
186 | struct msgdma_sw_desc *sw_desq; | ||
187 | unsigned int npendings; | ||
188 | |||
189 | struct dma_slave_config slave_cfg; | ||
190 | |||
191 | int irq; | ||
192 | |||
193 | /* mSGDMA controller */ | ||
194 | void __iomem *csr; | ||
195 | |||
196 | /* mSGDMA descriptors */ | ||
197 | void __iomem *desc; | ||
198 | |||
199 | /* mSGDMA response */ | ||
200 | void __iomem *resp; | ||
201 | }; | ||
202 | |||
203 | #define to_mdev(chan) container_of(chan, struct msgdma_device, dmachan) | ||
204 | #define tx_to_desc(tx) container_of(tx, struct msgdma_sw_desc, async_tx) | ||
205 | |||
206 | /** | ||
207 | * msgdma_get_descriptor - Get the sw descriptor from the pool | ||
208 | * @mdev: Pointer to the Altera mSGDMA device structure | ||
209 | * | ||
210 | * Return: The sw descriptor | ||
211 | */ | ||
212 | static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) | ||
213 | { | ||
214 | struct msgdma_sw_desc *desc; | ||
215 | |||
216 | spin_lock_bh(&mdev->lock); | ||
217 | desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); | ||
218 | list_del(&desc->node); | ||
219 | spin_unlock_bh(&mdev->lock); | ||
220 | |||
221 | INIT_LIST_HEAD(&desc->tx_list); | ||
222 | |||
223 | return desc; | ||
224 | } | ||
225 | |||
226 | /** | ||
227 | * msgdma_free_descriptor - Free descriptor | ||
228 | * @mdev: Pointer to the Altera mSGDMA device structure | ||
229 | * @desc: Transaction descriptor pointer | ||
230 | */ | ||
231 | static void msgdma_free_descriptor(struct msgdma_device *mdev, | ||
232 | struct msgdma_sw_desc *desc) | ||
233 | { | ||
234 | struct msgdma_sw_desc *child, *next; | ||
235 | |||
236 | mdev->desc_free_cnt++; | ||
237 | list_add_tail(&desc->node, &mdev->free_list); | ||
238 | list_for_each_entry_safe(child, next, &desc->tx_list, node) { | ||
239 | mdev->desc_free_cnt++; | ||
240 | list_move_tail(&child->node, &mdev->free_list); | ||
241 | } | ||
242 | } | ||
243 | |||
244 | /** | ||
245 | * msgdma_free_desc_list - Free descriptors list | ||
246 | * @mdev: Pointer to the Altera mSGDMA device structure | ||
248 | * @list: List of descriptors to free | ||
248 | */ | ||
249 | static void msgdma_free_desc_list(struct msgdma_device *mdev, | ||
250 | struct list_head *list) | ||
251 | { | ||
252 | struct msgdma_sw_desc *desc, *next; | ||
253 | |||
254 | list_for_each_entry_safe(desc, next, list, node) | ||
255 | msgdma_free_descriptor(mdev, desc); | ||
256 | } | ||
257 | |||
258 | /** | ||
259 | * msgdma_desc_config - Configure the descriptor | ||
260 | * @desc: Hw descriptor pointer | ||
261 | * @dst: Destination buffer address | ||
262 | * @src: Source buffer address | ||
263 | * @len: Transfer length | ||
264 | */ | ||
265 | static void msgdma_desc_config(struct msgdma_extended_desc *desc, | ||
266 | dma_addr_t dst, dma_addr_t src, size_t len, | ||
267 | u32 stride) | ||
268 | { | ||
269 | /* Set lower 32bits of src & dst addresses in the descriptor */ | ||
270 | desc->read_addr_lo = lower_32_bits(src); | ||
271 | desc->write_addr_lo = lower_32_bits(dst); | ||
272 | |||
273 | /* Set upper 32bits of src & dst addresses in the descriptor */ | ||
274 | desc->read_addr_hi = upper_32_bits(src); | ||
275 | desc->write_addr_hi = upper_32_bits(dst); | ||
276 | |||
277 | desc->len = len; | ||
278 | desc->stride = stride; | ||
279 | desc->burst_seq_num = 0; /* 0 will result in max burst length */ | ||
280 | |||
281 | /* | ||
282 | * Don't set interrupt on xfer end yet, this will be done later | ||
283 | * for the "last" descriptor | ||
284 | */ | ||
285 | desc->control = MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO | | ||
286 | MSGDMA_DESC_CTL_END_ON_LEN; | ||
287 | } | ||
288 | |||
289 | /** | ||
290 | * msgdma_desc_config_eod - Mark the descriptor as end descriptor | ||
291 | * @desc: Hw descriptor pointer | ||
292 | */ | ||
293 | static void msgdma_desc_config_eod(struct msgdma_extended_desc *desc) | ||
294 | { | ||
295 | desc->control |= MSGDMA_DESC_CTL_TR_COMP_IRQ; | ||
296 | } | ||
297 | |||
298 | /** | ||
299 | * msgdma_tx_submit - Submit DMA transaction | ||
300 | * @tx: Async transaction descriptor pointer | ||
301 | * | ||
302 | * Return: cookie value | ||
303 | */ | ||
304 | static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
305 | { | ||
306 | struct msgdma_device *mdev = to_mdev(tx->chan); | ||
307 | struct msgdma_sw_desc *new; | ||
308 | dma_cookie_t cookie; | ||
309 | |||
310 | new = tx_to_desc(tx); | ||
311 | spin_lock_bh(&mdev->lock); | ||
312 | cookie = dma_cookie_assign(tx); | ||
313 | |||
314 | list_add_tail(&new->node, &mdev->pending_list); | ||
315 | spin_unlock_bh(&mdev->lock); | ||
316 | |||
317 | return cookie; | ||
318 | } | ||
319 | |||
320 | /** | ||
321 | * msgdma_prep_memcpy - prepare descriptors for memcpy transaction | ||
322 | * @dchan: DMA channel | ||
323 | * @dma_dst: Destination buffer address | ||
324 | * @dma_src: Source buffer address | ||
325 | * @len: Transfer length | ||
326 | * @flags: transfer ack flags | ||
327 | * | ||
328 | * Return: Async transaction descriptor on success and NULL on failure | ||
329 | */ | ||
330 | static struct dma_async_tx_descriptor * | ||
331 | msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, | ||
332 | dma_addr_t dma_src, size_t len, ulong flags) | ||
333 | { | ||
334 | struct msgdma_device *mdev = to_mdev(dchan); | ||
335 | struct msgdma_sw_desc *new, *first = NULL; | ||
336 | struct msgdma_extended_desc *desc; | ||
337 | size_t copy; | ||
338 | u32 desc_cnt; | ||
339 | |||
340 | desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN); | ||
341 | |||
342 | spin_lock_bh(&mdev->lock); | ||
343 | if (desc_cnt > mdev->desc_free_cnt) { | ||
344 | spin_unlock_bh(&mdev->lock); | ||
345 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); | ||
346 | return NULL; | ||
347 | } | ||
348 | mdev->desc_free_cnt -= desc_cnt; | ||
349 | spin_unlock_bh(&mdev->lock); | ||
350 | |||
351 | do { | ||
352 | /* Allocate and populate the descriptor */ | ||
353 | new = msgdma_get_descriptor(mdev); | ||
354 | |||
355 | copy = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN); | ||
356 | desc = &new->hw_desc; | ||
357 | msgdma_desc_config(desc, dma_dst, dma_src, copy, | ||
358 | MSGDMA_DESC_STRIDE_RW); | ||
359 | len -= copy; | ||
360 | dma_src += copy; | ||
361 | dma_dst += copy; | ||
362 | if (!first) | ||
363 | first = new; | ||
364 | else | ||
365 | list_add_tail(&new->node, &first->tx_list); | ||
366 | } while (len); | ||
367 | |||
368 | msgdma_desc_config_eod(desc); | ||
369 | async_tx_ack(&first->async_tx); | ||
370 | first->async_tx.flags = flags; | ||
371 | |||
372 | return &first->async_tx; | ||
373 | } | ||
374 | |||
375 | /** | ||
376 | * msgdma_prep_slave_sg - prepare descriptors for a slave sg transaction | ||
377 | * | ||
378 | * @dchan: DMA channel | ||
379 | * @sgl: Scatterlist to transfer (source or destination, depending on @dir) | ||
380 | * @sg_len: Number of entries in the scatterlist | ||
381 | * @dir: DMA transfer direction | ||
382 | * @flags: transfer ack flags | ||
383 | * @context: transfer context (unused) | ||
384 | */ | ||
385 | static struct dma_async_tx_descriptor * | ||
386 | msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, | ||
387 | unsigned int sg_len, enum dma_transfer_direction dir, | ||
388 | unsigned long flags, void *context) | ||
389 | |||
390 | { | ||
391 | struct msgdma_device *mdev = to_mdev(dchan); | ||
392 | struct dma_slave_config *cfg = &mdev->slave_cfg; | ||
393 | struct msgdma_sw_desc *new, *first = NULL; | ||
394 | void *desc = NULL; | ||
395 | size_t len, avail; | ||
396 | dma_addr_t dma_dst, dma_src; | ||
397 | u32 desc_cnt = 0, i; | ||
398 | struct scatterlist *sg; | ||
399 | u32 stride; | ||
400 | |||
401 | for_each_sg(sgl, sg, sg_len, i) | ||
402 | desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); | ||
403 | |||
404 | spin_lock_bh(&mdev->lock); | ||
405 | if (desc_cnt > mdev->desc_free_cnt) { | ||
406 | spin_unlock_bh(&mdev->lock); | ||
407 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); | ||
408 | return NULL; | ||
409 | } | ||
410 | mdev->desc_free_cnt -= desc_cnt; | ||
411 | spin_unlock_bh(&mdev->lock); | ||
412 | |||
413 | avail = sg_dma_len(sgl); | ||
414 | |||
415 | /* Run until we are out of scatterlist entries */ | ||
416 | while (true) { | ||
417 | /* Allocate and populate the descriptor */ | ||
418 | new = msgdma_get_descriptor(mdev); | ||
419 | |||
420 | desc = &new->hw_desc; | ||
421 | len = min_t(size_t, avail, MSGDMA_MAX_TRANS_LEN); | ||
422 | |||
423 | if (dir == DMA_MEM_TO_DEV) { | ||
424 | dma_src = sg_dma_address(sgl) + sg_dma_len(sgl) - avail; | ||
425 | dma_dst = cfg->dst_addr; | ||
426 | stride = MSGDMA_DESC_STRIDE_RD; | ||
427 | } else { | ||
428 | dma_src = cfg->src_addr; | ||
429 | dma_dst = sg_dma_address(sgl) + sg_dma_len(sgl) - avail; | ||
430 | stride = MSGDMA_DESC_STRIDE_WR; | ||
431 | } | ||
432 | msgdma_desc_config(desc, dma_dst, dma_src, len, stride); | ||
433 | avail -= len; | ||
434 | |||
435 | if (!first) | ||
436 | first = new; | ||
437 | else | ||
438 | list_add_tail(&new->node, &first->tx_list); | ||
439 | |||
440 | /* Fetch the next scatterlist entry */ | ||
441 | if (avail == 0) { | ||
442 | if (sg_len == 0) | ||
443 | break; | ||
444 | sgl = sg_next(sgl); | ||
445 | if (sgl == NULL) | ||
446 | break; | ||
447 | sg_len--; | ||
448 | avail = sg_dma_len(sgl); | ||
449 | } | ||
450 | } | ||
451 | |||
452 | msgdma_desc_config_eod(desc); | ||
453 | first->async_tx.flags = flags; | ||
454 | |||
455 | return &first->async_tx; | ||
456 | } | ||
457 | |||
458 | static int msgdma_dma_config(struct dma_chan *dchan, | ||
459 | struct dma_slave_config *config) | ||
460 | { | ||
461 | struct msgdma_device *mdev = to_mdev(dchan); | ||
462 | |||
463 | memcpy(&mdev->slave_cfg, config, sizeof(*config)); | ||
464 | |||
465 | return 0; | ||
466 | } | ||
467 | |||
468 | static void msgdma_reset(struct msgdma_device *mdev) | ||
469 | { | ||
470 | u32 val; | ||
471 | int ret; | ||
472 | |||
473 | /* Reset mSGDMA */ | ||
474 | iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS); | ||
475 | iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL); | ||
476 | |||
477 | ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val, | ||
478 | (val & MSGDMA_CSR_STAT_RESETTING) == 0, | ||
479 | 1, 10000); | ||
480 | if (ret) | ||
481 | dev_err(mdev->dev, "DMA channel did not reset\n"); | ||
482 | |||
483 | /* Clear all status bits */ | ||
484 | iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS); | ||
485 | |||
486 | /* Enable the DMA controller including interrupts */ | ||
487 | iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY | | ||
488 | MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL); | ||
489 | |||
490 | mdev->idle = true; | ||
491 | } | ||
492 | |||
493 | static void msgdma_copy_one(struct msgdma_device *mdev, | ||
494 | struct msgdma_sw_desc *desc) | ||
495 | { | ||
496 | void __iomem *hw_desc = mdev->desc; | ||
497 | |||
498 | /* | ||
499 | * Check if the DESC FIFO is not full. If it's full, we need to wait | ||
500 | * for at least one entry to become free again | ||
501 | */ | ||
502 | while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) & | ||
503 | MSGDMA_CSR_STAT_DESC_BUF_FULL) | ||
504 | mdelay(1); | ||
505 | |||
506 | /* | ||
507 | * The descriptor needs to get copied into the descriptor FIFO | ||
508 | * of the DMA controller. The descriptor will get flushed to the | ||
509 | * FIFO, once the last word (control word) is written. Since we | ||
510 | * are not 100% sure that memcpy() writes all words in the "correct" | ||
511 | * order (address from low to high) on all architectures, we make | ||
512 | * sure this control word is written last by writing it separately and | ||
513 | * adding some write-barriers here. | ||
514 | */ | ||
515 | memcpy((void __force *)hw_desc, &desc->hw_desc, | ||
516 | sizeof(desc->hw_desc) - sizeof(u32)); | ||
517 | |||
518 | /* Write control word last to flush this descriptor into the FIFO */ | ||
519 | mdev->idle = false; | ||
520 | wmb(); | ||
521 | iowrite32(desc->hw_desc.control, hw_desc + | ||
522 | offsetof(struct msgdma_extended_desc, control)); | ||
523 | wmb(); | ||
524 | } | ||
525 | |||
526 | /** | ||
527 | * msgdma_copy_desc_to_fifo - copy descriptor(s) into controller FIFO | ||
528 | * @mdev: Pointer to the Altera mSGDMA device structure | ||
529 | * @desc: Transaction descriptor pointer | ||
530 | */ | ||
531 | static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev, | ||
532 | struct msgdma_sw_desc *desc) | ||
533 | { | ||
534 | struct msgdma_sw_desc *sdesc, *next; | ||
535 | |||
536 | msgdma_copy_one(mdev, desc); | ||
537 | |||
538 | list_for_each_entry_safe(sdesc, next, &desc->tx_list, node) | ||
539 | msgdma_copy_one(mdev, sdesc); | ||
540 | } | ||
541 | |||
542 | /** | ||
543 | * msgdma_start_transfer - Initiate the new transfer | ||
544 | * @mdev: Pointer to the Altera mSGDMA device structure | ||
545 | */ | ||
546 | static void msgdma_start_transfer(struct msgdma_device *mdev) | ||
547 | { | ||
548 | struct msgdma_sw_desc *desc; | ||
549 | |||
550 | if (!mdev->idle) | ||
551 | return; | ||
552 | |||
553 | desc = list_first_entry_or_null(&mdev->pending_list, | ||
554 | struct msgdma_sw_desc, node); | ||
555 | if (!desc) | ||
556 | return; | ||
557 | |||
558 | list_splice_tail_init(&mdev->pending_list, &mdev->active_list); | ||
559 | msgdma_copy_desc_to_fifo(mdev, desc); | ||
560 | } | ||
561 | |||
562 | /** | ||
563 | * msgdma_issue_pending - Issue pending transactions | ||
564 | * @chan: DMA channel pointer | ||
565 | */ | ||
566 | static void msgdma_issue_pending(struct dma_chan *chan) | ||
567 | { | ||
568 | struct msgdma_device *mdev = to_mdev(chan); | ||
569 | |||
570 | spin_lock_bh(&mdev->lock); | ||
571 | msgdma_start_transfer(mdev); | ||
572 | spin_unlock_bh(&mdev->lock); | ||
573 | } | ||
574 | |||
575 | /** | ||
576 | * msgdma_chan_desc_cleanup - Cleanup the completed descriptors | ||
577 | * @mdev: Pointer to the Altera mSGDMA device structure | ||
578 | */ | ||
579 | static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev) | ||
580 | { | ||
581 | struct msgdma_sw_desc *desc, *next; | ||
582 | |||
583 | list_for_each_entry_safe(desc, next, &mdev->done_list, node) { | ||
584 | dma_async_tx_callback callback; | ||
585 | void *callback_param; | ||
586 | |||
587 | list_del(&desc->node); | ||
588 | |||
589 | callback = desc->async_tx.callback; | ||
590 | callback_param = desc->async_tx.callback_param; | ||
591 | if (callback) { | ||
592 | spin_unlock(&mdev->lock); | ||
593 | callback(callback_param); | ||
594 | spin_lock(&mdev->lock); | ||
595 | } | ||
596 | |||
597 | /* Run any dependencies, then free the descriptor */ | ||
598 | msgdma_free_descriptor(mdev, desc); | ||
599 | } | ||
600 | } | ||
601 | |||
602 | /** | ||
603 | * msgdma_complete_descriptor - Mark the active descriptor as complete | ||
604 | * @mdev: Pointer to the Altera mSGDMA device structure | ||
605 | */ | ||
606 | static void msgdma_complete_descriptor(struct msgdma_device *mdev) | ||
607 | { | ||
608 | struct msgdma_sw_desc *desc; | ||
609 | |||
610 | desc = list_first_entry_or_null(&mdev->active_list, | ||
611 | struct msgdma_sw_desc, node); | ||
612 | if (!desc) | ||
613 | return; | ||
614 | list_del(&desc->node); | ||
615 | dma_cookie_complete(&desc->async_tx); | ||
616 | list_add_tail(&desc->node, &mdev->done_list); | ||
617 | } | ||
618 | |||
619 | /** | ||
620 | * msgdma_free_descriptors - Free channel descriptors | ||
621 | * @mdev: Pointer to the Altera mSGDMA device structure | ||
622 | */ | ||
623 | static void msgdma_free_descriptors(struct msgdma_device *mdev) | ||
624 | { | ||
625 | msgdma_free_desc_list(mdev, &mdev->active_list); | ||
626 | msgdma_free_desc_list(mdev, &mdev->pending_list); | ||
627 | msgdma_free_desc_list(mdev, &mdev->done_list); | ||
628 | } | ||
629 | |||
630 | /** | ||
631 | * msgdma_free_chan_resources - Free channel resources | ||
632 | * @dchan: DMA channel pointer | ||
633 | */ | ||
634 | static void msgdma_free_chan_resources(struct dma_chan *dchan) | ||
635 | { | ||
636 | struct msgdma_device *mdev = to_mdev(dchan); | ||
637 | |||
638 | spin_lock_bh(&mdev->lock); | ||
639 | msgdma_free_descriptors(mdev); | ||
640 | spin_unlock_bh(&mdev->lock); | ||
641 | kfree(mdev->sw_desq); | ||
642 | } | ||
643 | |||
644 | /** | ||
645 | * msgdma_alloc_chan_resources - Allocate channel resources | ||
646 | * @dchan: DMA channel | ||
647 | * | ||
648 | * Return: Number of descriptors on success and failure value on error | ||
649 | */ | ||
650 | static int msgdma_alloc_chan_resources(struct dma_chan *dchan) | ||
651 | { | ||
652 | struct msgdma_device *mdev = to_mdev(dchan); | ||
653 | struct msgdma_sw_desc *desc; | ||
654 | int i; | ||
655 | |||
656 | mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT); | ||
657 | if (!mdev->sw_desq) | ||
658 | return -ENOMEM; | ||
659 | |||
660 | mdev->idle = true; | ||
661 | mdev->desc_free_cnt = MSGDMA_DESC_NUM; | ||
662 | |||
663 | INIT_LIST_HEAD(&mdev->free_list); | ||
664 | |||
665 | for (i = 0; i < MSGDMA_DESC_NUM; i++) { | ||
666 | desc = mdev->sw_desq + i; | ||
667 | dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan); | ||
668 | desc->async_tx.tx_submit = msgdma_tx_submit; | ||
669 | list_add_tail(&desc->node, &mdev->free_list); | ||
670 | } | ||
671 | |||
672 | return MSGDMA_DESC_NUM; | ||
673 | } | ||
674 | |||
675 | /** | ||
676 | * msgdma_tasklet - Completion tasklet handler | ||
677 | * @data: Pointer to the Altera mSGDMA device structure | ||
678 | */ | ||
679 | static void msgdma_tasklet(unsigned long data) | ||
680 | { | ||
681 | struct msgdma_device *mdev = (struct msgdma_device *)data; | ||
682 | u32 count; | ||
683 | u32 __maybe_unused size; | ||
684 | u32 __maybe_unused status; | ||
685 | |||
686 | spin_lock(&mdev->lock); | ||
687 | |||
688 | /* Read number of responses that are available */ | ||
689 | count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); | ||
690 | dev_dbg(mdev->dev, "%s (%d): response count=%d\n", | ||
691 | __func__, __LINE__, count); | ||
692 | |||
693 | while (count--) { | ||
694 | /* | ||
695 | * Read both longwords to purge this response from the FIFO | ||
696 | * On Avalon-MM implementations, size and status do not | ||
697 | * have any real values, like transferred bytes or error | ||
698 | * bits. So we need to just drop these values. | ||
699 | */ | ||
700 | size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED); | ||
701 | status = ioread32(mdev->resp + MSGDMA_RESP_STATUS); | ||
702 | |||
703 | msgdma_complete_descriptor(mdev); | ||
704 | msgdma_chan_desc_cleanup(mdev); | ||
705 | } | ||
706 | |||
707 | spin_unlock(&mdev->lock); | ||
708 | } | ||
709 | |||
710 | /** | ||
711 | * msgdma_irq_handler - Altera mSGDMA Interrupt handler | ||
712 | * @irq: IRQ number | ||
713 | * @data: Pointer to the Altera mSGDMA device structure | ||
714 | * | ||
715 | * Return: IRQ_HANDLED/IRQ_NONE | ||
716 | */ | ||
717 | static irqreturn_t msgdma_irq_handler(int irq, void *data) | ||
718 | { | ||
719 | struct msgdma_device *mdev = data; | ||
720 | u32 status; | ||
721 | |||
722 | status = ioread32(mdev->csr + MSGDMA_CSR_STATUS); | ||
723 | if ((status & MSGDMA_CSR_STAT_BUSY) == 0) { | ||
724 | /* Start next transfer if the DMA controller is idle */ | ||
725 | spin_lock(&mdev->lock); | ||
726 | mdev->idle = true; | ||
727 | msgdma_start_transfer(mdev); | ||
728 | spin_unlock(&mdev->lock); | ||
729 | } | ||
730 | |||
731 | tasklet_schedule(&mdev->irq_tasklet); | ||
732 | |||
733 | /* Clear interrupt in mSGDMA controller */ | ||
734 | iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS); | ||
735 | |||
736 | return IRQ_HANDLED; | ||
737 | } | ||
738 | |||
739 | /** | ||
740 | * msgdma_dev_remove - Device remove function | ||
741 | * @mdev: Pointer to the Altera mSGDMA device structure | ||
742 | */ | ||
743 | static void msgdma_dev_remove(struct msgdma_device *mdev) | ||
744 | { | ||
745 | if (!mdev) | ||
746 | return; | ||
747 | |||
748 | devm_free_irq(mdev->dev, mdev->irq, mdev); | ||
749 | tasklet_kill(&mdev->irq_tasklet); | ||
750 | list_del(&mdev->dmachan.device_node); | ||
751 | } | ||
752 | |||
753 | static int request_and_map(struct platform_device *pdev, const char *name, | ||
754 | struct resource **res, void __iomem **ptr) | ||
755 | { | ||
756 | struct resource *region; | ||
757 | struct device *device = &pdev->dev; | ||
758 | |||
759 | *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); | ||
760 | if (*res == NULL) { | ||
761 | dev_err(device, "resource %s not defined\n", name); | ||
762 | return -ENODEV; | ||
763 | } | ||
764 | |||
765 | region = devm_request_mem_region(device, (*res)->start, | ||
766 | resource_size(*res), dev_name(device)); | ||
767 | if (region == NULL) { | ||
768 | dev_err(device, "unable to request %s\n", name); | ||
769 | return -EBUSY; | ||
770 | } | ||
771 | |||
772 | *ptr = devm_ioremap_nocache(device, region->start, | ||
773 | resource_size(region)); | ||
774 | if (*ptr == NULL) { | ||
775 | dev_err(device, "ioremap_nocache of %s failed!", name); | ||
776 | return -ENOMEM; | ||
777 | } | ||
778 | |||
779 | return 0; | ||
780 | } | ||
781 | |||
782 | /** | ||
783 | * msgdma_probe - Driver probe function | ||
784 | * @pdev: Pointer to the platform_device structure | ||
785 | * | ||
786 | * Return: '0' on success and failure value on error | ||
787 | */ | ||
788 | static int msgdma_probe(struct platform_device *pdev) | ||
789 | { | ||
790 | struct msgdma_device *mdev; | ||
791 | struct dma_device *dma_dev; | ||
792 | struct resource *dma_res; | ||
793 | int ret; | ||
794 | |||
795 | mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT); | ||
796 | if (!mdev) | ||
797 | return -ENOMEM; | ||
798 | |||
799 | mdev->dev = &pdev->dev; | ||
800 | |||
801 | /* Map CSR space */ | ||
802 | ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr); | ||
803 | if (ret) | ||
804 | return ret; | ||
805 | |||
806 | /* Map (extended) descriptor space */ | ||
807 | ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc); | ||
808 | if (ret) | ||
809 | return ret; | ||
810 | |||
811 | /* Map response space */ | ||
812 | ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp); | ||
813 | if (ret) | ||
814 | return ret; | ||
815 | |||
816 | platform_set_drvdata(pdev, mdev); | ||
817 | |||
818 | /* Get interrupt nr from platform data */ | ||
819 | mdev->irq = platform_get_irq(pdev, 0); | ||
820 | if (mdev->irq < 0) | ||
821 | return -ENXIO; | ||
822 | |||
823 | ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler, | ||
824 | 0, dev_name(&pdev->dev), mdev); | ||
825 | if (ret) | ||
826 | return ret; | ||
827 | |||
828 | tasklet_init(&mdev->irq_tasklet, msgdma_tasklet, (unsigned long)mdev); | ||
829 | |||
830 | dma_cookie_init(&mdev->dmachan); | ||
831 | |||
832 | spin_lock_init(&mdev->lock); | ||
833 | |||
834 | INIT_LIST_HEAD(&mdev->active_list); | ||
835 | INIT_LIST_HEAD(&mdev->pending_list); | ||
836 | INIT_LIST_HEAD(&mdev->done_list); | ||
837 | INIT_LIST_HEAD(&mdev->free_list); | ||
838 | |||
839 | dma_dev = &mdev->dmadev; | ||
840 | |||
841 | /* Set DMA capabilities */ | ||
842 | dma_cap_zero(dma_dev->cap_mask); | ||
843 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | ||
844 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); | ||
845 | |||
846 | dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
847 | dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
848 | dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM) | | ||
849 | BIT(DMA_MEM_TO_MEM); | ||
850 | dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; | ||
851 | |||
852 | /* Init DMA link list */ | ||
853 | INIT_LIST_HEAD(&dma_dev->channels); | ||
854 | |||
855 | /* Set base routines */ | ||
856 | dma_dev->device_tx_status = dma_cookie_status; | ||
857 | dma_dev->device_issue_pending = msgdma_issue_pending; | ||
858 | dma_dev->dev = &pdev->dev; | ||
859 | |||
860 | dma_dev->copy_align = DMAENGINE_ALIGN_4_BYTES; | ||
861 | dma_dev->device_prep_dma_memcpy = msgdma_prep_memcpy; | ||
862 | dma_dev->device_prep_slave_sg = msgdma_prep_slave_sg; | ||
863 | dma_dev->device_config = msgdma_dma_config; | ||
864 | |||
865 | dma_dev->device_alloc_chan_resources = msgdma_alloc_chan_resources; | ||
866 | dma_dev->device_free_chan_resources = msgdma_free_chan_resources; | ||
867 | |||
868 | mdev->dmachan.device = dma_dev; | ||
869 | list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels); | ||
870 | |||
871 | /* Set DMA mask to 64 bits */ | ||
872 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); | ||
873 | if (ret) { | ||
874 | dev_warn(&pdev->dev, "unable to set coherent mask to 64"); | ||
875 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); | ||
876 | if (ret) | ||
877 | goto fail; | ||
878 | } | ||
879 | |||
880 | msgdma_reset(mdev); | ||
881 | |||
882 | ret = dma_async_device_register(dma_dev); | ||
883 | if (ret) | ||
884 | goto fail; | ||
885 | |||
886 | dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n"); | ||
887 | |||
888 | return 0; | ||
889 | |||
890 | fail: | ||
891 | msgdma_dev_remove(mdev); | ||
892 | |||
893 | return ret; | ||
894 | } | ||
895 | |||
896 | /** | ||
897 | * msgdma_remove - Driver remove function | ||
898 | * @pdev: Pointer to the platform_device structure | ||
899 | * | ||
900 | * Return: Always '0' | ||
901 | */ | ||
902 | static int msgdma_remove(struct platform_device *pdev) | ||
903 | { | ||
904 | struct msgdma_device *mdev = platform_get_drvdata(pdev); | ||
905 | |||
906 | dma_async_device_unregister(&mdev->dmadev); | ||
907 | msgdma_dev_remove(mdev); | ||
908 | |||
909 | dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n"); | ||
910 | |||
911 | return 0; | ||
912 | } | ||
913 | |||
914 | static struct platform_driver msgdma_driver = { | ||
915 | .driver = { | ||
916 | .name = "altera-msgdma", | ||
917 | }, | ||
918 | .probe = msgdma_probe, | ||
919 | .remove = msgdma_remove, | ||
920 | }; | ||
921 | |||
922 | module_platform_driver(msgdma_driver); | ||
923 | |||
924 | MODULE_ALIAS("platform:altera-msgdma"); | ||
925 | MODULE_DESCRIPTION("Altera mSGDMA driver"); | ||
926 | MODULE_AUTHOR("Stefan Roese <sr@denx.de>"); | ||
927 | MODULE_LICENSE("GPL"); | ||
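To put the new driver in context, here is a hedged sketch of a generic dmaengine client driving a single memcpy through any DMA_MEMCPY-capable channel, such as the one registered above; the function name and buffer handling are placeholders, not code from this patch.

    #include <linux/dmaengine.h>

    /*
     * Hypothetical illustration: copy "len" bytes between two buffers that
     * are already DMA-mapped, using any channel advertising DMA_MEMCPY.
     */
    static int example_dma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
    {
            struct dma_async_tx_descriptor *tx;
            struct dma_chan *chan;
            dma_cookie_t cookie;
            dma_cap_mask_t mask;
            int ret = 0;

            dma_cap_zero(mask);
            dma_cap_set(DMA_MEMCPY, mask);

            chan = dma_request_channel(mask, NULL, NULL);
            if (!chan)
                    return -ENODEV;

            tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
            if (!tx) {
                    ret = -EIO;
                    goto out;
            }

            cookie = dmaengine_submit(tx);
            dma_async_issue_pending(chan);

            /* Polling helper for brevity; real clients usually use callbacks. */
            if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
                    ret = -ETIMEDOUT;
    out:
            dma_release_channel(chan);
            return ret;
    }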
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 13cc95c0474c..b52b0d55247e 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -3033,7 +3033,7 @@ static struct vendor_data vendor_ftdmac020 = { | |||
3033 | .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, | 3033 | .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, |
3034 | }; | 3034 | }; |
3035 | 3035 | ||
3036 | static struct amba_id pl08x_ids[] = { | 3036 | static const struct amba_id pl08x_ids[] = { |
3037 | /* Samsung PL080S variant */ | 3037 | /* Samsung PL080S variant */ |
3038 | { | 3038 | { |
3039 | .id = 0x0a141080, | 3039 | .id = 0x0a141080, |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 1baf3404a365..fbab271b3bf9 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1203,138 +1203,6 @@ err: | |||
1203 | } | 1203 | } |
1204 | 1204 | ||
1205 | /** | 1205 | /** |
1206 | * atc_prep_dma_sg - prepare memory to memory scather-gather operation | ||
1207 | * @chan: the channel to prepare operation on | ||
1208 | * @dst_sg: destination scatterlist | ||
1209 | * @dst_nents: number of destination scatterlist entries | ||
1210 | * @src_sg: source scatterlist | ||
1211 | * @src_nents: number of source scatterlist entries | ||
1212 | * @flags: tx descriptor status flags | ||
1213 | */ | ||
1214 | static struct dma_async_tx_descriptor * | ||
1215 | atc_prep_dma_sg(struct dma_chan *chan, | ||
1216 | struct scatterlist *dst_sg, unsigned int dst_nents, | ||
1217 | struct scatterlist *src_sg, unsigned int src_nents, | ||
1218 | unsigned long flags) | ||
1219 | { | ||
1220 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | ||
1221 | struct at_desc *desc = NULL; | ||
1222 | struct at_desc *first = NULL; | ||
1223 | struct at_desc *prev = NULL; | ||
1224 | unsigned int src_width; | ||
1225 | unsigned int dst_width; | ||
1226 | size_t xfer_count; | ||
1227 | u32 ctrla; | ||
1228 | u32 ctrlb; | ||
1229 | size_t dst_len = 0, src_len = 0; | ||
1230 | dma_addr_t dst = 0, src = 0; | ||
1231 | size_t len = 0, total_len = 0; | ||
1232 | |||
1233 | if (unlikely(dst_nents == 0 || src_nents == 0)) | ||
1234 | return NULL; | ||
1235 | |||
1236 | if (unlikely(dst_sg == NULL || src_sg == NULL)) | ||
1237 | return NULL; | ||
1238 | |||
1239 | ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN | ||
1240 | | ATC_SRC_ADDR_MODE_INCR | ||
1241 | | ATC_DST_ADDR_MODE_INCR | ||
1242 | | ATC_FC_MEM2MEM; | ||
1243 | |||
1244 | /* | ||
1245 | * loop until there is either no more source or no more destination | ||
1246 | * scatterlist entry | ||
1247 | */ | ||
1248 | while (true) { | ||
1249 | |||
1250 | /* prepare the next transfer */ | ||
1251 | if (dst_len == 0) { | ||
1252 | |||
1253 | /* no more destination scatterlist entries */ | ||
1254 | if (!dst_sg || !dst_nents) | ||
1255 | break; | ||
1256 | |||
1257 | dst = sg_dma_address(dst_sg); | ||
1258 | dst_len = sg_dma_len(dst_sg); | ||
1259 | |||
1260 | dst_sg = sg_next(dst_sg); | ||
1261 | dst_nents--; | ||
1262 | } | ||
1263 | |||
1264 | if (src_len == 0) { | ||
1265 | |||
1266 | /* no more source scatterlist entries */ | ||
1267 | if (!src_sg || !src_nents) | ||
1268 | break; | ||
1269 | |||
1270 | src = sg_dma_address(src_sg); | ||
1271 | src_len = sg_dma_len(src_sg); | ||
1272 | |||
1273 | src_sg = sg_next(src_sg); | ||
1274 | src_nents--; | ||
1275 | } | ||
1276 | |||
1277 | len = min_t(size_t, src_len, dst_len); | ||
1278 | if (len == 0) | ||
1279 | continue; | ||
1280 | |||
1281 | /* take care for the alignment */ | ||
1282 | src_width = dst_width = atc_get_xfer_width(src, dst, len); | ||
1283 | |||
1284 | ctrla = ATC_SRC_WIDTH(src_width) | | ||
1285 | ATC_DST_WIDTH(dst_width); | ||
1286 | |||
1287 | /* | ||
1288 | * The number of transfers to set up refer to the source width | ||
1289 | * that depends on the alignment. | ||
1290 | */ | ||
1291 | xfer_count = len >> src_width; | ||
1292 | if (xfer_count > ATC_BTSIZE_MAX) { | ||
1293 | xfer_count = ATC_BTSIZE_MAX; | ||
1294 | len = ATC_BTSIZE_MAX << src_width; | ||
1295 | } | ||
1296 | |||
1297 | /* create the transfer */ | ||
1298 | desc = atc_desc_get(atchan); | ||
1299 | if (!desc) | ||
1300 | goto err_desc_get; | ||
1301 | |||
1302 | desc->lli.saddr = src; | ||
1303 | desc->lli.daddr = dst; | ||
1304 | desc->lli.ctrla = ctrla | xfer_count; | ||
1305 | desc->lli.ctrlb = ctrlb; | ||
1306 | |||
1307 | desc->txd.cookie = 0; | ||
1308 | desc->len = len; | ||
1309 | |||
1310 | atc_desc_chain(&first, &prev, desc); | ||
1311 | |||
1312 | /* update the lengths and addresses for the next loop cycle */ | ||
1313 | dst_len -= len; | ||
1314 | src_len -= len; | ||
1315 | dst += len; | ||
1316 | src += len; | ||
1317 | |||
1318 | total_len += len; | ||
1319 | } | ||
1320 | |||
1321 | /* First descriptor of the chain embedds additional information */ | ||
1322 | first->txd.cookie = -EBUSY; | ||
1323 | first->total_len = total_len; | ||
1324 | |||
1325 | /* set end-of-link to the last link descriptor of list*/ | ||
1326 | set_desc_eol(desc); | ||
1327 | |||
1328 | first->txd.flags = flags; /* client is in control of this ack */ | ||
1329 | |||
1330 | return &first->txd; | ||
1331 | |||
1332 | err_desc_get: | ||
1333 | atc_desc_put(atchan, first); | ||
1334 | return NULL; | ||
1335 | } | ||
1336 | |||
1337 | /** | ||
1338 | * atc_dma_cyclic_check_values | 1206 | * atc_dma_cyclic_check_values |
1339 | * Check for too big/unaligned periods and unaligned DMA buffer | 1207 | * Check for too big/unaligned periods and unaligned DMA buffer |
1340 | */ | 1208 | */ |
@@ -1933,14 +1801,12 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
1933 | 1801 | ||
1934 | /* setup platform data for each SoC */ | 1802 | /* setup platform data for each SoC */ |
1935 | dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask); | 1803 | dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask); |
1936 | dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask); | ||
1937 | dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask); | 1804 | dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask); |
1938 | dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask); | 1805 | dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask); |
1939 | dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask); | 1806 | dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask); |
1940 | dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask); | 1807 | dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask); |
1941 | dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask); | 1808 | dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask); |
1942 | dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask); | 1809 | dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask); |
1943 | dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask); | ||
1944 | 1810 | ||
1945 | /* get DMA parameters from controller type */ | 1811 | /* get DMA parameters from controller type */ |
1946 | plat_dat = at_dma_get_driver_data(pdev); | 1812 | plat_dat = at_dma_get_driver_data(pdev); |
@@ -2078,16 +1944,12 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
2078 | atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | 1944 | atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
2079 | } | 1945 | } |
2080 | 1946 | ||
2081 | if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask)) | ||
2082 | atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg; | ||
2083 | |||
2084 | dma_writel(atdma, EN, AT_DMA_ENABLE); | 1947 | dma_writel(atdma, EN, AT_DMA_ENABLE); |
2085 | 1948 | ||
2086 | dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n", | 1949 | dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n", |
2087 | dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", | 1950 | dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", |
2088 | dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "", | 1951 | dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "", |
2089 | dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", | 1952 | dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", |
2090 | dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "", | ||
2091 | plat_dat->nr_channels); | 1953 | plat_dat->nr_channels); |
2092 | 1954 | ||
2093 | dma_async_device_register(&atdma->dma_common); | 1955 | dma_async_device_register(&atdma->dma_common); |
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 7d4e0bcda9af..c00e3923d7d8 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -875,7 +875,7 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan, | |||
875 | dwidth = at_xdmac_align_width(chan, src | dst | chunk->size); | 875 | dwidth = at_xdmac_align_width(chan, src | dst | chunk->size); |
876 | if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) { | 876 | if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) { |
877 | dev_dbg(chan2dev(chan), | 877 | dev_dbg(chan2dev(chan), |
878 | "%s: chunk too big (%d, max size %lu)...\n", | 878 | "%s: chunk too big (%zu, max size %lu)...\n", |
879 | __func__, chunk->size, | 879 | __func__, chunk->size, |
880 | AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth); | 880 | AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth); |
881 | return NULL; | 881 | return NULL; |
@@ -956,7 +956,7 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, | |||
956 | if ((xt->numf > 1) && (xt->frame_size > 1)) | 956 | if ((xt->numf > 1) && (xt->frame_size > 1)) |
957 | return NULL; | 957 | return NULL; |
958 | 958 | ||
959 | dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", | 959 | dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n", |
960 | __func__, &xt->src_start, &xt->dst_start, xt->numf, | 960 | __func__, &xt->src_start, &xt->dst_start, xt->numf, |
961 | xt->frame_size, flags); | 961 | xt->frame_size, flags); |
962 | 962 | ||
@@ -990,7 +990,7 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, | |||
990 | dst_skip = chunk->size + dst_icg; | 990 | dst_skip = chunk->size + dst_icg; |
991 | 991 | ||
992 | dev_dbg(chan2dev(chan), | 992 | dev_dbg(chan2dev(chan), |
993 | "%s: chunk size=%d, src icg=%d, dst icg=%d\n", | 993 | "%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n", |
994 | __func__, chunk->size, src_icg, dst_icg); | 994 | __func__, chunk->size, src_icg, dst_icg); |
995 | 995 | ||
996 | desc = at_xdmac_interleaved_queue_desc(chan, atchan, | 996 | desc = at_xdmac_interleaved_queue_desc(chan, atchan, |
@@ -1207,7 +1207,7 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, | |||
1207 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | 1207 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); |
1208 | struct at_xdmac_desc *desc; | 1208 | struct at_xdmac_desc *desc; |
1209 | 1209 | ||
1210 | dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n", | 1210 | dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n", |
1211 | __func__, &dest, len, value, flags); | 1211 | __func__, &dest, len, value, flags); |
1212 | 1212 | ||
1213 | if (unlikely(!len)) | 1213 | if (unlikely(!len)) |
@@ -1883,8 +1883,11 @@ static int atmel_xdmac_resume(struct device *dev) | |||
1883 | struct at_xdmac_chan *atchan; | 1883 | struct at_xdmac_chan *atchan; |
1884 | struct dma_chan *chan, *_chan; | 1884 | struct dma_chan *chan, *_chan; |
1885 | int i; | 1885 | int i; |
1886 | int ret; | ||
1886 | 1887 | ||
1887 | clk_prepare_enable(atxdmac->clk); | 1888 | ret = clk_prepare_enable(atxdmac->clk); |
1889 | if (ret) | ||
1890 | return ret; | ||
1888 | 1891 | ||
1889 | /* Clear pending interrupts. */ | 1892 | /* Clear pending interrupts. */ |
1890 | for (i = 0; i < atxdmac->dma.chancnt; i++) { | 1893 | for (i = 0; i < atxdmac->dma.chancnt; i++) { |
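The resume hunk above stops ignoring the return value of clk_prepare_enable(), which can fail. A hedged sketch of the same pattern in isolation; the struct and function names here are illustrative, not part of the driver:

#include <linux/clk.h>
#include <linux/device.h>

struct example_dev {
        struct clk *clk;
};

static int example_resume(struct device *dev)
{
        struct example_dev *edev = dev_get_drvdata(dev);
        int ret;

        /* propagate clock failures instead of silently continuing */
        ret = clk_prepare_enable(edev->clk);
        if (ret)
                return ret;

        /* ... restore controller state here ... */
        return 0;
}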
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c index e41bbc7cb094..6c2c44724637 100644 --- a/drivers/dma/bcm-sba-raid.c +++ b/drivers/dma/bcm-sba-raid.c | |||
@@ -36,6 +36,7 @@ | |||
36 | */ | 36 | */ |
37 | 37 | ||
38 | #include <linux/bitops.h> | 38 | #include <linux/bitops.h> |
39 | #include <linux/debugfs.h> | ||
39 | #include <linux/dma-mapping.h> | 40 | #include <linux/dma-mapping.h> |
40 | #include <linux/dmaengine.h> | 41 | #include <linux/dmaengine.h> |
41 | #include <linux/list.h> | 42 | #include <linux/list.h> |
@@ -48,7 +49,8 @@ | |||
48 | 49 | ||
49 | #include "dmaengine.h" | 50 | #include "dmaengine.h" |
50 | 51 | ||
51 | /* SBA command related defines */ | 52 | /* ====== Driver macros and defines ===== */ |
53 | |||
52 | #define SBA_TYPE_SHIFT 48 | 54 | #define SBA_TYPE_SHIFT 48 |
53 | #define SBA_TYPE_MASK GENMASK(1, 0) | 55 | #define SBA_TYPE_MASK GENMASK(1, 0) |
54 | #define SBA_TYPE_A 0x0 | 56 | #define SBA_TYPE_A 0x0 |
@@ -82,39 +84,40 @@ | |||
82 | #define SBA_CMD_WRITE_BUFFER 0xc | 84 | #define SBA_CMD_WRITE_BUFFER 0xc |
83 | #define SBA_CMD_GALOIS 0xe | 85 | #define SBA_CMD_GALOIS 0xe |
84 | 86 | ||
87 | #define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192 | ||
88 | |||
85 | /* Driver helper macros */ | 89 | /* Driver helper macros */ |
86 | #define to_sba_request(tx) \ | 90 | #define to_sba_request(tx) \ |
87 | container_of(tx, struct sba_request, tx) | 91 | container_of(tx, struct sba_request, tx) |
88 | #define to_sba_device(dchan) \ | 92 | #define to_sba_device(dchan) \ |
89 | container_of(dchan, struct sba_device, dma_chan) | 93 | container_of(dchan, struct sba_device, dma_chan) |
90 | 94 | ||
91 | enum sba_request_state { | 95 | /* ===== Driver data structures ===== */ |
92 | SBA_REQUEST_STATE_FREE = 1, | 96 | |
93 | SBA_REQUEST_STATE_ALLOCED = 2, | 97 | enum sba_request_flags { |
94 | SBA_REQUEST_STATE_PENDING = 3, | 98 | SBA_REQUEST_STATE_FREE = 0x001, |
95 | SBA_REQUEST_STATE_ACTIVE = 4, | 99 | SBA_REQUEST_STATE_ALLOCED = 0x002, |
96 | SBA_REQUEST_STATE_RECEIVED = 5, | 100 | SBA_REQUEST_STATE_PENDING = 0x004, |
97 | SBA_REQUEST_STATE_COMPLETED = 6, | 101 | SBA_REQUEST_STATE_ACTIVE = 0x008, |
98 | SBA_REQUEST_STATE_ABORTED = 7, | 102 | SBA_REQUEST_STATE_ABORTED = 0x010, |
103 | SBA_REQUEST_STATE_MASK = 0x0ff, | ||
104 | SBA_REQUEST_FENCE = 0x100, | ||
99 | }; | 105 | }; |
100 | 106 | ||
101 | struct sba_request { | 107 | struct sba_request { |
102 | /* Global state */ | 108 | /* Global state */ |
103 | struct list_head node; | 109 | struct list_head node; |
104 | struct sba_device *sba; | 110 | struct sba_device *sba; |
105 | enum sba_request_state state; | 111 | u32 flags; |
106 | bool fence; | ||
107 | /* Chained requests management */ | 112 | /* Chained requests management */ |
108 | struct sba_request *first; | 113 | struct sba_request *first; |
109 | struct list_head next; | 114 | struct list_head next; |
110 | unsigned int next_count; | ||
111 | atomic_t next_pending_count; | 115 | atomic_t next_pending_count; |
112 | /* BRCM message data */ | 116 | /* BRCM message data */ |
113 | void *resp; | ||
114 | dma_addr_t resp_dma; | ||
115 | struct brcm_sba_command *cmds; | ||
116 | struct brcm_message msg; | 117 | struct brcm_message msg; |
117 | struct dma_async_tx_descriptor tx; | 118 | struct dma_async_tx_descriptor tx; |
119 | /* SBA commands */ | ||
120 | struct brcm_sba_command cmds[0]; | ||
118 | }; | 121 | }; |
119 | 122 | ||
120 | enum sba_version { | 123 | enum sba_version { |
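The hunk above folds the request state enum and the separate fence bool into one flags word: the low bits hold exactly one state (selected via SBA_REQUEST_STATE_MASK) while higher bits carry modifiers such as SBA_REQUEST_FENCE, and the per-request command array becomes a flexible array member allocated together with the request. A small stand-alone demonstration of the state/modifier split, reusing the same bit values (helper names are illustrative):

#include <stdint.h>
#include <stdio.h>

enum {
        REQ_STATE_FREE    = 0x001,
        REQ_STATE_ALLOCED = 0x002,
        REQ_STATE_PENDING = 0x004,
        REQ_STATE_ACTIVE  = 0x008,
        REQ_STATE_ABORTED = 0x010,
        REQ_STATE_MASK    = 0x0ff,  /* low byte: exactly one state bit */
        REQ_FENCE         = 0x100,  /* modifier: survives state changes */
};

static uint32_t set_state(uint32_t flags, uint32_t state)
{
        flags &= ~REQ_STATE_MASK;   /* clear the old state ... */
        return flags | state;       /* ... keep modifier bits intact */
}

int main(void)
{
        uint32_t flags = REQ_STATE_ALLOCED | REQ_FENCE;

        flags = set_state(flags, REQ_STATE_PENDING);
        printf("state=0x%02x fence=%d\n",
               (unsigned int)(flags & REQ_STATE_MASK),
               !!(flags & REQ_FENCE));
        return 0;
}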
@@ -152,19 +155,18 @@ struct sba_device { | |||
152 | void *cmds_base; | 155 | void *cmds_base; |
153 | dma_addr_t cmds_dma_base; | 156 | dma_addr_t cmds_dma_base; |
154 | spinlock_t reqs_lock; | 157 | spinlock_t reqs_lock; |
155 | struct sba_request *reqs; | ||
156 | bool reqs_fence; | 158 | bool reqs_fence; |
157 | struct list_head reqs_alloc_list; | 159 | struct list_head reqs_alloc_list; |
158 | struct list_head reqs_pending_list; | 160 | struct list_head reqs_pending_list; |
159 | struct list_head reqs_active_list; | 161 | struct list_head reqs_active_list; |
160 | struct list_head reqs_received_list; | ||
161 | struct list_head reqs_completed_list; | ||
162 | struct list_head reqs_aborted_list; | 162 | struct list_head reqs_aborted_list; |
163 | struct list_head reqs_free_list; | 163 | struct list_head reqs_free_list; |
164 | int reqs_free_count; | 164 | /* DebugFS directory entries */ |
165 | struct dentry *root; | ||
166 | struct dentry *stats; | ||
165 | }; | 167 | }; |
166 | 168 | ||
167 | /* ====== SBA command helper routines ===== */ | 169 | /* ====== Command helper routines ===== */ |
168 | 170 | ||
169 | static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask) | 171 | static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask) |
170 | { | 172 | { |
@@ -196,32 +198,50 @@ static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0) | |||
196 | ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT); | 198 | ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT); |
197 | } | 199 | } |
198 | 200 | ||
199 | /* ====== Channel resource management routines ===== */ | 201 | /* ====== General helper routines ===== */ |
202 | |||
203 | static void sba_peek_mchans(struct sba_device *sba) | ||
204 | { | ||
205 | int mchan_idx; | ||
206 | |||
207 | for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++) | ||
208 | mbox_client_peek_data(sba->mchans[mchan_idx]); | ||
209 | } | ||
200 | 210 | ||
201 | static struct sba_request *sba_alloc_request(struct sba_device *sba) | 211 | static struct sba_request *sba_alloc_request(struct sba_device *sba) |
202 | { | 212 | { |
213 | bool found = false; | ||
203 | unsigned long flags; | 214 | unsigned long flags; |
204 | struct sba_request *req = NULL; | 215 | struct sba_request *req = NULL; |
205 | 216 | ||
206 | spin_lock_irqsave(&sba->reqs_lock, flags); | 217 | spin_lock_irqsave(&sba->reqs_lock, flags); |
218 | list_for_each_entry(req, &sba->reqs_free_list, node) { | ||
219 | if (async_tx_test_ack(&req->tx)) { | ||
220 | list_move_tail(&req->node, &sba->reqs_alloc_list); | ||
221 | found = true; | ||
222 | break; | ||
223 | } | ||
224 | } | ||
225 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | ||
207 | 226 | ||
208 | req = list_first_entry_or_null(&sba->reqs_free_list, | 227 | if (!found) { |
209 | struct sba_request, node); | 228 | /* |
210 | if (req) { | 229 | * We have no more free requests so, we peek |
211 | list_move_tail(&req->node, &sba->reqs_alloc_list); | 230 | * mailbox channels hoping few active requests |
212 | req->state = SBA_REQUEST_STATE_ALLOCED; | 231 | * would have completed which will create more |
213 | req->fence = false; | 232 | * room for new requests. |
214 | req->first = req; | 233 | */ |
215 | INIT_LIST_HEAD(&req->next); | 234 | sba_peek_mchans(sba); |
216 | req->next_count = 1; | 235 | return NULL; |
217 | atomic_set(&req->next_pending_count, 1); | ||
218 | |||
219 | sba->reqs_free_count--; | ||
220 | |||
221 | dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); | ||
222 | } | 236 | } |
223 | 237 | ||
224 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | 238 | req->flags = SBA_REQUEST_STATE_ALLOCED; |
239 | req->first = req; | ||
240 | INIT_LIST_HEAD(&req->next); | ||
241 | atomic_set(&req->next_pending_count, 1); | ||
242 | |||
243 | dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); | ||
244 | async_tx_ack(&req->tx); | ||
225 | 245 | ||
226 | return req; | 246 | return req; |
227 | } | 247 | } |
@@ -231,7 +251,8 @@ static void _sba_pending_request(struct sba_device *sba, | |||
231 | struct sba_request *req) | 251 | struct sba_request *req) |
232 | { | 252 | { |
233 | lockdep_assert_held(&sba->reqs_lock); | 253 | lockdep_assert_held(&sba->reqs_lock); |
234 | req->state = SBA_REQUEST_STATE_PENDING; | 254 | req->flags &= ~SBA_REQUEST_STATE_MASK; |
255 | req->flags |= SBA_REQUEST_STATE_PENDING; | ||
235 | list_move_tail(&req->node, &sba->reqs_pending_list); | 256 | list_move_tail(&req->node, &sba->reqs_pending_list); |
236 | if (list_empty(&sba->reqs_active_list)) | 257 | if (list_empty(&sba->reqs_active_list)) |
237 | sba->reqs_fence = false; | 258 | sba->reqs_fence = false; |
@@ -246,9 +267,10 @@ static bool _sba_active_request(struct sba_device *sba, | |||
246 | sba->reqs_fence = false; | 267 | sba->reqs_fence = false; |
247 | if (sba->reqs_fence) | 268 | if (sba->reqs_fence) |
248 | return false; | 269 | return false; |
249 | req->state = SBA_REQUEST_STATE_ACTIVE; | 270 | req->flags &= ~SBA_REQUEST_STATE_MASK; |
271 | req->flags |= SBA_REQUEST_STATE_ACTIVE; | ||
250 | list_move_tail(&req->node, &sba->reqs_active_list); | 272 | list_move_tail(&req->node, &sba->reqs_active_list); |
251 | if (req->fence) | 273 | if (req->flags & SBA_REQUEST_FENCE) |
252 | sba->reqs_fence = true; | 274 | sba->reqs_fence = true; |
253 | return true; | 275 | return true; |
254 | } | 276 | } |
@@ -258,7 +280,8 @@ static void _sba_abort_request(struct sba_device *sba, | |||
258 | struct sba_request *req) | 280 | struct sba_request *req) |
259 | { | 281 | { |
260 | lockdep_assert_held(&sba->reqs_lock); | 282 | lockdep_assert_held(&sba->reqs_lock); |
261 | req->state = SBA_REQUEST_STATE_ABORTED; | 283 | req->flags &= ~SBA_REQUEST_STATE_MASK; |
284 | req->flags |= SBA_REQUEST_STATE_ABORTED; | ||
262 | list_move_tail(&req->node, &sba->reqs_aborted_list); | 285 | list_move_tail(&req->node, &sba->reqs_aborted_list); |
263 | if (list_empty(&sba->reqs_active_list)) | 286 | if (list_empty(&sba->reqs_active_list)) |
264 | sba->reqs_fence = false; | 287 | sba->reqs_fence = false; |
@@ -269,42 +292,11 @@ static void _sba_free_request(struct sba_device *sba, | |||
269 | struct sba_request *req) | 292 | struct sba_request *req) |
270 | { | 293 | { |
271 | lockdep_assert_held(&sba->reqs_lock); | 294 | lockdep_assert_held(&sba->reqs_lock); |
272 | req->state = SBA_REQUEST_STATE_FREE; | 295 | req->flags &= ~SBA_REQUEST_STATE_MASK; |
296 | req->flags |= SBA_REQUEST_STATE_FREE; | ||
273 | list_move_tail(&req->node, &sba->reqs_free_list); | 297 | list_move_tail(&req->node, &sba->reqs_free_list); |
274 | if (list_empty(&sba->reqs_active_list)) | 298 | if (list_empty(&sba->reqs_active_list)) |
275 | sba->reqs_fence = false; | 299 | sba->reqs_fence = false; |
276 | sba->reqs_free_count++; | ||
277 | } | ||
278 | |||
279 | static void sba_received_request(struct sba_request *req) | ||
280 | { | ||
281 | unsigned long flags; | ||
282 | struct sba_device *sba = req->sba; | ||
283 | |||
284 | spin_lock_irqsave(&sba->reqs_lock, flags); | ||
285 | req->state = SBA_REQUEST_STATE_RECEIVED; | ||
286 | list_move_tail(&req->node, &sba->reqs_received_list); | ||
287 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | ||
288 | } | ||
289 | |||
290 | static void sba_complete_chained_requests(struct sba_request *req) | ||
291 | { | ||
292 | unsigned long flags; | ||
293 | struct sba_request *nreq; | ||
294 | struct sba_device *sba = req->sba; | ||
295 | |||
296 | spin_lock_irqsave(&sba->reqs_lock, flags); | ||
297 | |||
298 | req->state = SBA_REQUEST_STATE_COMPLETED; | ||
299 | list_move_tail(&req->node, &sba->reqs_completed_list); | ||
300 | list_for_each_entry(nreq, &req->next, next) { | ||
301 | nreq->state = SBA_REQUEST_STATE_COMPLETED; | ||
302 | list_move_tail(&nreq->node, &sba->reqs_completed_list); | ||
303 | } | ||
304 | if (list_empty(&sba->reqs_active_list)) | ||
305 | sba->reqs_fence = false; | ||
306 | |||
307 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | ||
308 | } | 300 | } |
309 | 301 | ||
310 | static void sba_free_chained_requests(struct sba_request *req) | 302 | static void sba_free_chained_requests(struct sba_request *req) |
@@ -332,8 +324,7 @@ static void sba_chain_request(struct sba_request *first, | |||
332 | 324 | ||
333 | list_add_tail(&req->next, &first->next); | 325 | list_add_tail(&req->next, &first->next); |
334 | req->first = first; | 326 | req->first = first; |
335 | first->next_count++; | 327 | atomic_inc(&first->next_pending_count); |
336 | atomic_set(&first->next_pending_count, first->next_count); | ||
337 | 328 | ||
338 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | 329 | spin_unlock_irqrestore(&sba->reqs_lock, flags); |
339 | } | 330 | } |
@@ -349,14 +340,6 @@ static void sba_cleanup_nonpending_requests(struct sba_device *sba) | |||
349 | list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node) | 340 | list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node) |
350 | _sba_free_request(sba, req); | 341 | _sba_free_request(sba, req); |
351 | 342 | ||
352 | /* Freeup all received request */ | ||
353 | list_for_each_entry_safe(req, req1, &sba->reqs_received_list, node) | ||
354 | _sba_free_request(sba, req); | ||
355 | |||
356 | /* Freeup all completed request */ | ||
357 | list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node) | ||
358 | _sba_free_request(sba, req); | ||
359 | |||
360 | /* Set all active requests as aborted */ | 343 | /* Set all active requests as aborted */ |
361 | list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node) | 344 | list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node) |
362 | _sba_abort_request(sba, req); | 345 | _sba_abort_request(sba, req); |
@@ -383,26 +366,6 @@ static void sba_cleanup_pending_requests(struct sba_device *sba) | |||
383 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | 366 | spin_unlock_irqrestore(&sba->reqs_lock, flags); |
384 | } | 367 | } |
385 | 368 | ||
386 | /* ====== DMAENGINE callbacks ===== */ | ||
387 | |||
388 | static void sba_free_chan_resources(struct dma_chan *dchan) | ||
389 | { | ||
390 | /* | ||
391 | * Channel resources are pre-alloced so we just free-up | ||
392 | * whatever we can so that we can re-use pre-alloced | ||
393 | * channel resources next time. | ||
394 | */ | ||
395 | sba_cleanup_nonpending_requests(to_sba_device(dchan)); | ||
396 | } | ||
397 | |||
398 | static int sba_device_terminate_all(struct dma_chan *dchan) | ||
399 | { | ||
400 | /* Cleanup all pending requests */ | ||
401 | sba_cleanup_pending_requests(to_sba_device(dchan)); | ||
402 | |||
403 | return 0; | ||
404 | } | ||
405 | |||
406 | static int sba_send_mbox_request(struct sba_device *sba, | 369 | static int sba_send_mbox_request(struct sba_device *sba, |
407 | struct sba_request *req) | 370 | struct sba_request *req) |
408 | { | 371 | { |
@@ -419,42 +382,156 @@ static int sba_send_mbox_request(struct sba_device *sba, | |||
419 | dev_err(sba->dev, "send message failed with error %d", ret); | 382 | dev_err(sba->dev, "send message failed with error %d", ret); |
420 | return ret; | 383 | return ret; |
421 | } | 384 | } |
385 | |||
386 | /* Check error returned by mailbox controller */ | ||
422 | ret = req->msg.error; | 387 | ret = req->msg.error; |
423 | if (ret < 0) { | 388 | if (ret < 0) { |
424 | dev_err(sba->dev, "message error %d", ret); | 389 | dev_err(sba->dev, "message error %d", ret); |
425 | return ret; | ||
426 | } | 390 | } |
427 | 391 | ||
428 | return 0; | 392 | /* Signal txdone for mailbox channel */ |
393 | mbox_client_txdone(sba->mchans[mchans_idx], ret); | ||
394 | |||
395 | return ret; | ||
429 | } | 396 | } |
430 | 397 | ||
431 | static void sba_issue_pending(struct dma_chan *dchan) | 398 | /* Note: Must be called with sba->reqs_lock held */ |
399 | static void _sba_process_pending_requests(struct sba_device *sba) | ||
432 | { | 400 | { |
433 | int ret; | 401 | int ret; |
434 | unsigned long flags; | 402 | u32 count; |
435 | struct sba_request *req, *req1; | 403 | struct sba_request *req; |
436 | struct sba_device *sba = to_sba_device(dchan); | ||
437 | 404 | ||
438 | spin_lock_irqsave(&sba->reqs_lock, flags); | 405 | /* |
406 | * Process a few pending requests | ||
407 | * | ||
408 | * For now, we process at most (<number_of_mailbox_channels> * 8) | ||
409 | * requests at a time. | ||
410 | */ | ||
411 | count = sba->mchans_count * 8; | ||
412 | while (!list_empty(&sba->reqs_pending_list) && count) { | ||
413 | /* Get the first pending request */ | ||
414 | req = list_first_entry(&sba->reqs_pending_list, | ||
415 | struct sba_request, node); | ||
439 | 416 | ||
440 | /* Process all pending request */ | ||
441 | list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) { | ||
442 | /* Try to make request active */ | 417 | /* Try to make request active */ |
443 | if (!_sba_active_request(sba, req)) | 418 | if (!_sba_active_request(sba, req)) |
444 | break; | 419 | break; |
445 | 420 | ||
446 | /* Send request to mailbox channel */ | 421 | /* Send request to mailbox channel */ |
447 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | ||
448 | ret = sba_send_mbox_request(sba, req); | 422 | ret = sba_send_mbox_request(sba, req); |
449 | spin_lock_irqsave(&sba->reqs_lock, flags); | ||
450 | |||
451 | /* If something went wrong then keep request pending */ | ||
452 | if (ret < 0) { | 423 | if (ret < 0) { |
453 | _sba_pending_request(sba, req); | 424 | _sba_pending_request(sba, req); |
454 | break; | 425 | break; |
455 | } | 426 | } |
427 | |||
428 | count--; | ||
429 | } | ||
430 | } | ||
431 | |||
432 | static void sba_process_received_request(struct sba_device *sba, | ||
433 | struct sba_request *req) | ||
434 | { | ||
435 | unsigned long flags; | ||
436 | struct dma_async_tx_descriptor *tx; | ||
437 | struct sba_request *nreq, *first = req->first; | ||
438 | |||
439 | /* Process only after all chained requests are received */ | ||
440 | if (!atomic_dec_return(&first->next_pending_count)) { | ||
441 | tx = &first->tx; | ||
442 | |||
443 | WARN_ON(tx->cookie < 0); | ||
444 | if (tx->cookie > 0) { | ||
445 | dma_cookie_complete(tx); | ||
446 | dmaengine_desc_get_callback_invoke(tx, NULL); | ||
447 | dma_descriptor_unmap(tx); | ||
448 | tx->callback = NULL; | ||
449 | tx->callback_result = NULL; | ||
450 | } | ||
451 | |||
452 | dma_run_dependencies(tx); | ||
453 | |||
454 | spin_lock_irqsave(&sba->reqs_lock, flags); | ||
455 | |||
456 | /* Free all requests chained to first request */ | ||
457 | list_for_each_entry(nreq, &first->next, next) | ||
458 | _sba_free_request(sba, nreq); | ||
459 | INIT_LIST_HEAD(&first->next); | ||
460 | |||
461 | /* Free the first request */ | ||
462 | _sba_free_request(sba, first); | ||
463 | |||
464 | /* Process pending requests */ | ||
465 | _sba_process_pending_requests(sba); | ||
466 | |||
467 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | ||
456 | } | 468 | } |
469 | } | ||
470 | |||
471 | static void sba_write_stats_in_seqfile(struct sba_device *sba, | ||
472 | struct seq_file *file) | ||
473 | { | ||
474 | unsigned long flags; | ||
475 | struct sba_request *req; | ||
476 | u32 free_count = 0, alloced_count = 0; | ||
477 | u32 pending_count = 0, active_count = 0, aborted_count = 0; | ||
478 | |||
479 | spin_lock_irqsave(&sba->reqs_lock, flags); | ||
480 | |||
481 | list_for_each_entry(req, &sba->reqs_free_list, node) | ||
482 | if (async_tx_test_ack(&req->tx)) | ||
483 | free_count++; | ||
484 | |||
485 | list_for_each_entry(req, &sba->reqs_alloc_list, node) | ||
486 | alloced_count++; | ||
487 | |||
488 | list_for_each_entry(req, &sba->reqs_pending_list, node) | ||
489 | pending_count++; | ||
490 | |||
491 | list_for_each_entry(req, &sba->reqs_active_list, node) | ||
492 | active_count++; | ||
457 | 493 | ||
494 | list_for_each_entry(req, &sba->reqs_aborted_list, node) | ||
495 | aborted_count++; | ||
496 | |||
497 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | ||
498 | |||
499 | seq_printf(file, "maximum requests = %d\n", sba->max_req); | ||
500 | seq_printf(file, "free requests = %d\n", free_count); | ||
501 | seq_printf(file, "alloced requests = %d\n", alloced_count); | ||
502 | seq_printf(file, "pending requests = %d\n", pending_count); | ||
503 | seq_printf(file, "active requests = %d\n", active_count); | ||
504 | seq_printf(file, "aborted requests = %d\n", aborted_count); | ||
505 | } | ||
506 | |||
507 | /* ====== DMAENGINE callbacks ===== */ | ||
508 | |||
509 | static void sba_free_chan_resources(struct dma_chan *dchan) | ||
510 | { | ||
511 | /* | ||
512 | * Channel resources are pre-alloced so we just free-up | ||
513 | * whatever we can so that we can re-use pre-alloced | ||
514 | * channel resources next time. | ||
515 | */ | ||
516 | sba_cleanup_nonpending_requests(to_sba_device(dchan)); | ||
517 | } | ||
518 | |||
519 | static int sba_device_terminate_all(struct dma_chan *dchan) | ||
520 | { | ||
521 | /* Cleanup all pending requests */ | ||
522 | sba_cleanup_pending_requests(to_sba_device(dchan)); | ||
523 | |||
524 | return 0; | ||
525 | } | ||
526 | |||
527 | static void sba_issue_pending(struct dma_chan *dchan) | ||
528 | { | ||
529 | unsigned long flags; | ||
530 | struct sba_device *sba = to_sba_device(dchan); | ||
531 | |||
532 | /* Process pending requests */ | ||
533 | spin_lock_irqsave(&sba->reqs_lock, flags); | ||
534 | _sba_process_pending_requests(sba); | ||
458 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | 535 | spin_unlock_irqrestore(&sba->reqs_lock, flags); |
459 | } | 536 | } |
460 | 537 | ||
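In the reworked send path above the driver signals mailbox completion itself via mbox_client_txdone() (the client is switched to knows_txdone = true in a later hunk), and pending requests are now drained in batches of (mailbox channels * 8) while holding reqs_lock. A minimal, hedged sketch of the txdone contract only; the surrounding structure and names are illustrative:

#include <linux/mailbox_client.h>

struct example_ctx {
        struct mbox_client client;   /* client.knows_txdone = true */
        struct mbox_chan *chan;
};

static int example_send(struct example_ctx *ctx, void *msg)
{
        int ret;

        ret = mbox_send_message(ctx->chan, msg);
        if (ret < 0)
                return ret;

        /* with knows_txdone set, the client reports completion itself */
        mbox_client_txdone(ctx->chan, 0);
        return 0;
}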
@@ -486,17 +563,15 @@ static enum dma_status sba_tx_status(struct dma_chan *dchan, | |||
486 | dma_cookie_t cookie, | 563 | dma_cookie_t cookie, |
487 | struct dma_tx_state *txstate) | 564 | struct dma_tx_state *txstate) |
488 | { | 565 | { |
489 | int mchan_idx; | ||
490 | enum dma_status ret; | 566 | enum dma_status ret; |
491 | struct sba_device *sba = to_sba_device(dchan); | 567 | struct sba_device *sba = to_sba_device(dchan); |
492 | 568 | ||
493 | for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++) | ||
494 | mbox_client_peek_data(sba->mchans[mchan_idx]); | ||
495 | |||
496 | ret = dma_cookie_status(dchan, cookie, txstate); | 569 | ret = dma_cookie_status(dchan, cookie, txstate); |
497 | if (ret == DMA_COMPLETE) | 570 | if (ret == DMA_COMPLETE) |
498 | return ret; | 571 | return ret; |
499 | 572 | ||
573 | sba_peek_mchans(sba); | ||
574 | |||
500 | return dma_cookie_status(dchan, cookie, txstate); | 575 | return dma_cookie_status(dchan, cookie, txstate); |
501 | } | 576 | } |
502 | 577 | ||
@@ -506,6 +581,7 @@ static void sba_fillup_interrupt_msg(struct sba_request *req, | |||
506 | { | 581 | { |
507 | u64 cmd; | 582 | u64 cmd; |
508 | u32 c_mdata; | 583 | u32 c_mdata; |
584 | dma_addr_t resp_dma = req->tx.phys; | ||
509 | struct brcm_sba_command *cmdsp = cmds; | 585 | struct brcm_sba_command *cmdsp = cmds; |
510 | 586 | ||
511 | /* Type-B command to load dummy data into buf0 */ | 587 | /* Type-B command to load dummy data into buf0 */ |
@@ -521,7 +597,7 @@ static void sba_fillup_interrupt_msg(struct sba_request *req, | |||
521 | cmdsp->cmd = cmd; | 597 | cmdsp->cmd = cmd; |
522 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | 598 | *cmdsp->cmd_dma = cpu_to_le64(cmd); |
523 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; | 599 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; |
524 | cmdsp->data = req->resp_dma; | 600 | cmdsp->data = resp_dma; |
525 | cmdsp->data_len = req->sba->hw_resp_size; | 601 | cmdsp->data_len = req->sba->hw_resp_size; |
526 | cmdsp++; | 602 | cmdsp++; |
527 | 603 | ||
@@ -542,11 +618,11 @@ static void sba_fillup_interrupt_msg(struct sba_request *req, | |||
542 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; | 618 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; |
543 | if (req->sba->hw_resp_size) { | 619 | if (req->sba->hw_resp_size) { |
544 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; | 620 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; |
545 | cmdsp->resp = req->resp_dma; | 621 | cmdsp->resp = resp_dma; |
546 | cmdsp->resp_len = req->sba->hw_resp_size; | 622 | cmdsp->resp_len = req->sba->hw_resp_size; |
547 | } | 623 | } |
548 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; | 624 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; |
549 | cmdsp->data = req->resp_dma; | 625 | cmdsp->data = resp_dma; |
550 | cmdsp->data_len = req->sba->hw_resp_size; | 626 | cmdsp->data_len = req->sba->hw_resp_size; |
551 | cmdsp++; | 627 | cmdsp++; |
552 | 628 | ||
@@ -573,7 +649,7 @@ sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags) | |||
573 | * Force fence so that no requests are submitted | 649 | * Force fence so that no requests are submitted |
574 | * until DMA callback for this request is invoked. | 650 | * until DMA callback for this request is invoked. |
575 | */ | 651 | */ |
576 | req->fence = true; | 652 | req->flags |= SBA_REQUEST_FENCE; |
577 | 653 | ||
578 | /* Fillup request message */ | 654 | /* Fillup request message */ |
579 | sba_fillup_interrupt_msg(req, req->cmds, &req->msg); | 655 | sba_fillup_interrupt_msg(req, req->cmds, &req->msg); |
@@ -593,6 +669,7 @@ static void sba_fillup_memcpy_msg(struct sba_request *req, | |||
593 | { | 669 | { |
594 | u64 cmd; | 670 | u64 cmd; |
595 | u32 c_mdata; | 671 | u32 c_mdata; |
672 | dma_addr_t resp_dma = req->tx.phys; | ||
596 | struct brcm_sba_command *cmdsp = cmds; | 673 | struct brcm_sba_command *cmdsp = cmds; |
597 | 674 | ||
598 | /* Type-B command to load data into buf0 */ | 675 | /* Type-B command to load data into buf0 */ |
@@ -629,7 +706,7 @@ static void sba_fillup_memcpy_msg(struct sba_request *req, | |||
629 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; | 706 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; |
630 | if (req->sba->hw_resp_size) { | 707 | if (req->sba->hw_resp_size) { |
631 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; | 708 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; |
632 | cmdsp->resp = req->resp_dma; | 709 | cmdsp->resp = resp_dma; |
633 | cmdsp->resp_len = req->sba->hw_resp_size; | 710 | cmdsp->resp_len = req->sba->hw_resp_size; |
634 | } | 711 | } |
635 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; | 712 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; |
@@ -656,7 +733,8 @@ sba_prep_dma_memcpy_req(struct sba_device *sba, | |||
656 | req = sba_alloc_request(sba); | 733 | req = sba_alloc_request(sba); |
657 | if (!req) | 734 | if (!req) |
658 | return NULL; | 735 | return NULL; |
659 | req->fence = (flags & DMA_PREP_FENCE) ? true : false; | 736 | if (flags & DMA_PREP_FENCE) |
737 | req->flags |= SBA_REQUEST_FENCE; | ||
660 | 738 | ||
661 | /* Fillup request message */ | 739 | /* Fillup request message */ |
662 | sba_fillup_memcpy_msg(req, req->cmds, &req->msg, | 740 | sba_fillup_memcpy_msg(req, req->cmds, &req->msg, |
@@ -711,6 +789,7 @@ static void sba_fillup_xor_msg(struct sba_request *req, | |||
711 | u64 cmd; | 789 | u64 cmd; |
712 | u32 c_mdata; | 790 | u32 c_mdata; |
713 | unsigned int i; | 791 | unsigned int i; |
792 | dma_addr_t resp_dma = req->tx.phys; | ||
714 | struct brcm_sba_command *cmdsp = cmds; | 793 | struct brcm_sba_command *cmdsp = cmds; |
715 | 794 | ||
716 | /* Type-B command to load data into buf0 */ | 795 | /* Type-B command to load data into buf0 */ |
@@ -766,7 +845,7 @@ static void sba_fillup_xor_msg(struct sba_request *req, | |||
766 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; | 845 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; |
767 | if (req->sba->hw_resp_size) { | 846 | if (req->sba->hw_resp_size) { |
768 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; | 847 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; |
769 | cmdsp->resp = req->resp_dma; | 848 | cmdsp->resp = resp_dma; |
770 | cmdsp->resp_len = req->sba->hw_resp_size; | 849 | cmdsp->resp_len = req->sba->hw_resp_size; |
771 | } | 850 | } |
772 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; | 851 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; |
@@ -782,7 +861,7 @@ static void sba_fillup_xor_msg(struct sba_request *req, | |||
782 | msg->error = 0; | 861 | msg->error = 0; |
783 | } | 862 | } |
784 | 863 | ||
785 | struct sba_request * | 864 | static struct sba_request * |
786 | sba_prep_dma_xor_req(struct sba_device *sba, | 865 | sba_prep_dma_xor_req(struct sba_device *sba, |
787 | dma_addr_t off, dma_addr_t dst, dma_addr_t *src, | 866 | dma_addr_t off, dma_addr_t dst, dma_addr_t *src, |
788 | u32 src_cnt, size_t len, unsigned long flags) | 867 | u32 src_cnt, size_t len, unsigned long flags) |
@@ -793,7 +872,8 @@ sba_prep_dma_xor_req(struct sba_device *sba, | |||
793 | req = sba_alloc_request(sba); | 872 | req = sba_alloc_request(sba); |
794 | if (!req) | 873 | if (!req) |
795 | return NULL; | 874 | return NULL; |
796 | req->fence = (flags & DMA_PREP_FENCE) ? true : false; | 875 | if (flags & DMA_PREP_FENCE) |
876 | req->flags |= SBA_REQUEST_FENCE; | ||
797 | 877 | ||
798 | /* Fillup request message */ | 878 | /* Fillup request message */ |
799 | sba_fillup_xor_msg(req, req->cmds, &req->msg, | 879 | sba_fillup_xor_msg(req, req->cmds, &req->msg, |
@@ -854,6 +934,7 @@ static void sba_fillup_pq_msg(struct sba_request *req, | |||
854 | u64 cmd; | 934 | u64 cmd; |
855 | u32 c_mdata; | 935 | u32 c_mdata; |
856 | unsigned int i; | 936 | unsigned int i; |
937 | dma_addr_t resp_dma = req->tx.phys; | ||
857 | struct brcm_sba_command *cmdsp = cmds; | 938 | struct brcm_sba_command *cmdsp = cmds; |
858 | 939 | ||
859 | if (pq_continue) { | 940 | if (pq_continue) { |
@@ -947,7 +1028,7 @@ static void sba_fillup_pq_msg(struct sba_request *req, | |||
947 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; | 1028 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; |
948 | if (req->sba->hw_resp_size) { | 1029 | if (req->sba->hw_resp_size) { |
949 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; | 1030 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; |
950 | cmdsp->resp = req->resp_dma; | 1031 | cmdsp->resp = resp_dma; |
951 | cmdsp->resp_len = req->sba->hw_resp_size; | 1032 | cmdsp->resp_len = req->sba->hw_resp_size; |
952 | } | 1033 | } |
953 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; | 1034 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; |
@@ -974,7 +1055,7 @@ static void sba_fillup_pq_msg(struct sba_request *req, | |||
974 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; | 1055 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; |
975 | if (req->sba->hw_resp_size) { | 1056 | if (req->sba->hw_resp_size) { |
976 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; | 1057 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; |
977 | cmdsp->resp = req->resp_dma; | 1058 | cmdsp->resp = resp_dma; |
978 | cmdsp->resp_len = req->sba->hw_resp_size; | 1059 | cmdsp->resp_len = req->sba->hw_resp_size; |
979 | } | 1060 | } |
980 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; | 1061 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; |
@@ -991,7 +1072,7 @@ static void sba_fillup_pq_msg(struct sba_request *req, | |||
991 | msg->error = 0; | 1072 | msg->error = 0; |
992 | } | 1073 | } |
993 | 1074 | ||
994 | struct sba_request * | 1075 | static struct sba_request * |
995 | sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off, | 1076 | sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off, |
996 | dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src, | 1077 | dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src, |
997 | u32 src_cnt, const u8 *scf, size_t len, unsigned long flags) | 1078 | u32 src_cnt, const u8 *scf, size_t len, unsigned long flags) |
@@ -1002,7 +1083,8 @@ sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off, | |||
1002 | req = sba_alloc_request(sba); | 1083 | req = sba_alloc_request(sba); |
1003 | if (!req) | 1084 | if (!req) |
1004 | return NULL; | 1085 | return NULL; |
1005 | req->fence = (flags & DMA_PREP_FENCE) ? true : false; | 1086 | if (flags & DMA_PREP_FENCE) |
1087 | req->flags |= SBA_REQUEST_FENCE; | ||
1006 | 1088 | ||
1007 | /* Fillup request messages */ | 1089 | /* Fillup request messages */ |
1008 | sba_fillup_pq_msg(req, dmaf_continue(flags), | 1090 | sba_fillup_pq_msg(req, dmaf_continue(flags), |
@@ -1027,6 +1109,7 @@ static void sba_fillup_pq_single_msg(struct sba_request *req, | |||
1027 | u64 cmd; | 1109 | u64 cmd; |
1028 | u32 c_mdata; | 1110 | u32 c_mdata; |
1029 | u8 pos, dpos = raid6_gflog[scf]; | 1111 | u8 pos, dpos = raid6_gflog[scf]; |
1112 | dma_addr_t resp_dma = req->tx.phys; | ||
1030 | struct brcm_sba_command *cmdsp = cmds; | 1113 | struct brcm_sba_command *cmdsp = cmds; |
1031 | 1114 | ||
1032 | if (!dst_p) | 1115 | if (!dst_p) |
@@ -1105,7 +1188,7 @@ static void sba_fillup_pq_single_msg(struct sba_request *req, | |||
1105 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; | 1188 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; |
1106 | if (req->sba->hw_resp_size) { | 1189 | if (req->sba->hw_resp_size) { |
1107 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; | 1190 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; |
1108 | cmdsp->resp = req->resp_dma; | 1191 | cmdsp->resp = resp_dma; |
1109 | cmdsp->resp_len = req->sba->hw_resp_size; | 1192 | cmdsp->resp_len = req->sba->hw_resp_size; |
1110 | } | 1193 | } |
1111 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; | 1194 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; |
@@ -1226,7 +1309,7 @@ skip_q_computation: | |||
1226 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; | 1309 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; |
1227 | if (req->sba->hw_resp_size) { | 1310 | if (req->sba->hw_resp_size) { |
1228 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; | 1311 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; |
1229 | cmdsp->resp = req->resp_dma; | 1312 | cmdsp->resp = resp_dma; |
1230 | cmdsp->resp_len = req->sba->hw_resp_size; | 1313 | cmdsp->resp_len = req->sba->hw_resp_size; |
1231 | } | 1314 | } |
1232 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; | 1315 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; |
@@ -1243,7 +1326,7 @@ skip_q: | |||
1243 | msg->error = 0; | 1326 | msg->error = 0; |
1244 | } | 1327 | } |
1245 | 1328 | ||
1246 | struct sba_request * | 1329 | static struct sba_request * |
1247 | sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off, | 1330 | sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off, |
1248 | dma_addr_t *dst_p, dma_addr_t *dst_q, | 1331 | dma_addr_t *dst_p, dma_addr_t *dst_q, |
1249 | dma_addr_t src, u8 scf, size_t len, | 1332 | dma_addr_t src, u8 scf, size_t len, |
@@ -1255,7 +1338,8 @@ sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off, | |||
1255 | req = sba_alloc_request(sba); | 1338 | req = sba_alloc_request(sba); |
1256 | if (!req) | 1339 | if (!req) |
1257 | return NULL; | 1340 | return NULL; |
1258 | req->fence = (flags & DMA_PREP_FENCE) ? true : false; | 1341 | if (flags & DMA_PREP_FENCE) |
1342 | req->flags |= SBA_REQUEST_FENCE; | ||
1259 | 1343 | ||
1260 | /* Fillup request messages */ | 1344 | /* Fillup request messages */ |
1261 | sba_fillup_pq_single_msg(req, dmaf_continue(flags), | 1345 | sba_fillup_pq_single_msg(req, dmaf_continue(flags), |
@@ -1370,40 +1454,10 @@ fail: | |||
1370 | 1454 | ||
1371 | /* ====== Mailbox callbacks ===== */ | 1455 | /* ====== Mailbox callbacks ===== */ |
1372 | 1456 | ||
1373 | static void sba_dma_tx_actions(struct sba_request *req) | ||
1374 | { | ||
1375 | struct dma_async_tx_descriptor *tx = &req->tx; | ||
1376 | |||
1377 | WARN_ON(tx->cookie < 0); | ||
1378 | |||
1379 | if (tx->cookie > 0) { | ||
1380 | dma_cookie_complete(tx); | ||
1381 | |||
1382 | /* | ||
1383 | * Call the callback (must not sleep or submit new | ||
1384 | * operations to this channel) | ||
1385 | */ | ||
1386 | if (tx->callback) | ||
1387 | tx->callback(tx->callback_param); | ||
1388 | |||
1389 | dma_descriptor_unmap(tx); | ||
1390 | } | ||
1391 | |||
1392 | /* Run dependent operations */ | ||
1393 | dma_run_dependencies(tx); | ||
1394 | |||
1395 | /* If waiting for 'ack' then move to completed list */ | ||
1396 | if (!async_tx_test_ack(&req->tx)) | ||
1397 | sba_complete_chained_requests(req); | ||
1398 | else | ||
1399 | sba_free_chained_requests(req); | ||
1400 | } | ||
1401 | |||
1402 | static void sba_receive_message(struct mbox_client *cl, void *msg) | 1457 | static void sba_receive_message(struct mbox_client *cl, void *msg) |
1403 | { | 1458 | { |
1404 | unsigned long flags; | ||
1405 | struct brcm_message *m = msg; | 1459 | struct brcm_message *m = msg; |
1406 | struct sba_request *req = m->ctx, *req1; | 1460 | struct sba_request *req = m->ctx; |
1407 | struct sba_device *sba = req->sba; | 1461 | struct sba_device *sba = req->sba; |
1408 | 1462 | ||
1409 | /* Error count if message has error */ | 1463 | /* Error count if message has error */ |
@@ -1411,52 +1465,37 @@ static void sba_receive_message(struct mbox_client *cl, void *msg) | |||
1411 | dev_err(sba->dev, "%s got message with error %d", | 1465 | dev_err(sba->dev, "%s got message with error %d", |
1412 | dma_chan_name(&sba->dma_chan), m->error); | 1466 | dma_chan_name(&sba->dma_chan), m->error); |
1413 | 1467 | ||
1414 | /* Mark request as received */ | 1468 | /* Process received request */ |
1415 | sba_received_request(req); | 1469 | sba_process_received_request(sba, req); |
1416 | 1470 | } | |
1417 | /* Wait for all chained requests to be completed */ | ||
1418 | if (atomic_dec_return(&req->first->next_pending_count)) | ||
1419 | goto done; | ||
1420 | |||
1421 | /* Point to first request */ | ||
1422 | req = req->first; | ||
1423 | |||
1424 | /* Update request */ | ||
1425 | if (req->state == SBA_REQUEST_STATE_RECEIVED) | ||
1426 | sba_dma_tx_actions(req); | ||
1427 | else | ||
1428 | sba_free_chained_requests(req); | ||
1429 | 1471 | ||
1430 | spin_lock_irqsave(&sba->reqs_lock, flags); | 1472 | /* ====== Debugfs callbacks ====== */ |
1431 | 1473 | ||
1432 | /* Re-check all completed request waiting for 'ack' */ | 1474 | static int sba_debugfs_stats_show(struct seq_file *file, void *offset) |
1433 | list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node) { | 1475 | { |
1434 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | 1476 | struct platform_device *pdev = to_platform_device(file->private); |
1435 | sba_dma_tx_actions(req); | 1477 | struct sba_device *sba = platform_get_drvdata(pdev); |
1436 | spin_lock_irqsave(&sba->reqs_lock, flags); | ||
1437 | } | ||
1438 | 1478 | ||
1439 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | 1479 | /* Write stats in file */ |
1480 | sba_write_stats_in_seqfile(sba, file); | ||
1440 | 1481 | ||
1441 | done: | 1482 | return 0; |
1442 | /* Try to submit pending request */ | ||
1443 | sba_issue_pending(&sba->dma_chan); | ||
1444 | } | 1483 | } |
1445 | 1484 | ||
1446 | /* ====== Platform driver routines ===== */ | 1485 | /* ====== Platform driver routines ===== */ |
1447 | 1486 | ||
1448 | static int sba_prealloc_channel_resources(struct sba_device *sba) | 1487 | static int sba_prealloc_channel_resources(struct sba_device *sba) |
1449 | { | 1488 | { |
1450 | int i, j, p, ret = 0; | 1489 | int i, j, ret = 0; |
1451 | struct sba_request *req = NULL; | 1490 | struct sba_request *req = NULL; |
1452 | 1491 | ||
1453 | sba->resp_base = dma_alloc_coherent(sba->dma_dev.dev, | 1492 | sba->resp_base = dma_alloc_coherent(sba->mbox_dev, |
1454 | sba->max_resp_pool_size, | 1493 | sba->max_resp_pool_size, |
1455 | &sba->resp_dma_base, GFP_KERNEL); | 1494 | &sba->resp_dma_base, GFP_KERNEL); |
1456 | if (!sba->resp_base) | 1495 | if (!sba->resp_base) |
1457 | return -ENOMEM; | 1496 | return -ENOMEM; |
1458 | 1497 | ||
1459 | sba->cmds_base = dma_alloc_coherent(sba->dma_dev.dev, | 1498 | sba->cmds_base = dma_alloc_coherent(sba->mbox_dev, |
1460 | sba->max_cmds_pool_size, | 1499 | sba->max_cmds_pool_size, |
1461 | &sba->cmds_dma_base, GFP_KERNEL); | 1500 | &sba->cmds_dma_base, GFP_KERNEL); |
1462 | if (!sba->cmds_base) { | 1501 | if (!sba->cmds_base) { |
@@ -1469,36 +1508,23 @@ static int sba_prealloc_channel_resources(struct sba_device *sba) | |||
1469 | INIT_LIST_HEAD(&sba->reqs_alloc_list); | 1508 | INIT_LIST_HEAD(&sba->reqs_alloc_list); |
1470 | INIT_LIST_HEAD(&sba->reqs_pending_list); | 1509 | INIT_LIST_HEAD(&sba->reqs_pending_list); |
1471 | INIT_LIST_HEAD(&sba->reqs_active_list); | 1510 | INIT_LIST_HEAD(&sba->reqs_active_list); |
1472 | INIT_LIST_HEAD(&sba->reqs_received_list); | ||
1473 | INIT_LIST_HEAD(&sba->reqs_completed_list); | ||
1474 | INIT_LIST_HEAD(&sba->reqs_aborted_list); | 1511 | INIT_LIST_HEAD(&sba->reqs_aborted_list); |
1475 | INIT_LIST_HEAD(&sba->reqs_free_list); | 1512 | INIT_LIST_HEAD(&sba->reqs_free_list); |
1476 | 1513 | ||
1477 | sba->reqs = devm_kcalloc(sba->dev, sba->max_req, | 1514 | for (i = 0; i < sba->max_req; i++) { |
1478 | sizeof(*req), GFP_KERNEL); | 1515 | req = devm_kzalloc(sba->dev, |
1479 | if (!sba->reqs) { | 1516 | sizeof(*req) + |
1480 | ret = -ENOMEM; | 1517 | sba->max_cmd_per_req * sizeof(req->cmds[0]), |
1481 | goto fail_free_cmds_pool; | 1518 | GFP_KERNEL); |
1482 | } | 1519 | if (!req) { |
1483 | 1520 | ret = -ENOMEM; | |
1484 | for (i = 0, p = 0; i < sba->max_req; i++) { | 1521 | goto fail_free_cmds_pool; |
1485 | req = &sba->reqs[i]; | 1522 | } |
1486 | INIT_LIST_HEAD(&req->node); | 1523 | INIT_LIST_HEAD(&req->node); |
1487 | req->sba = sba; | 1524 | req->sba = sba; |
1488 | req->state = SBA_REQUEST_STATE_FREE; | 1525 | req->flags = SBA_REQUEST_STATE_FREE; |
1489 | INIT_LIST_HEAD(&req->next); | 1526 | INIT_LIST_HEAD(&req->next); |
1490 | req->next_count = 1; | ||
1491 | atomic_set(&req->next_pending_count, 0); | 1527 | atomic_set(&req->next_pending_count, 0); |
1492 | req->fence = false; | ||
1493 | req->resp = sba->resp_base + p; | ||
1494 | req->resp_dma = sba->resp_dma_base + p; | ||
1495 | p += sba->hw_resp_size; | ||
1496 | req->cmds = devm_kcalloc(sba->dev, sba->max_cmd_per_req, | ||
1497 | sizeof(*req->cmds), GFP_KERNEL); | ||
1498 | if (!req->cmds) { | ||
1499 | ret = -ENOMEM; | ||
1500 | goto fail_free_cmds_pool; | ||
1501 | } | ||
1502 | for (j = 0; j < sba->max_cmd_per_req; j++) { | 1528 | for (j = 0; j < sba->max_cmd_per_req; j++) { |
1503 | req->cmds[j].cmd = 0; | 1529 | req->cmds[j].cmd = 0; |
1504 | req->cmds[j].cmd_dma = sba->cmds_base + | 1530 | req->cmds[j].cmd_dma = sba->cmds_base + |
@@ -1509,21 +1535,20 @@ static int sba_prealloc_channel_resources(struct sba_device *sba) | |||
1509 | } | 1535 | } |
1510 | memset(&req->msg, 0, sizeof(req->msg)); | 1536 | memset(&req->msg, 0, sizeof(req->msg)); |
1511 | dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); | 1537 | dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); |
1538 | async_tx_ack(&req->tx); | ||
1512 | req->tx.tx_submit = sba_tx_submit; | 1539 | req->tx.tx_submit = sba_tx_submit; |
1513 | req->tx.phys = req->resp_dma; | 1540 | req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size; |
1514 | list_add_tail(&req->node, &sba->reqs_free_list); | 1541 | list_add_tail(&req->node, &sba->reqs_free_list); |
1515 | } | 1542 | } |
1516 | 1543 | ||
1517 | sba->reqs_free_count = sba->max_req; | ||
1518 | |||
1519 | return 0; | 1544 | return 0; |
1520 | 1545 | ||
1521 | fail_free_cmds_pool: | 1546 | fail_free_cmds_pool: |
1522 | dma_free_coherent(sba->dma_dev.dev, | 1547 | dma_free_coherent(sba->mbox_dev, |
1523 | sba->max_cmds_pool_size, | 1548 | sba->max_cmds_pool_size, |
1524 | sba->cmds_base, sba->cmds_dma_base); | 1549 | sba->cmds_base, sba->cmds_dma_base); |
1525 | fail_free_resp_pool: | 1550 | fail_free_resp_pool: |
1526 | dma_free_coherent(sba->dma_dev.dev, | 1551 | dma_free_coherent(sba->mbox_dev, |
1527 | sba->max_resp_pool_size, | 1552 | sba->max_resp_pool_size, |
1528 | sba->resp_base, sba->resp_dma_base); | 1553 | sba->resp_base, sba->resp_dma_base); |
1529 | return ret; | 1554 | return ret; |
@@ -1532,9 +1557,9 @@ fail_free_resp_pool: | |||
1532 | static void sba_freeup_channel_resources(struct sba_device *sba) | 1557 | static void sba_freeup_channel_resources(struct sba_device *sba) |
1533 | { | 1558 | { |
1534 | dmaengine_terminate_all(&sba->dma_chan); | 1559 | dmaengine_terminate_all(&sba->dma_chan); |
1535 | dma_free_coherent(sba->dma_dev.dev, sba->max_cmds_pool_size, | 1560 | dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size, |
1536 | sba->cmds_base, sba->cmds_dma_base); | 1561 | sba->cmds_base, sba->cmds_dma_base); |
1537 | dma_free_coherent(sba->dma_dev.dev, sba->max_resp_pool_size, | 1562 | dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size, |
1538 | sba->resp_base, sba->resp_dma_base); | 1563 | sba->resp_base, sba->resp_dma_base); |
1539 | sba->resp_base = NULL; | 1564 | sba->resp_base = NULL; |
1540 | sba->resp_dma_base = 0; | 1565 | sba->resp_dma_base = 0; |
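The hunks above move the coherent response and command pools from the dmaengine's struct device to the underlying mailbox device, presumably because that is the device which actually masters the DMA for these buffers; the free calls must then name the same device as the allocation. A hedged sketch of that pairing (names illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct example_pools {
        struct device *mbox_dev;    /* device that performs the DMA */
        void *resp_base;
        dma_addr_t resp_dma;
        size_t pool_size;
};

static int example_alloc_pools(struct example_pools *p)
{
        p->resp_base = dma_alloc_coherent(p->mbox_dev, p->pool_size,
                                          &p->resp_dma, GFP_KERNEL);
        return p->resp_base ? 0 : -ENOMEM;
}

static void example_free_pools(struct example_pools *p)
{
        /* free against the same device used for the allocation */
        dma_free_coherent(p->mbox_dev, p->pool_size,
                          p->resp_base, p->resp_dma);
}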
@@ -1625,6 +1650,13 @@ static int sba_probe(struct platform_device *pdev) | |||
1625 | sba->dev = &pdev->dev; | 1650 | sba->dev = &pdev->dev; |
1626 | platform_set_drvdata(pdev, sba); | 1651 | platform_set_drvdata(pdev, sba); |
1627 | 1652 | ||
1653 | /* Number of channels equals number of mailbox channels */ | ||
1654 | ret = of_count_phandle_with_args(pdev->dev.of_node, | ||
1655 | "mboxes", "#mbox-cells"); | ||
1656 | if (ret <= 0) | ||
1657 | return -ENODEV; | ||
1658 | mchans_count = ret; | ||
1659 | |||
1628 | /* Determine SBA version from DT compatible string */ | 1660 | /* Determine SBA version from DT compatible string */ |
1629 | if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba")) | 1661 | if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba")) |
1630 | sba->ver = SBA_VER_1; | 1662 | sba->ver = SBA_VER_1; |
@@ -1637,14 +1669,12 @@ static int sba_probe(struct platform_device *pdev) | |||
1637 | /* Derived Configuration parameters */ | 1669 | /* Derived Configuration parameters */ |
1638 | switch (sba->ver) { | 1670 | switch (sba->ver) { |
1639 | case SBA_VER_1: | 1671 | case SBA_VER_1: |
1640 | sba->max_req = 1024; | ||
1641 | sba->hw_buf_size = 4096; | 1672 | sba->hw_buf_size = 4096; |
1642 | sba->hw_resp_size = 8; | 1673 | sba->hw_resp_size = 8; |
1643 | sba->max_pq_coefs = 6; | 1674 | sba->max_pq_coefs = 6; |
1644 | sba->max_pq_srcs = 6; | 1675 | sba->max_pq_srcs = 6; |
1645 | break; | 1676 | break; |
1646 | case SBA_VER_2: | 1677 | case SBA_VER_2: |
1647 | sba->max_req = 1024; | ||
1648 | sba->hw_buf_size = 4096; | 1678 | sba->hw_buf_size = 4096; |
1649 | sba->hw_resp_size = 8; | 1679 | sba->hw_resp_size = 8; |
1650 | sba->max_pq_coefs = 30; | 1680 | sba->max_pq_coefs = 30; |
@@ -1658,6 +1688,7 @@ static int sba_probe(struct platform_device *pdev) | |||
1658 | default: | 1688 | default: |
1659 | return -EINVAL; | 1689 | return -EINVAL; |
1660 | } | 1690 | } |
1691 | sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count; | ||
1661 | sba->max_cmd_per_req = sba->max_pq_srcs + 3; | 1692 | sba->max_cmd_per_req = sba->max_pq_srcs + 3; |
1662 | sba->max_xor_srcs = sba->max_cmd_per_req - 1; | 1693 | sba->max_xor_srcs = sba->max_cmd_per_req - 1; |
1663 | sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size; | 1694 | sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size; |
@@ -1668,25 +1699,17 @@ static int sba_probe(struct platform_device *pdev) | |||
1668 | sba->client.dev = &pdev->dev; | 1699 | sba->client.dev = &pdev->dev; |
1669 | sba->client.rx_callback = sba_receive_message; | 1700 | sba->client.rx_callback = sba_receive_message; |
1670 | sba->client.tx_block = false; | 1701 | sba->client.tx_block = false; |
1671 | sba->client.knows_txdone = false; | 1702 | sba->client.knows_txdone = true; |
1672 | sba->client.tx_tout = 0; | 1703 | sba->client.tx_tout = 0; |
1673 | 1704 | ||
1674 | /* Number of channels equals number of mailbox channels */ | ||
1675 | ret = of_count_phandle_with_args(pdev->dev.of_node, | ||
1676 | "mboxes", "#mbox-cells"); | ||
1677 | if (ret <= 0) | ||
1678 | return -ENODEV; | ||
1679 | mchans_count = ret; | ||
1680 | sba->mchans_count = 0; | ||
1681 | atomic_set(&sba->mchans_current, 0); | ||
1682 | |||
1683 | /* Allocate mailbox channel array */ | 1705 | /* Allocate mailbox channel array */ |
1684 | sba->mchans = devm_kcalloc(&pdev->dev, sba->mchans_count, | 1706 | sba->mchans = devm_kcalloc(&pdev->dev, mchans_count, |
1685 | sizeof(*sba->mchans), GFP_KERNEL); | 1707 | sizeof(*sba->mchans), GFP_KERNEL); |
1686 | if (!sba->mchans) | 1708 | if (!sba->mchans) |
1687 | return -ENOMEM; | 1709 | return -ENOMEM; |
1688 | 1710 | ||
1689 | /* Request mailbox channels */ | 1711 | /* Request mailbox channels */ |
1712 | sba->mchans_count = 0; | ||
1690 | for (i = 0; i < mchans_count; i++) { | 1713 | for (i = 0; i < mchans_count; i++) { |
1691 | sba->mchans[i] = mbox_request_channel(&sba->client, i); | 1714 | sba->mchans[i] = mbox_request_channel(&sba->client, i); |
1692 | if (IS_ERR(sba->mchans[i])) { | 1715 | if (IS_ERR(sba->mchans[i])) { |
@@ -1695,6 +1718,7 @@ static int sba_probe(struct platform_device *pdev) | |||
1695 | } | 1718 | } |
1696 | sba->mchans_count++; | 1719 | sba->mchans_count++; |
1697 | } | 1720 | } |
1721 | atomic_set(&sba->mchans_current, 0); | ||
1698 | 1722 | ||
1699 | /* Find-out underlying mailbox device */ | 1723 | /* Find-out underlying mailbox device */ |
1700 | ret = of_parse_phandle_with_args(pdev->dev.of_node, | 1724 | ret = of_parse_phandle_with_args(pdev->dev.of_node, |
@@ -1723,15 +1747,34 @@ static int sba_probe(struct platform_device *pdev) | |||
1723 | } | 1747 | } |
1724 | } | 1748 | } |
1725 | 1749 | ||
1726 | /* Register DMA device with linux async framework */ | 1750 | /* Prealloc channel resource */ |
1727 | ret = sba_async_register(sba); | 1751 | ret = sba_prealloc_channel_resources(sba); |
1728 | if (ret) | 1752 | if (ret) |
1729 | goto fail_free_mchans; | 1753 | goto fail_free_mchans; |
1730 | 1754 | ||
1731 | /* Prealloc channel resource */ | 1755 | /* Check availability of debugfs */ |
1732 | ret = sba_prealloc_channel_resources(sba); | 1756 | if (!debugfs_initialized()) |
1757 | goto skip_debugfs; | ||
1758 | |||
1759 | /* Create debugfs root entry */ | ||
1760 | sba->root = debugfs_create_dir(dev_name(sba->dev), NULL); | ||
1761 | if (IS_ERR_OR_NULL(sba->root)) { | ||
1762 | dev_err(sba->dev, "failed to create debugfs root entry\n"); | ||
1763 | sba->root = NULL; | ||
1764 | goto skip_debugfs; | ||
1765 | } | ||
1766 | |||
1767 | /* Create debugfs stats entry */ | ||
1768 | sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root, | ||
1769 | sba_debugfs_stats_show); | ||
1770 | if (IS_ERR_OR_NULL(sba->stats)) | ||
1771 | dev_err(sba->dev, "failed to create debugfs stats file\n"); | ||
1772 | skip_debugfs: | ||
1773 | |||
1774 | /* Register DMA device with Linux async framework */ | ||
1775 | ret = sba_async_register(sba); | ||
1733 | if (ret) | 1776 | if (ret) |
1734 | goto fail_async_dev_unreg; | 1777 | goto fail_free_resources; |
1735 | 1778 | ||
1736 | /* Print device info */ | 1779 | /* Print device info */ |
1737 | dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels", | 1780 | dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels", |
@@ -1740,8 +1783,9 @@ static int sba_probe(struct platform_device *pdev) | |||
1740 | 1783 | ||
1741 | return 0; | 1784 | return 0; |
1742 | 1785 | ||
1743 | fail_async_dev_unreg: | 1786 | fail_free_resources: |
1744 | dma_async_device_unregister(&sba->dma_dev); | 1787 | debugfs_remove_recursive(sba->root); |
1788 | sba_freeup_channel_resources(sba); | ||
1745 | fail_free_mchans: | 1789 | fail_free_mchans: |
1746 | for (i = 0; i < sba->mchans_count; i++) | 1790 | for (i = 0; i < sba->mchans_count; i++) |
1747 | mbox_free_channel(sba->mchans[i]); | 1791 | mbox_free_channel(sba->mchans[i]); |
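The probe changes above create an optional debugfs directory with a device-managed seq_file for the request statistics; debugfs failures are deliberately non-fatal. A hedged sketch of the same pattern, assuming a platform driver whose drvdata is already set (struct and function names are illustrative):

#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>

struct example_dev {
        struct device *dev;
        struct dentry *root;
        u32 pending;
};

static int example_stats_show(struct seq_file *file, void *offset)
{
        /* debugfs_create_devm_seqfile() hands the struct device in here */
        struct platform_device *pdev = to_platform_device(file->private);
        struct example_dev *edev = platform_get_drvdata(pdev);

        seq_printf(file, "pending requests = %u\n", edev->pending);
        return 0;
}

static void example_setup_debugfs(struct example_dev *edev)
{
        if (!debugfs_initialized())
                return;

        edev->root = debugfs_create_dir(dev_name(edev->dev), NULL);
        if (IS_ERR_OR_NULL(edev->root))
                return;         /* debugfs is optional, keep probing */

        debugfs_create_devm_seqfile(edev->dev, "stats", edev->root,
                                    example_stats_show);
}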
@@ -1753,10 +1797,12 @@ static int sba_remove(struct platform_device *pdev) | |||
1753 | int i; | 1797 | int i; |
1754 | struct sba_device *sba = platform_get_drvdata(pdev); | 1798 | struct sba_device *sba = platform_get_drvdata(pdev); |
1755 | 1799 | ||
1756 | sba_freeup_channel_resources(sba); | ||
1757 | |||
1758 | dma_async_device_unregister(&sba->dma_dev); | 1800 | dma_async_device_unregister(&sba->dma_dev); |
1759 | 1801 | ||
1802 | debugfs_remove_recursive(sba->root); | ||
1803 | |||
1804 | sba_freeup_channel_resources(sba); | ||
1805 | |||
1760 | for (i = 0; i < sba->mchans_count; i++) | 1806 | for (i = 0; i < sba->mchans_count; i++) |
1761 | mbox_free_channel(sba->mchans[i]); | 1807 | mbox_free_channel(sba->mchans[i]); |
1762 | 1808 | ||
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index d9118ec23025..b451354735d3 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -923,30 +923,85 @@ int dma_async_device_register(struct dma_device *device) | |||
923 | return -ENODEV; | 923 | return -ENODEV; |
924 | 924 | ||
925 | /* validate device routines */ | 925 | /* validate device routines */ |
926 | BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) && | 926 | if (!device->dev) { |
927 | !device->device_prep_dma_memcpy); | 927 | pr_err("DMA device must have dev\n"); |
928 | BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) && | 928 | return -EIO; |
929 | !device->device_prep_dma_xor); | 929 | } |
930 | BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) && | 930 | |
931 | !device->device_prep_dma_xor_val); | 931 | if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) { |
932 | BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) && | 932 | dev_err(device->dev, |
933 | !device->device_prep_dma_pq); | 933 | "Device claims capability %s, but op is not defined\n", |
934 | BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) && | 934 | "DMA_MEMCPY"); |
935 | !device->device_prep_dma_pq_val); | 935 | return -EIO; |
936 | BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && | 936 | } |
937 | !device->device_prep_dma_memset); | 937 | |
938 | BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && | 938 | if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) { |
939 | !device->device_prep_dma_interrupt); | 939 | dev_err(device->dev, |
940 | BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && | 940 | "Device claims capability %s, but op is not defined\n", |
941 | !device->device_prep_dma_sg); | 941 | "DMA_XOR"); |
942 | BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && | 942 | return -EIO; |
943 | !device->device_prep_dma_cyclic); | 943 | } |
944 | BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && | 944 | |
945 | !device->device_prep_interleaved_dma); | 945 | if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) { |
946 | 946 | dev_err(device->dev, | |
947 | BUG_ON(!device->device_tx_status); | 947 | "Device claims capability %s, but op is not defined\n", |
948 | BUG_ON(!device->device_issue_pending); | 948 | "DMA_XOR_VAL"); |
949 | BUG_ON(!device->dev); | 949 | return -EIO; |
950 | } | ||
951 | |||
952 | if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) { | ||
953 | dev_err(device->dev, | ||
954 | "Device claims capability %s, but op is not defined\n", | ||
955 | "DMA_PQ"); | ||
956 | return -EIO; | ||
957 | } | ||
958 | |||
959 | if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) { | ||
960 | dev_err(device->dev, | ||
961 | "Device claims capability %s, but op is not defined\n", | ||
962 | "DMA_PQ_VAL"); | ||
963 | return -EIO; | ||
964 | } | ||
965 | |||
966 | if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) { | ||
967 | dev_err(device->dev, | ||
968 | "Device claims capability %s, but op is not defined\n", | ||
969 | "DMA_MEMSET"); | ||
970 | return -EIO; | ||
971 | } | ||
972 | |||
973 | if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) { | ||
974 | dev_err(device->dev, | ||
975 | "Device claims capability %s, but op is not defined\n", | ||
976 | "DMA_INTERRUPT"); | ||
977 | return -EIO; | ||
978 | } | ||
979 | |||
980 | if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) { | ||
981 | dev_err(device->dev, | ||
982 | "Device claims capability %s, but op is not defined\n", | ||
983 | "DMA_CYCLIC"); | ||
984 | return -EIO; | ||
985 | } | ||
986 | |||
987 | if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) { | ||
988 | dev_err(device->dev, | ||
989 | "Device claims capability %s, but op is not defined\n", | ||
990 | "DMA_INTERLEAVE"); | ||
991 | return -EIO; | ||
992 | } | ||
993 | |||
994 | |||
995 | if (!device->device_tx_status) { | ||
996 | dev_err(device->dev, "Device tx_status is not defined\n"); | ||
997 | return -EIO; | ||
998 | } | ||
999 | |||
1000 | |||
1001 | if (!device->device_issue_pending) { | ||
1002 | dev_err(device->dev, "Device issue_pending is not defined\n"); | ||
1003 | return -EIO; | ||
1004 | } | ||
950 | 1005 | ||
951 | /* note: this only matters in the | 1006 | /* note: this only matters in the |
952 | * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case | 1007 | * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case |
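With the dmaengine.c change above, dma_async_device_register() rejects inconsistent drivers with -EIO (plus a dev_err() naming the capability whose callback is missing) instead of crashing the machine via BUG_ON(). Callers should therefore propagate the return code; a hedged probe fragment (names illustrative):

#include <linux/dmaengine.h>

struct example_dev {
        struct device *dev;
        struct dma_device dma_dev;
};

static int example_register(struct example_dev *edev)
{
        int ret;

        /* cap_mask, the prep callbacks, tx_status and issue_pending are
         * assumed to have been filled in before this point */
        ret = dma_async_device_register(&edev->dma_dev);
        if (ret) {
                dev_err(edev->dev, "DMA device registration failed: %d\n", ret);
                return ret;
        }
        return 0;
}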
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index a07ef3d6b3ec..34ff53290b03 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
@@ -52,15 +52,10 @@ module_param(iterations, uint, S_IRUGO | S_IWUSR); | |||
52 | MODULE_PARM_DESC(iterations, | 52 | MODULE_PARM_DESC(iterations, |
53 | "Iterations before stopping test (default: infinite)"); | 53 | "Iterations before stopping test (default: infinite)"); |
54 | 54 | ||
55 | static unsigned int sg_buffers = 1; | ||
56 | module_param(sg_buffers, uint, S_IRUGO | S_IWUSR); | ||
57 | MODULE_PARM_DESC(sg_buffers, | ||
58 | "Number of scatter gather buffers (default: 1)"); | ||
59 | |||
60 | static unsigned int dmatest; | 55 | static unsigned int dmatest; |
61 | module_param(dmatest, uint, S_IRUGO | S_IWUSR); | 56 | module_param(dmatest, uint, S_IRUGO | S_IWUSR); |
62 | MODULE_PARM_DESC(dmatest, | 57 | MODULE_PARM_DESC(dmatest, |
63 | "dmatest 0-memcpy 1-slave_sg (default: 0)"); | 58 | "dmatest 0-memcpy 1-memset (default: 0)"); |
64 | 59 | ||
65 | static unsigned int xor_sources = 3; | 60 | static unsigned int xor_sources = 3; |
66 | module_param(xor_sources, uint, S_IRUGO | S_IWUSR); | 61 | module_param(xor_sources, uint, S_IRUGO | S_IWUSR); |
@@ -158,6 +153,7 @@ MODULE_PARM_DESC(run, "Run the test (default: false)"); | |||
158 | #define PATTERN_COPY 0x40 | 153 | #define PATTERN_COPY 0x40 |
159 | #define PATTERN_OVERWRITE 0x20 | 154 | #define PATTERN_OVERWRITE 0x20 |
160 | #define PATTERN_COUNT_MASK 0x1f | 155 | #define PATTERN_COUNT_MASK 0x1f |
156 | #define PATTERN_MEMSET_IDX 0x01 | ||
161 | 157 | ||
162 | struct dmatest_thread { | 158 | struct dmatest_thread { |
163 | struct list_head node; | 159 | struct list_head node; |
@@ -239,46 +235,62 @@ static unsigned long dmatest_random(void) | |||
239 | return buf; | 235 | return buf; |
240 | } | 236 | } |
241 | 237 | ||
238 | static inline u8 gen_inv_idx(u8 index, bool is_memset) | ||
239 | { | ||
240 | u8 val = is_memset ? PATTERN_MEMSET_IDX : index; | ||
241 | |||
242 | return ~val & PATTERN_COUNT_MASK; | ||
243 | } | ||
244 | |||
245 | static inline u8 gen_src_value(u8 index, bool is_memset) | ||
246 | { | ||
247 | return PATTERN_SRC | gen_inv_idx(index, is_memset); | ||
248 | } | ||
249 | |||
250 | static inline u8 gen_dst_value(u8 index, bool is_memset) | ||
251 | { | ||
252 | return PATTERN_DST | gen_inv_idx(index, is_memset); | ||
253 | } | ||
254 | |||
242 | static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len, | 255 | static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len, |
243 | unsigned int buf_size) | 256 | unsigned int buf_size, bool is_memset) |
244 | { | 257 | { |
245 | unsigned int i; | 258 | unsigned int i; |
246 | u8 *buf; | 259 | u8 *buf; |
247 | 260 | ||
248 | for (; (buf = *bufs); bufs++) { | 261 | for (; (buf = *bufs); bufs++) { |
249 | for (i = 0; i < start; i++) | 262 | for (i = 0; i < start; i++) |
250 | buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); | 263 | buf[i] = gen_src_value(i, is_memset); |
251 | for ( ; i < start + len; i++) | 264 | for ( ; i < start + len; i++) |
252 | buf[i] = PATTERN_SRC | PATTERN_COPY | 265 | buf[i] = gen_src_value(i, is_memset) | PATTERN_COPY; |
253 | | (~i & PATTERN_COUNT_MASK); | ||
254 | for ( ; i < buf_size; i++) | 266 | for ( ; i < buf_size; i++) |
255 | buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); | 267 | buf[i] = gen_src_value(i, is_memset); |
256 | buf++; | 268 | buf++; |
257 | } | 269 | } |
258 | } | 270 | } |
259 | 271 | ||
260 | static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, | 272 | static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, |
261 | unsigned int buf_size) | 273 | unsigned int buf_size, bool is_memset) |
262 | { | 274 | { |
263 | unsigned int i; | 275 | unsigned int i; |
264 | u8 *buf; | 276 | u8 *buf; |
265 | 277 | ||
266 | for (; (buf = *bufs); bufs++) { | 278 | for (; (buf = *bufs); bufs++) { |
267 | for (i = 0; i < start; i++) | 279 | for (i = 0; i < start; i++) |
268 | buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); | 280 | buf[i] = gen_dst_value(i, is_memset); |
269 | for ( ; i < start + len; i++) | 281 | for ( ; i < start + len; i++) |
270 | buf[i] = PATTERN_DST | PATTERN_OVERWRITE | 282 | buf[i] = gen_dst_value(i, is_memset) | |
271 | | (~i & PATTERN_COUNT_MASK); | 283 | PATTERN_OVERWRITE; |
272 | for ( ; i < buf_size; i++) | 284 | for ( ; i < buf_size; i++) |
273 | buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); | 285 | buf[i] = gen_dst_value(i, is_memset); |
274 | } | 286 | } |
275 | } | 287 | } |
276 | 288 | ||
277 | static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, | 289 | static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, |
278 | unsigned int counter, bool is_srcbuf) | 290 | unsigned int counter, bool is_srcbuf, bool is_memset) |
279 | { | 291 | { |
280 | u8 diff = actual ^ pattern; | 292 | u8 diff = actual ^ pattern; |
281 | u8 expected = pattern | (~counter & PATTERN_COUNT_MASK); | 293 | u8 expected = pattern | gen_inv_idx(counter, is_memset); |
282 | const char *thread_name = current->comm; | 294 | const char *thread_name = current->comm; |
283 | 295 | ||
284 | if (is_srcbuf) | 296 | if (is_srcbuf) |
@@ -298,7 +310,7 @@ static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, | |||
298 | 310 | ||
299 | static unsigned int dmatest_verify(u8 **bufs, unsigned int start, | 311 | static unsigned int dmatest_verify(u8 **bufs, unsigned int start, |
300 | unsigned int end, unsigned int counter, u8 pattern, | 312 | unsigned int end, unsigned int counter, u8 pattern, |
301 | bool is_srcbuf) | 313 | bool is_srcbuf, bool is_memset) |
302 | { | 314 | { |
303 | unsigned int i; | 315 | unsigned int i; |
304 | unsigned int error_count = 0; | 316 | unsigned int error_count = 0; |
@@ -311,11 +323,12 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start, | |||
311 | counter = counter_orig; | 323 | counter = counter_orig; |
312 | for (i = start; i < end; i++) { | 324 | for (i = start; i < end; i++) { |
313 | actual = buf[i]; | 325 | actual = buf[i]; |
314 | expected = pattern | (~counter & PATTERN_COUNT_MASK); | 326 | expected = pattern | gen_inv_idx(counter, is_memset); |
315 | if (actual != expected) { | 327 | if (actual != expected) { |
316 | if (error_count < MAX_ERROR_COUNT) | 328 | if (error_count < MAX_ERROR_COUNT) |
317 | dmatest_mismatch(actual, pattern, i, | 329 | dmatest_mismatch(actual, pattern, i, |
318 | counter, is_srcbuf); | 330 | counter, is_srcbuf, |
331 | is_memset); | ||
319 | error_count++; | 332 | error_count++; |
320 | } | 333 | } |
321 | counter++; | 334 | counter++; |
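The gen_inv_idx()/gen_src_value()/gen_dst_value() helpers added above exist because a memset engine replicates a single byte, so the verify pass must expect the same pattern byte at every destination offset (index PATTERN_MEMSET_IDX) rather than the usual per-offset counter. A small stand-alone sketch of the resulting bytes; PATTERN_SRC = 0x80 is assumed from the unchanged part of dmatest.c and may differ.

#include <stdio.h>

#define PATTERN_SRC             0x80    /* assumed value */
#define PATTERN_COUNT_MASK      0x1f
#define PATTERN_MEMSET_IDX      0x01

/* Mirrors gen_inv_idx()/gen_src_value() from the hunk above. */
static unsigned char inv_idx(unsigned char index, int is_memset)
{
        unsigned char val = is_memset ? PATTERN_MEMSET_IDX : index;

        return ~val & PATTERN_COUNT_MASK;
}

int main(void)
{
        for (unsigned int i = 0; i < 4; i++)
                printf("copy src[%u]=0x%02x  memset src[%u]=0x%02x\n",
                       i, PATTERN_SRC | inv_idx(i, 0),
                       i, PATTERN_SRC | inv_idx(i, 1));
        return 0;
}

Every memset source byte comes out identical (0x9e with these assumed values), which is what lets dmatest_verify() check a filled destination against a constant expected pattern.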
@@ -435,6 +448,7 @@ static int dmatest_func(void *data) | |||
435 | s64 runtime = 0; | 448 | s64 runtime = 0; |
436 | unsigned long long total_len = 0; | 449 | unsigned long long total_len = 0; |
437 | u8 align = 0; | 450 | u8 align = 0; |
451 | bool is_memset = false; | ||
438 | 452 | ||
439 | set_freezable(); | 453 | set_freezable(); |
440 | 454 | ||
@@ -448,9 +462,10 @@ static int dmatest_func(void *data) | |||
448 | if (thread->type == DMA_MEMCPY) { | 462 | if (thread->type == DMA_MEMCPY) { |
449 | align = dev->copy_align; | 463 | align = dev->copy_align; |
450 | src_cnt = dst_cnt = 1; | 464 | src_cnt = dst_cnt = 1; |
451 | } else if (thread->type == DMA_SG) { | 465 | } else if (thread->type == DMA_MEMSET) { |
452 | align = dev->copy_align; | 466 | align = dev->fill_align; |
453 | src_cnt = dst_cnt = sg_buffers; | 467 | src_cnt = dst_cnt = 1; |
468 | is_memset = true; | ||
454 | } else if (thread->type == DMA_XOR) { | 469 | } else if (thread->type == DMA_XOR) { |
455 | /* force odd to ensure dst = src */ | 470 | /* force odd to ensure dst = src */ |
456 | src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); | 471 | src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); |
@@ -530,8 +545,6 @@ static int dmatest_func(void *data) | |||
530 | dma_addr_t srcs[src_cnt]; | 545 | dma_addr_t srcs[src_cnt]; |
531 | dma_addr_t *dsts; | 546 | dma_addr_t *dsts; |
532 | unsigned int src_off, dst_off, len; | 547 | unsigned int src_off, dst_off, len; |
533 | struct scatterlist tx_sg[src_cnt]; | ||
534 | struct scatterlist rx_sg[src_cnt]; | ||
535 | 548 | ||
536 | total_tests++; | 549 | total_tests++; |
537 | 550 | ||
@@ -571,9 +584,9 @@ static int dmatest_func(void *data) | |||
571 | dst_off = (dst_off >> align) << align; | 584 | dst_off = (dst_off >> align) << align; |
572 | 585 | ||
573 | dmatest_init_srcs(thread->srcs, src_off, len, | 586 | dmatest_init_srcs(thread->srcs, src_off, len, |
574 | params->buf_size); | 587 | params->buf_size, is_memset); |
575 | dmatest_init_dsts(thread->dsts, dst_off, len, | 588 | dmatest_init_dsts(thread->dsts, dst_off, len, |
576 | params->buf_size); | 589 | params->buf_size, is_memset); |
577 | 590 | ||
578 | diff = ktime_sub(ktime_get(), start); | 591 | diff = ktime_sub(ktime_get(), start); |
579 | filltime = ktime_add(filltime, diff); | 592 | filltime = ktime_add(filltime, diff); |
@@ -627,22 +640,15 @@ static int dmatest_func(void *data) | |||
627 | um->bidi_cnt++; | 640 | um->bidi_cnt++; |
628 | } | 641 | } |
629 | 642 | ||
630 | sg_init_table(tx_sg, src_cnt); | ||
631 | sg_init_table(rx_sg, src_cnt); | ||
632 | for (i = 0; i < src_cnt; i++) { | ||
633 | sg_dma_address(&rx_sg[i]) = srcs[i]; | ||
634 | sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off; | ||
635 | sg_dma_len(&tx_sg[i]) = len; | ||
636 | sg_dma_len(&rx_sg[i]) = len; | ||
637 | } | ||
638 | |||
639 | if (thread->type == DMA_MEMCPY) | 643 | if (thread->type == DMA_MEMCPY) |
640 | tx = dev->device_prep_dma_memcpy(chan, | 644 | tx = dev->device_prep_dma_memcpy(chan, |
641 | dsts[0] + dst_off, | 645 | dsts[0] + dst_off, |
642 | srcs[0], len, flags); | 646 | srcs[0], len, flags); |
643 | else if (thread->type == DMA_SG) | 647 | else if (thread->type == DMA_MEMSET) |
644 | tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt, | 648 | tx = dev->device_prep_dma_memset(chan, |
645 | rx_sg, src_cnt, flags); | 649 | dsts[0] + dst_off, |
650 | *(thread->srcs[0] + src_off), | ||
651 | len, flags); | ||
646 | else if (thread->type == DMA_XOR) | 652 | else if (thread->type == DMA_XOR) |
647 | tx = dev->device_prep_dma_xor(chan, | 653 | tx = dev->device_prep_dma_xor(chan, |
648 | dsts[0] + dst_off, | 654 | dsts[0] + dst_off, |
@@ -722,23 +728,25 @@ static int dmatest_func(void *data) | |||
722 | start = ktime_get(); | 728 | start = ktime_get(); |
723 | pr_debug("%s: verifying source buffer...\n", current->comm); | 729 | pr_debug("%s: verifying source buffer...\n", current->comm); |
724 | error_count = dmatest_verify(thread->srcs, 0, src_off, | 730 | error_count = dmatest_verify(thread->srcs, 0, src_off, |
725 | 0, PATTERN_SRC, true); | 731 | 0, PATTERN_SRC, true, is_memset); |
726 | error_count += dmatest_verify(thread->srcs, src_off, | 732 | error_count += dmatest_verify(thread->srcs, src_off, |
727 | src_off + len, src_off, | 733 | src_off + len, src_off, |
728 | PATTERN_SRC | PATTERN_COPY, true); | 734 | PATTERN_SRC | PATTERN_COPY, true, is_memset); |
729 | error_count += dmatest_verify(thread->srcs, src_off + len, | 735 | error_count += dmatest_verify(thread->srcs, src_off + len, |
730 | params->buf_size, src_off + len, | 736 | params->buf_size, src_off + len, |
731 | PATTERN_SRC, true); | 737 | PATTERN_SRC, true, is_memset); |
732 | 738 | ||
733 | pr_debug("%s: verifying dest buffer...\n", current->comm); | 739 | pr_debug("%s: verifying dest buffer...\n", current->comm); |
734 | error_count += dmatest_verify(thread->dsts, 0, dst_off, | 740 | error_count += dmatest_verify(thread->dsts, 0, dst_off, |
735 | 0, PATTERN_DST, false); | 741 | 0, PATTERN_DST, false, is_memset); |
742 | |||
736 | error_count += dmatest_verify(thread->dsts, dst_off, | 743 | error_count += dmatest_verify(thread->dsts, dst_off, |
737 | dst_off + len, src_off, | 744 | dst_off + len, src_off, |
738 | PATTERN_SRC | PATTERN_COPY, false); | 745 | PATTERN_SRC | PATTERN_COPY, false, is_memset); |
746 | |||
739 | error_count += dmatest_verify(thread->dsts, dst_off + len, | 747 | error_count += dmatest_verify(thread->dsts, dst_off + len, |
740 | params->buf_size, dst_off + len, | 748 | params->buf_size, dst_off + len, |
741 | PATTERN_DST, false); | 749 | PATTERN_DST, false, is_memset); |
742 | 750 | ||
743 | diff = ktime_sub(ktime_get(), start); | 751 | diff = ktime_sub(ktime_get(), start); |
744 | comparetime = ktime_add(comparetime, diff); | 752 | comparetime = ktime_add(comparetime, diff); |
@@ -821,8 +829,8 @@ static int dmatest_add_threads(struct dmatest_info *info, | |||
821 | 829 | ||
822 | if (type == DMA_MEMCPY) | 830 | if (type == DMA_MEMCPY) |
823 | op = "copy"; | 831 | op = "copy"; |
824 | else if (type == DMA_SG) | 832 | else if (type == DMA_MEMSET) |
825 | op = "sg"; | 833 | op = "set"; |
826 | else if (type == DMA_XOR) | 834 | else if (type == DMA_XOR) |
827 | op = "xor"; | 835 | op = "xor"; |
828 | else if (type == DMA_PQ) | 836 | else if (type == DMA_PQ) |
@@ -883,9 +891,9 @@ static int dmatest_add_channel(struct dmatest_info *info, | |||
883 | } | 891 | } |
884 | } | 892 | } |
885 | 893 | ||
886 | if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) { | 894 | if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) { |
887 | if (dmatest == 1) { | 895 | if (dmatest == 1) { |
888 | cnt = dmatest_add_threads(info, dtc, DMA_SG); | 896 | cnt = dmatest_add_threads(info, dtc, DMA_MEMSET); |
889 | thread_count += cnt > 0 ? cnt : 0; | 897 | thread_count += cnt > 0 ? cnt : 0; |
890 | } | 898 | } |
891 | } | 899 | } |
@@ -961,8 +969,8 @@ static void run_threaded_test(struct dmatest_info *info) | |||
961 | params->noverify = noverify; | 969 | params->noverify = noverify; |
962 | 970 | ||
963 | request_channels(info, DMA_MEMCPY); | 971 | request_channels(info, DMA_MEMCPY); |
972 | request_channels(info, DMA_MEMSET); | ||
964 | request_channels(info, DMA_XOR); | 973 | request_channels(info, DMA_XOR); |
965 | request_channels(info, DMA_SG); | ||
966 | request_channels(info, DMA_PQ); | 974 | request_channels(info, DMA_PQ); |
967 | } | 975 | } |
968 | 976 | ||
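With DMA_SG dropped, dmatest=1 now exercises DMA_MEMSET channels instead. Outside dmatest, a client would obtain such a channel through the usual capability-mask request; a hedged sketch, assuming the platform actually exposes a memset-capable engine (such as hidma later in this series):

#include <linux/dmaengine.h>

/* Sketch: grab any channel that advertises DMA_MEMSET. */
static struct dma_chan *get_memset_chan(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMSET, mask);

        /* NULL filter: the first capable channel will do. */
        return dma_request_channel(mask, NULL, NULL);
}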
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 3b8b752ede2d..3eaece888e75 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -825,122 +825,6 @@ fail: | |||
825 | return NULL; | 825 | return NULL; |
826 | } | 826 | } |
827 | 827 | ||
828 | static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan, | ||
829 | struct scatterlist *dst_sg, unsigned int dst_nents, | ||
830 | struct scatterlist *src_sg, unsigned int src_nents, | ||
831 | unsigned long flags) | ||
832 | { | ||
833 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; | ||
834 | struct fsldma_chan *chan = to_fsl_chan(dchan); | ||
835 | size_t dst_avail, src_avail; | ||
836 | dma_addr_t dst, src; | ||
837 | size_t len; | ||
838 | |||
839 | /* basic sanity checks */ | ||
840 | if (dst_nents == 0 || src_nents == 0) | ||
841 | return NULL; | ||
842 | |||
843 | if (dst_sg == NULL || src_sg == NULL) | ||
844 | return NULL; | ||
845 | |||
846 | /* | ||
847 | * TODO: should we check that both scatterlists have the same | ||
848 | * TODO: number of bytes in total? Is that really an error? | ||
849 | */ | ||
850 | |||
851 | /* get prepared for the loop */ | ||
852 | dst_avail = sg_dma_len(dst_sg); | ||
853 | src_avail = sg_dma_len(src_sg); | ||
854 | |||
855 | /* run until we are out of scatterlist entries */ | ||
856 | while (true) { | ||
857 | |||
858 | /* create the largest transaction possible */ | ||
859 | len = min_t(size_t, src_avail, dst_avail); | ||
860 | len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT); | ||
861 | if (len == 0) | ||
862 | goto fetch; | ||
863 | |||
864 | dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail; | ||
865 | src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail; | ||
866 | |||
867 | /* allocate and populate the descriptor */ | ||
868 | new = fsl_dma_alloc_descriptor(chan); | ||
869 | if (!new) { | ||
870 | chan_err(chan, "%s\n", msg_ld_oom); | ||
871 | goto fail; | ||
872 | } | ||
873 | |||
874 | set_desc_cnt(chan, &new->hw, len); | ||
875 | set_desc_src(chan, &new->hw, src); | ||
876 | set_desc_dst(chan, &new->hw, dst); | ||
877 | |||
878 | if (!first) | ||
879 | first = new; | ||
880 | else | ||
881 | set_desc_next(chan, &prev->hw, new->async_tx.phys); | ||
882 | |||
883 | new->async_tx.cookie = 0; | ||
884 | async_tx_ack(&new->async_tx); | ||
885 | prev = new; | ||
886 | |||
887 | /* Insert the link descriptor to the LD ring */ | ||
888 | list_add_tail(&new->node, &first->tx_list); | ||
889 | |||
890 | /* update metadata */ | ||
891 | dst_avail -= len; | ||
892 | src_avail -= len; | ||
893 | |||
894 | fetch: | ||
895 | /* fetch the next dst scatterlist entry */ | ||
896 | if (dst_avail == 0) { | ||
897 | |||
898 | /* no more entries: we're done */ | ||
899 | if (dst_nents == 0) | ||
900 | break; | ||
901 | |||
902 | /* fetch the next entry: if there are no more: done */ | ||
903 | dst_sg = sg_next(dst_sg); | ||
904 | if (dst_sg == NULL) | ||
905 | break; | ||
906 | |||
907 | dst_nents--; | ||
908 | dst_avail = sg_dma_len(dst_sg); | ||
909 | } | ||
910 | |||
911 | /* fetch the next src scatterlist entry */ | ||
912 | if (src_avail == 0) { | ||
913 | |||
914 | /* no more entries: we're done */ | ||
915 | if (src_nents == 0) | ||
916 | break; | ||
917 | |||
918 | /* fetch the next entry: if there are no more: done */ | ||
919 | src_sg = sg_next(src_sg); | ||
920 | if (src_sg == NULL) | ||
921 | break; | ||
922 | |||
923 | src_nents--; | ||
924 | src_avail = sg_dma_len(src_sg); | ||
925 | } | ||
926 | } | ||
927 | |||
928 | new->async_tx.flags = flags; /* client is in control of this ack */ | ||
929 | new->async_tx.cookie = -EBUSY; | ||
930 | |||
931 | /* Set End-of-link to the last link descriptor of new list */ | ||
932 | set_ld_eol(chan, new); | ||
933 | |||
934 | return &first->async_tx; | ||
935 | |||
936 | fail: | ||
937 | if (!first) | ||
938 | return NULL; | ||
939 | |||
940 | fsldma_free_desc_list_reverse(chan, &first->tx_list); | ||
941 | return NULL; | ||
942 | } | ||
943 | |||
944 | static int fsl_dma_device_terminate_all(struct dma_chan *dchan) | 828 | static int fsl_dma_device_terminate_all(struct dma_chan *dchan) |
945 | { | 829 | { |
946 | struct fsldma_chan *chan; | 830 | struct fsldma_chan *chan; |
@@ -1357,12 +1241,10 @@ static int fsldma_of_probe(struct platform_device *op) | |||
1357 | fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0); | 1241 | fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0); |
1358 | 1242 | ||
1359 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); | 1243 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); |
1360 | dma_cap_set(DMA_SG, fdev->common.cap_mask); | ||
1361 | dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); | 1244 | dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); |
1362 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; | 1245 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; |
1363 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; | 1246 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; |
1364 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; | 1247 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; |
1365 | fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; | ||
1366 | fdev->common.device_tx_status = fsl_tx_status; | 1248 | fdev->common.device_tx_status = fsl_tx_status; |
1367 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; | 1249 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; |
1368 | fdev->common.device_config = fsl_dma_device_config; | 1250 | fdev->common.device_config = fsl_dma_device_config; |
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index a371b07a0981..f70cc74032ea 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
@@ -644,9 +644,13 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) | |||
644 | mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); | 644 | mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); |
645 | } | 645 | } |
646 | 646 | ||
647 | /* 5 microsecond delay per pending descriptor */ | 647 | /* per-descriptor interrupt delay in microseconds, set via the sysfs tunable */ |
648 | writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK), | 648 | if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) { |
649 | ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET); | 649 | writew(min((ioat_chan->intr_coalesce * (active - i)), |
650 | IOAT_INTRDELAY_MASK), | ||
651 | ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET); | ||
652 | ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce; | ||
653 | } | ||
650 | } | 654 | } |
651 | 655 | ||
652 | static void ioat_cleanup(struct ioatdma_chan *ioat_chan) | 656 | static void ioat_cleanup(struct ioatdma_chan *ioat_chan) |
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index a9bc1a15b0d1..56200eefcf5e 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h | |||
@@ -142,11 +142,14 @@ struct ioatdma_chan { | |||
142 | spinlock_t prep_lock; | 142 | spinlock_t prep_lock; |
143 | struct ioat_descs descs[2]; | 143 | struct ioat_descs descs[2]; |
144 | int desc_chunks; | 144 | int desc_chunks; |
145 | int intr_coalesce; | ||
146 | int prev_intr_coalesce; | ||
145 | }; | 147 | }; |
146 | 148 | ||
147 | struct ioat_sysfs_entry { | 149 | struct ioat_sysfs_entry { |
148 | struct attribute attr; | 150 | struct attribute attr; |
149 | ssize_t (*show)(struct dma_chan *, char *); | 151 | ssize_t (*show)(struct dma_chan *, char *); |
152 | ssize_t (*store)(struct dma_chan *, const char *, size_t); | ||
150 | }; | 153 | }; |
151 | 154 | ||
152 | /** | 155 | /** |
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index ed8ed1192775..93e006c3441d 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c | |||
@@ -39,7 +39,7 @@ MODULE_VERSION(IOAT_DMA_VERSION); | |||
39 | MODULE_LICENSE("Dual BSD/GPL"); | 39 | MODULE_LICENSE("Dual BSD/GPL"); |
40 | MODULE_AUTHOR("Intel Corporation"); | 40 | MODULE_AUTHOR("Intel Corporation"); |
41 | 41 | ||
42 | static struct pci_device_id ioat_pci_tbl[] = { | 42 | static const struct pci_device_id ioat_pci_tbl[] = { |
43 | /* I/OAT v3 platforms */ | 43 | /* I/OAT v3 platforms */ |
44 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) }, | 44 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) }, |
45 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) }, | 45 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) }, |
diff --git a/drivers/dma/ioat/sysfs.c b/drivers/dma/ioat/sysfs.c index cb4a857ee21b..3ac677f29e8f 100644 --- a/drivers/dma/ioat/sysfs.c +++ b/drivers/dma/ioat/sysfs.c | |||
@@ -64,8 +64,24 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | |||
64 | return entry->show(&ioat_chan->dma_chan, page); | 64 | return entry->show(&ioat_chan->dma_chan, page); |
65 | } | 65 | } |
66 | 66 | ||
67 | static ssize_t | ||
68 | ioat_attr_store(struct kobject *kobj, struct attribute *attr, | ||
69 | const char *page, size_t count) | ||
70 | { | ||
71 | struct ioat_sysfs_entry *entry; | ||
72 | struct ioatdma_chan *ioat_chan; | ||
73 | |||
74 | entry = container_of(attr, struct ioat_sysfs_entry, attr); | ||
75 | ioat_chan = container_of(kobj, struct ioatdma_chan, kobj); | ||
76 | |||
77 | if (!entry->store) | ||
78 | return -EIO; | ||
79 | return entry->store(&ioat_chan->dma_chan, page, count); | ||
80 | } | ||
81 | |||
67 | const struct sysfs_ops ioat_sysfs_ops = { | 82 | const struct sysfs_ops ioat_sysfs_ops = { |
68 | .show = ioat_attr_show, | 83 | .show = ioat_attr_show, |
84 | .store = ioat_attr_store, | ||
69 | }; | 85 | }; |
70 | 86 | ||
71 | void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type) | 87 | void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type) |
@@ -121,11 +137,37 @@ static ssize_t ring_active_show(struct dma_chan *c, char *page) | |||
121 | } | 137 | } |
122 | static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active); | 138 | static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active); |
123 | 139 | ||
140 | static ssize_t intr_coalesce_show(struct dma_chan *c, char *page) | ||
141 | { | ||
142 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
143 | |||
144 | return sprintf(page, "%d\n", ioat_chan->intr_coalesce); | ||
145 | } | ||
146 | |||
147 | static ssize_t intr_coalesce_store(struct dma_chan *c, const char *page, | ||
148 | size_t count) | ||
149 | { | ||
150 | int intr_coalesce = 0; | ||
151 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
152 | |||
153 | if (sscanf(page, "%du", &intr_coalesce) != -1) { | ||
154 | if ((intr_coalesce < 0) || | ||
155 | (intr_coalesce > IOAT_INTRDELAY_MASK)) | ||
156 | return -EINVAL; | ||
157 | ioat_chan->intr_coalesce = intr_coalesce; | ||
158 | } | ||
159 | |||
160 | return count; | ||
161 | } | ||
162 | |||
163 | static struct ioat_sysfs_entry intr_coalesce_attr = __ATTR_RW(intr_coalesce); | ||
164 | |||
124 | static struct attribute *ioat_attrs[] = { | 165 | static struct attribute *ioat_attrs[] = { |
125 | &ring_size_attr.attr, | 166 | &ring_size_attr.attr, |
126 | &ring_active_attr.attr, | 167 | &ring_active_attr.attr, |
127 | &ioat_cap_attr.attr, | 168 | &ioat_cap_attr.attr, |
128 | &ioat_version_attr.attr, | 169 | &ioat_version_attr.attr, |
170 | &intr_coalesce_attr.attr, | ||
129 | NULL, | 171 | NULL, |
130 | }; | 172 | }; |
131 | 173 | ||
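intr_coalesce becomes a read-write per-channel attribute through the new ioat_attr_store() dispatcher. The merged store callback above parses with sscanf("%du", ...) and silently keeps the old value on a parse failure; a stricter, functionally similar sketch using kstrtoint() is shown below purely as an alternative design, not as the code that was merged. It relies on the ioat driver-private headers for to_ioat_chan() and IOAT_INTRDELAY_MASK.

#include <linux/kernel.h>
#include "dma.h"                /* ioat driver-private: to_ioat_chan() */
#include "registers.h"          /* IOAT_INTRDELAY_MASK */

/* Sketch: stricter parsing for an intr_coalesce store callback. */
static ssize_t intr_coalesce_store_strict(struct dma_chan *c,
                                          const char *page, size_t count)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
        int val;

        if (kstrtoint(page, 0, &val))
                return -EINVAL;
        if (val < 0 || val > IOAT_INTRDELAY_MASK)
                return -EINVAL;

        ioat_chan->intr_coalesce = val;
        return count;
}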
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 01e25c68dd5a..01d2a750a621 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c | |||
@@ -223,7 +223,6 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id) | |||
223 | if (c && (tc1 & BIT(i))) { | 223 | if (c && (tc1 & BIT(i))) { |
224 | spin_lock_irqsave(&c->vc.lock, flags); | 224 | spin_lock_irqsave(&c->vc.lock, flags); |
225 | vchan_cookie_complete(&p->ds_run->vd); | 225 | vchan_cookie_complete(&p->ds_run->vd); |
226 | WARN_ON_ONCE(p->ds_done); | ||
227 | p->ds_done = p->ds_run; | 226 | p->ds_done = p->ds_run; |
228 | p->ds_run = NULL; | 227 | p->ds_run = NULL; |
229 | spin_unlock_irqrestore(&c->vc.lock, flags); | 228 | spin_unlock_irqrestore(&c->vc.lock, flags); |
@@ -274,13 +273,14 @@ static int k3_dma_start_txd(struct k3_dma_chan *c) | |||
274 | */ | 273 | */ |
275 | list_del(&ds->vd.node); | 274 | list_del(&ds->vd.node); |
276 | 275 | ||
277 | WARN_ON_ONCE(c->phy->ds_run); | ||
278 | WARN_ON_ONCE(c->phy->ds_done); | ||
279 | c->phy->ds_run = ds; | 276 | c->phy->ds_run = ds; |
277 | c->phy->ds_done = NULL; | ||
280 | /* start dma */ | 278 | /* start dma */ |
281 | k3_dma_set_desc(c->phy, &ds->desc_hw[0]); | 279 | k3_dma_set_desc(c->phy, &ds->desc_hw[0]); |
282 | return 0; | 280 | return 0; |
283 | } | 281 | } |
282 | c->phy->ds_run = NULL; | ||
283 | c->phy->ds_done = NULL; | ||
284 | return -EAGAIN; | 284 | return -EAGAIN; |
285 | } | 285 | } |
286 | 286 | ||
@@ -722,11 +722,7 @@ static int k3_dma_terminate_all(struct dma_chan *chan) | |||
722 | k3_dma_free_desc(&p->ds_run->vd); | 722 | k3_dma_free_desc(&p->ds_run->vd); |
723 | p->ds_run = NULL; | 723 | p->ds_run = NULL; |
724 | } | 724 | } |
725 | if (p->ds_done) { | 725 | p->ds_done = NULL; |
726 | k3_dma_free_desc(&p->ds_done->vd); | ||
727 | p->ds_done = NULL; | ||
728 | } | ||
729 | |||
730 | } | 726 | } |
731 | spin_unlock_irqrestore(&c->vc.lock, flags); | 727 | spin_unlock_irqrestore(&c->vc.lock, flags); |
732 | vchan_dma_desc_free_list(&c->vc, &head); | 728 | vchan_dma_desc_free_list(&c->vc, &head); |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 25bc5b103aa2..1993889003fd 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -68,36 +68,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, | |||
68 | hw_desc->byte_count = byte_count; | 68 | hw_desc->byte_count = byte_count; |
69 | } | 69 | } |
70 | 70 | ||
71 | /* Populate the descriptor */ | ||
72 | static void mv_xor_config_sg_ll_desc(struct mv_xor_desc_slot *desc, | ||
73 | dma_addr_t dma_src, dma_addr_t dma_dst, | ||
74 | u32 len, struct mv_xor_desc_slot *prev) | ||
75 | { | ||
76 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
77 | |||
78 | hw_desc->status = XOR_DESC_DMA_OWNED; | ||
79 | hw_desc->phy_next_desc = 0; | ||
80 | /* Configure for XOR with only one src address -> MEMCPY */ | ||
81 | hw_desc->desc_command = XOR_DESC_OPERATION_XOR | (0x1 << 0); | ||
82 | hw_desc->phy_dest_addr = dma_dst; | ||
83 | hw_desc->phy_src_addr[0] = dma_src; | ||
84 | hw_desc->byte_count = len; | ||
85 | |||
86 | if (prev) { | ||
87 | struct mv_xor_desc *hw_prev = prev->hw_desc; | ||
88 | |||
89 | hw_prev->phy_next_desc = desc->async_tx.phys; | ||
90 | } | ||
91 | } | ||
92 | |||
93 | static void mv_xor_desc_config_eod(struct mv_xor_desc_slot *desc) | ||
94 | { | ||
95 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
96 | |||
97 | /* Enable end-of-descriptor interrupt */ | ||
98 | hw_desc->desc_command |= XOR_DESC_EOD_INT_EN; | ||
99 | } | ||
100 | |||
101 | static void mv_desc_set_mode(struct mv_xor_desc_slot *desc) | 71 | static void mv_desc_set_mode(struct mv_xor_desc_slot *desc) |
102 | { | 72 | { |
103 | struct mv_xor_desc *hw_desc = desc->hw_desc; | 73 | struct mv_xor_desc *hw_desc = desc->hw_desc; |
@@ -662,132 +632,6 @@ mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) | |||
662 | return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags); | 632 | return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags); |
663 | } | 633 | } |
664 | 634 | ||
665 | /** | ||
666 | * mv_xor_prep_dma_sg - prepare descriptors for a memory sg transaction | ||
667 | * @chan: DMA channel | ||
668 | * @dst_sg: Destination scatter list | ||
669 | * @dst_sg_len: Number of entries in destination scatter list | ||
670 | * @src_sg: Source scatter list | ||
671 | * @src_sg_len: Number of entries in source scatter list | ||
672 | * @flags: transfer ack flags | ||
673 | * | ||
674 | * Return: Async transaction descriptor on success and NULL on failure | ||
675 | */ | ||
676 | static struct dma_async_tx_descriptor * | ||
677 | mv_xor_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg, | ||
678 | unsigned int dst_sg_len, struct scatterlist *src_sg, | ||
679 | unsigned int src_sg_len, unsigned long flags) | ||
680 | { | ||
681 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | ||
682 | struct mv_xor_desc_slot *new; | ||
683 | struct mv_xor_desc_slot *first = NULL; | ||
684 | struct mv_xor_desc_slot *prev = NULL; | ||
685 | size_t len, dst_avail, src_avail; | ||
686 | dma_addr_t dma_dst, dma_src; | ||
687 | int desc_cnt = 0; | ||
688 | int ret; | ||
689 | |||
690 | dev_dbg(mv_chan_to_devp(mv_chan), | ||
691 | "%s dst_sg_len: %d src_sg_len: %d flags: %ld\n", | ||
692 | __func__, dst_sg_len, src_sg_len, flags); | ||
693 | |||
694 | dst_avail = sg_dma_len(dst_sg); | ||
695 | src_avail = sg_dma_len(src_sg); | ||
696 | |||
697 | /* Run until we are out of scatterlist entries */ | ||
698 | while (true) { | ||
699 | /* Allocate and populate the descriptor */ | ||
700 | desc_cnt++; | ||
701 | new = mv_chan_alloc_slot(mv_chan); | ||
702 | if (!new) { | ||
703 | dev_err(mv_chan_to_devp(mv_chan), | ||
704 | "Out of descriptors (desc_cnt=%d)!\n", | ||
705 | desc_cnt); | ||
706 | goto err; | ||
707 | } | ||
708 | |||
709 | len = min_t(size_t, src_avail, dst_avail); | ||
710 | len = min_t(size_t, len, MV_XOR_MAX_BYTE_COUNT); | ||
711 | if (len == 0) | ||
712 | goto fetch; | ||
713 | |||
714 | if (len < MV_XOR_MIN_BYTE_COUNT) { | ||
715 | dev_err(mv_chan_to_devp(mv_chan), | ||
716 | "Transfer size of %zu too small!\n", len); | ||
717 | goto err; | ||
718 | } | ||
719 | |||
720 | dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - | ||
721 | dst_avail; | ||
722 | dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - | ||
723 | src_avail; | ||
724 | |||
725 | /* Check if a new window needs to get added for 'dst' */ | ||
726 | ret = mv_xor_add_io_win(mv_chan, dma_dst); | ||
727 | if (ret) | ||
728 | goto err; | ||
729 | |||
730 | /* Check if a new window needs to get added for 'src' */ | ||
731 | ret = mv_xor_add_io_win(mv_chan, dma_src); | ||
732 | if (ret) | ||
733 | goto err; | ||
734 | |||
735 | /* Populate the descriptor */ | ||
736 | mv_xor_config_sg_ll_desc(new, dma_src, dma_dst, len, prev); | ||
737 | prev = new; | ||
738 | dst_avail -= len; | ||
739 | src_avail -= len; | ||
740 | |||
741 | if (!first) | ||
742 | first = new; | ||
743 | else | ||
744 | list_move_tail(&new->node, &first->sg_tx_list); | ||
745 | |||
746 | fetch: | ||
747 | /* Fetch the next dst scatterlist entry */ | ||
748 | if (dst_avail == 0) { | ||
749 | if (dst_sg_len == 0) | ||
750 | break; | ||
751 | |||
752 | /* Fetch the next entry: if there are no more: done */ | ||
753 | dst_sg = sg_next(dst_sg); | ||
754 | if (dst_sg == NULL) | ||
755 | break; | ||
756 | |||
757 | dst_sg_len--; | ||
758 | dst_avail = sg_dma_len(dst_sg); | ||
759 | } | ||
760 | |||
761 | /* Fetch the next src scatterlist entry */ | ||
762 | if (src_avail == 0) { | ||
763 | if (src_sg_len == 0) | ||
764 | break; | ||
765 | |||
766 | /* Fetch the next entry: if there are no more: done */ | ||
767 | src_sg = sg_next(src_sg); | ||
768 | if (src_sg == NULL) | ||
769 | break; | ||
770 | |||
771 | src_sg_len--; | ||
772 | src_avail = sg_dma_len(src_sg); | ||
773 | } | ||
774 | } | ||
775 | |||
776 | /* Set the EOD flag in the last descriptor */ | ||
777 | mv_xor_desc_config_eod(new); | ||
778 | first->async_tx.flags = flags; | ||
779 | |||
780 | return &first->async_tx; | ||
781 | |||
782 | err: | ||
783 | /* Cleanup: Move all descriptors back into the free list */ | ||
784 | spin_lock_bh(&mv_chan->lock); | ||
785 | mv_desc_clean_slot(first, mv_chan); | ||
786 | spin_unlock_bh(&mv_chan->lock); | ||
787 | |||
788 | return NULL; | ||
789 | } | ||
790 | |||
791 | static void mv_xor_free_chan_resources(struct dma_chan *chan) | 635 | static void mv_xor_free_chan_resources(struct dma_chan *chan) |
792 | { | 636 | { |
793 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | 637 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); |
@@ -1254,8 +1098,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1254 | dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt; | 1098 | dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt; |
1255 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) | 1099 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) |
1256 | dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; | 1100 | dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; |
1257 | if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) | ||
1258 | dma_dev->device_prep_dma_sg = mv_xor_prep_dma_sg; | ||
1259 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | 1101 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { |
1260 | dma_dev->max_xor = 8; | 1102 | dma_dev->max_xor = 8; |
1261 | dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; | 1103 | dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; |
@@ -1305,11 +1147,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1305 | goto err_free_irq; | 1147 | goto err_free_irq; |
1306 | } | 1148 | } |
1307 | 1149 | ||
1308 | dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s%s)\n", | 1150 | dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n", |
1309 | mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode", | 1151 | mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode", |
1310 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", | 1152 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", |
1311 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", | 1153 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", |
1312 | dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "sg " : "", | ||
1313 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); | 1154 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); |
1314 | 1155 | ||
1315 | dma_async_device_register(dma_dev); | 1156 | dma_async_device_register(dma_dev); |
@@ -1552,7 +1393,6 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1552 | 1393 | ||
1553 | dma_cap_zero(cap_mask); | 1394 | dma_cap_zero(cap_mask); |
1554 | dma_cap_set(DMA_MEMCPY, cap_mask); | 1395 | dma_cap_set(DMA_MEMCPY, cap_mask); |
1555 | dma_cap_set(DMA_SG, cap_mask); | ||
1556 | dma_cap_set(DMA_XOR, cap_mask); | 1396 | dma_cap_set(DMA_XOR, cap_mask); |
1557 | dma_cap_set(DMA_INTERRUPT, cap_mask); | 1397 | dma_cap_set(DMA_INTERRUPT, cap_mask); |
1558 | 1398 | ||
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 3f45b9bdf201..d3f918a9ee76 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c | |||
@@ -1005,21 +1005,6 @@ static struct dma_async_tx_descriptor *nbpf_prep_memcpy( | |||
1005 | DMA_MEM_TO_MEM, flags); | 1005 | DMA_MEM_TO_MEM, flags); |
1006 | } | 1006 | } |
1007 | 1007 | ||
1008 | static struct dma_async_tx_descriptor *nbpf_prep_memcpy_sg( | ||
1009 | struct dma_chan *dchan, | ||
1010 | struct scatterlist *dst_sg, unsigned int dst_nents, | ||
1011 | struct scatterlist *src_sg, unsigned int src_nents, | ||
1012 | unsigned long flags) | ||
1013 | { | ||
1014 | struct nbpf_channel *chan = nbpf_to_chan(dchan); | ||
1015 | |||
1016 | if (dst_nents != src_nents) | ||
1017 | return NULL; | ||
1018 | |||
1019 | return nbpf_prep_sg(chan, src_sg, dst_sg, src_nents, | ||
1020 | DMA_MEM_TO_MEM, flags); | ||
1021 | } | ||
1022 | |||
1023 | static struct dma_async_tx_descriptor *nbpf_prep_slave_sg( | 1008 | static struct dma_async_tx_descriptor *nbpf_prep_slave_sg( |
1024 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, | 1009 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, |
1025 | enum dma_transfer_direction direction, unsigned long flags, void *context) | 1010 | enum dma_transfer_direction direction, unsigned long flags, void *context) |
@@ -1417,13 +1402,11 @@ static int nbpf_probe(struct platform_device *pdev) | |||
1417 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | 1402 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); |
1418 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); | 1403 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); |
1419 | dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); | 1404 | dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); |
1420 | dma_cap_set(DMA_SG, dma_dev->cap_mask); | ||
1421 | 1405 | ||
1422 | /* Common and MEMCPY operations */ | 1406 | /* Common and MEMCPY operations */ |
1423 | dma_dev->device_alloc_chan_resources | 1407 | dma_dev->device_alloc_chan_resources |
1424 | = nbpf_alloc_chan_resources; | 1408 | = nbpf_alloc_chan_resources; |
1425 | dma_dev->device_free_chan_resources = nbpf_free_chan_resources; | 1409 | dma_dev->device_free_chan_resources = nbpf_free_chan_resources; |
1426 | dma_dev->device_prep_dma_sg = nbpf_prep_memcpy_sg; | ||
1427 | dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy; | 1410 | dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy; |
1428 | dma_dev->device_tx_status = nbpf_tx_status; | 1411 | dma_dev->device_tx_status = nbpf_tx_status; |
1429 | dma_dev->device_issue_pending = nbpf_issue_pending; | 1412 | dma_dev->device_issue_pending = nbpf_issue_pending; |
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index faae0bfe1109..91fd395c90c4 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c | |||
@@ -38,8 +38,8 @@ static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec) | |||
38 | if (ofdma->of_node == dma_spec->np) | 38 | if (ofdma->of_node == dma_spec->np) |
39 | return ofdma; | 39 | return ofdma; |
40 | 40 | ||
41 | pr_debug("%s: can't find DMA controller %s\n", __func__, | 41 | pr_debug("%s: can't find DMA controller %pOF\n", __func__, |
42 | dma_spec->np->full_name); | 42 | dma_spec->np); |
43 | 43 | ||
44 | return NULL; | 44 | return NULL; |
45 | } | 45 | } |
@@ -255,8 +255,8 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | |||
255 | 255 | ||
256 | count = of_property_count_strings(np, "dma-names"); | 256 | count = of_property_count_strings(np, "dma-names"); |
257 | if (count < 0) { | 257 | if (count < 0) { |
258 | pr_err("%s: dma-names property of node '%s' missing or empty\n", | 258 | pr_err("%s: dma-names property of node '%pOF' missing or empty\n", |
259 | __func__, np->full_name); | 259 | __func__, np); |
260 | return ERR_PTR(-ENODEV); | 260 | return ERR_PTR(-ENODEV); |
261 | } | 261 | } |
262 | 262 | ||
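of-dma.c (and ppc4xx/adma.c below) switch their diagnostics from np->full_name to the %pOF printk specifier, which formats the device_node path directly from the pointer; variants such as %pOFf or %pOFc exist, see Documentation/printk-formats.txt. A minimal sketch:

#include <linux/of.h>
#include <linux/printk.h>

/* Sketch: %pOF consumes the struct device_node pointer directly. */
static void report_missing_dma_names(struct device_node *np)
{
        pr_err("%s: dma-names property of node '%pOF' missing or empty\n",
               __func__, np);
}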
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index b19ee04567b5..f122c2a7b9f0 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -3023,7 +3023,7 @@ static int pl330_remove(struct amba_device *adev) | |||
3023 | return 0; | 3023 | return 0; |
3024 | } | 3024 | } |
3025 | 3025 | ||
3026 | static struct amba_id pl330_ids[] = { | 3026 | static const struct amba_id pl330_ids[] = { |
3027 | { | 3027 | { |
3028 | .id = 0x00041330, | 3028 | .id = 0x00041330, |
3029 | .mask = 0x000fffff, | 3029 | .mask = 0x000fffff, |
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index b1535b1fe95c..4cf0d4d0cecf 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c | |||
@@ -4040,9 +4040,9 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev) | |||
4040 | /* it is DMA0 or DMA1 */ | 4040 | /* it is DMA0 or DMA1 */ |
4041 | idx = of_get_property(np, "cell-index", &len); | 4041 | idx = of_get_property(np, "cell-index", &len); |
4042 | if (!idx || (len != sizeof(u32))) { | 4042 | if (!idx || (len != sizeof(u32))) { |
4043 | dev_err(&ofdev->dev, "Device node %s has missing " | 4043 | dev_err(&ofdev->dev, "Device node %pOF has missing " |
4044 | "or invalid cell-index property\n", | 4044 | "or invalid cell-index property\n", |
4045 | np->full_name); | 4045 | np); |
4046 | return -EINVAL; | 4046 | return -EINVAL; |
4047 | } | 4047 | } |
4048 | id = *idx; | 4048 | id = *idx; |
@@ -4307,7 +4307,7 @@ static int ppc440spe_adma_remove(struct platform_device *ofdev) | |||
4307 | * "poly" allows setting/checking used polynomial (for PPC440SPe only). | 4307 | * "poly" allows setting/checking used polynomial (for PPC440SPe only). |
4308 | */ | 4308 | */ |
4309 | 4309 | ||
4310 | static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf) | 4310 | static ssize_t devices_show(struct device_driver *dev, char *buf) |
4311 | { | 4311 | { |
4312 | ssize_t size = 0; | 4312 | ssize_t size = 0; |
4313 | int i; | 4313 | int i; |
@@ -4321,16 +4321,17 @@ static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf) | |||
4321 | } | 4321 | } |
4322 | return size; | 4322 | return size; |
4323 | } | 4323 | } |
4324 | static DRIVER_ATTR_RO(devices); | ||
4324 | 4325 | ||
4325 | static ssize_t show_ppc440spe_r6enable(struct device_driver *dev, char *buf) | 4326 | static ssize_t enable_show(struct device_driver *dev, char *buf) |
4326 | { | 4327 | { |
4327 | return snprintf(buf, PAGE_SIZE, | 4328 | return snprintf(buf, PAGE_SIZE, |
4328 | "PPC440SP(e) RAID-6 capabilities are %sABLED.\n", | 4329 | "PPC440SP(e) RAID-6 capabilities are %sABLED.\n", |
4329 | ppc440spe_r6_enabled ? "EN" : "DIS"); | 4330 | ppc440spe_r6_enabled ? "EN" : "DIS"); |
4330 | } | 4331 | } |
4331 | 4332 | ||
4332 | static ssize_t store_ppc440spe_r6enable(struct device_driver *dev, | 4333 | static ssize_t enable_store(struct device_driver *dev, const char *buf, |
4333 | const char *buf, size_t count) | 4334 | size_t count) |
4334 | { | 4335 | { |
4335 | unsigned long val; | 4336 | unsigned long val; |
4336 | 4337 | ||
@@ -4357,8 +4358,9 @@ static ssize_t store_ppc440spe_r6enable(struct device_driver *dev, | |||
4357 | } | 4358 | } |
4358 | return count; | 4359 | return count; |
4359 | } | 4360 | } |
4361 | static DRIVER_ATTR_RW(enable); | ||
4360 | 4362 | ||
4361 | static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf) | 4363 | static ssize_t poly_show(struct device_driver *dev, char *buf)
4362 | { | 4364 | { |
4363 | ssize_t size = 0; | 4365 | ssize_t size = 0; |
4364 | u32 reg; | 4366 | u32 reg; |
@@ -4377,8 +4379,8 @@ static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf) | |||
4377 | return size; | 4379 | return size; |
4378 | } | 4380 | } |
4379 | 4381 | ||
4380 | static ssize_t store_ppc440spe_r6poly(struct device_driver *dev, | 4382 | static ssize_t poly_store(struct device_driver *dev, const char *buf, |
4381 | const char *buf, size_t count) | 4383 | size_t count) |
4382 | { | 4384 | { |
4383 | unsigned long reg, val; | 4385 | unsigned long reg, val; |
4384 | 4386 | ||
@@ -4404,12 +4406,7 @@ static ssize_t store_ppc440spe_r6poly(struct device_driver *dev, | |||
4404 | 4406 | ||
4405 | return count; | 4407 | return count; |
4406 | } | 4408 | } |
4407 | 4409 | static DRIVER_ATTR_RW(poly); | |
4408 | static DRIVER_ATTR(devices, S_IRUGO, show_ppc440spe_devices, NULL); | ||
4409 | static DRIVER_ATTR(enable, S_IRUGO | S_IWUSR, show_ppc440spe_r6enable, | ||
4410 | store_ppc440spe_r6enable); | ||
4411 | static DRIVER_ATTR(poly, S_IRUGO | S_IWUSR, show_ppc440spe_r6poly, | ||
4412 | store_ppc440spe_r6poly); | ||
4413 | 4410 | ||
4414 | /* | 4411 | /* |
4415 | * Common initialisation for RAID engines; allocate memory for | 4412 | * Common initialisation for RAID engines; allocate memory for |
@@ -4448,8 +4445,7 @@ static int ppc440spe_configure_raid_devices(void) | |||
4448 | dcr_base = dcr_resource_start(np, 0); | 4445 | dcr_base = dcr_resource_start(np, 0); |
4449 | dcr_len = dcr_resource_len(np, 0); | 4446 | dcr_len = dcr_resource_len(np, 0); |
4450 | if (!dcr_base && !dcr_len) { | 4447 | if (!dcr_base && !dcr_len) { |
4451 | pr_err("%s: can't get DCR registers base/len!\n", | 4448 | pr_err("%pOF: can't get DCR registers base/len!\n", np); |
4452 | np->full_name); | ||
4453 | of_node_put(np); | 4449 | of_node_put(np); |
4454 | iounmap(i2o_reg); | 4450 | iounmap(i2o_reg); |
4455 | return -ENODEV; | 4451 | return -ENODEV; |
@@ -4457,7 +4453,7 @@ static int ppc440spe_configure_raid_devices(void) | |||
4457 | 4453 | ||
4458 | i2o_dcr_host = dcr_map(np, dcr_base, dcr_len); | 4454 | i2o_dcr_host = dcr_map(np, dcr_base, dcr_len); |
4459 | if (!DCR_MAP_OK(i2o_dcr_host)) { | 4455 | if (!DCR_MAP_OK(i2o_dcr_host)) { |
4460 | pr_err("%s: failed to map DCRs!\n", np->full_name); | 4456 | pr_err("%pOF: failed to map DCRs!\n", np); |
4461 | of_node_put(np); | 4457 | of_node_put(np); |
4462 | iounmap(i2o_reg); | 4458 | iounmap(i2o_reg); |
4463 | return -ENODEV; | 4459 | return -ENODEV; |
@@ -4518,15 +4514,14 @@ static int ppc440spe_configure_raid_devices(void) | |||
4518 | dcr_base = dcr_resource_start(np, 0); | 4514 | dcr_base = dcr_resource_start(np, 0); |
4519 | dcr_len = dcr_resource_len(np, 0); | 4515 | dcr_len = dcr_resource_len(np, 0); |
4520 | if (!dcr_base && !dcr_len) { | 4516 | if (!dcr_base && !dcr_len) { |
4521 | pr_err("%s: can't get DCR registers base/len!\n", | 4517 | pr_err("%pOF: can't get DCR registers base/len!\n", np); |
4522 | np->full_name); | ||
4523 | ret = -ENODEV; | 4518 | ret = -ENODEV; |
4524 | goto out_mq; | 4519 | goto out_mq; |
4525 | } | 4520 | } |
4526 | 4521 | ||
4527 | ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len); | 4522 | ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len); |
4528 | if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) { | 4523 | if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) { |
4529 | pr_err("%s: failed to map DCRs!\n", np->full_name); | 4524 | pr_err("%pOF: failed to map DCRs!\n", np); |
4530 | ret = -ENODEV; | 4525 | ret = -ENODEV; |
4531 | goto out_mq; | 4526 | goto out_mq; |
4532 | } | 4527 | } |
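The ppc4xx sysfs conversion depends on the DRIVER_ATTR_RO()/DRIVER_ATTR_RW() helpers, which stitch <name>_show and <name>_store into a driver_attr_<name> structure, so the callbacks must follow that exact naming (this is also why the RAID-6 poly show routine ends up as poly_show, distinct from poly_store). A hedged sketch of the pattern with an invented attribute:

#include <linux/device.h>
#include <linux/kernel.h>

static int demo_value;  /* illustrative module state, not from this patch */

static ssize_t demo_show(struct device_driver *drv, char *buf)
{
        return sprintf(buf, "%d\n", demo_value);
}

static ssize_t demo_store(struct device_driver *drv, const char *buf,
                          size_t count)
{
        if (kstrtoint(buf, 0, &demo_value))
                return -EINVAL;
        return count;
}
/* Expands to struct driver_attribute driver_attr_demo, wired to demo_show()
 * and demo_store(); register it with driver_create_file().
 */
static DRIVER_ATTR_RW(demo);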
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 03c4eb3fd314..6d89fb6a6a92 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c | |||
@@ -65,6 +65,7 @@ struct bam_desc_hw { | |||
65 | #define DESC_FLAG_EOT BIT(14) | 65 | #define DESC_FLAG_EOT BIT(14) |
66 | #define DESC_FLAG_EOB BIT(13) | 66 | #define DESC_FLAG_EOB BIT(13) |
67 | #define DESC_FLAG_NWD BIT(12) | 67 | #define DESC_FLAG_NWD BIT(12) |
68 | #define DESC_FLAG_CMD BIT(11) | ||
68 | 69 | ||
69 | struct bam_async_desc { | 70 | struct bam_async_desc { |
70 | struct virt_dma_desc vd; | 71 | struct virt_dma_desc vd; |
@@ -645,6 +646,9 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, | |||
645 | unsigned int curr_offset = 0; | 646 | unsigned int curr_offset = 0; |
646 | 647 | ||
647 | do { | 648 | do { |
649 | if (flags & DMA_PREP_CMD) | ||
650 | desc->flags |= cpu_to_le16(DESC_FLAG_CMD); | ||
651 | |||
648 | desc->addr = cpu_to_le32(sg_dma_address(sg) + | 652 | desc->addr = cpu_to_le32(sg_dma_address(sg) + |
649 | curr_offset); | 653 | curr_offset); |
650 | 654 | ||
@@ -960,7 +964,7 @@ static void bam_start_dma(struct bam_chan *bchan) | |||
960 | 964 | ||
961 | /* set any special flags on the last descriptor */ | 965 | /* set any special flags on the last descriptor */ |
962 | if (async_desc->num_desc == async_desc->xfer_len) | 966 | if (async_desc->num_desc == async_desc->xfer_len) |
963 | desc[async_desc->xfer_len - 1].flags = | 967 | desc[async_desc->xfer_len - 1].flags |= |
964 | cpu_to_le16(async_desc->flags); | 968 | cpu_to_le16(async_desc->flags); |
965 | else | 969 | else |
966 | desc[async_desc->xfer_len - 1].flags |= | 970 | desc[async_desc->xfer_len - 1].flags |= |
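bam_dma now recognises a DMA_PREP_CMD prep flag and tags the generated hardware descriptors with DESC_FLAG_CMD, letting BAM command descriptors travel through the ordinary slave-sg path. A hedged client-side sketch; the channel and the DMA-mapped command buffer are assumed to have been set up elsewhere:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Sketch: submit a pre-mapped command buffer as a BAM command descriptor. */
static int send_bam_command(struct dma_chan *chan, struct scatterlist *sg,
                            unsigned int nents)
{
        struct dma_async_tx_descriptor *tx;

        tx = dmaengine_prep_slave_sg(chan, sg, nents, DMA_MEM_TO_DEV,
                                     DMA_PREP_CMD | DMA_PREP_INTERRUPT);
        if (!tx)
                return -EINVAL;

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        return 0;
}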
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 34fb6afd229b..e3669850aef4 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c | |||
@@ -411,7 +411,40 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src, | |||
411 | return NULL; | 411 | return NULL; |
412 | 412 | ||
413 | hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, | 413 | hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, |
414 | src, dest, len, flags); | 414 | src, dest, len, flags, |
415 | HIDMA_TRE_MEMCPY); | ||
416 | |||
417 | /* Place descriptor in prepared list */ | ||
418 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
419 | list_add_tail(&mdesc->node, &mchan->prepared); | ||
420 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
421 | |||
422 | return &mdesc->desc; | ||
423 | } | ||
424 | |||
425 | static struct dma_async_tx_descriptor * | ||
426 | hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value, | ||
427 | size_t len, unsigned long flags) | ||
428 | { | ||
429 | struct hidma_chan *mchan = to_hidma_chan(dmach); | ||
430 | struct hidma_desc *mdesc = NULL; | ||
431 | struct hidma_dev *mdma = mchan->dmadev; | ||
432 | unsigned long irqflags; | ||
433 | |||
434 | /* Get free descriptor */ | ||
435 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
436 | if (!list_empty(&mchan->free)) { | ||
437 | mdesc = list_first_entry(&mchan->free, struct hidma_desc, node); | ||
438 | list_del(&mdesc->node); | ||
439 | } | ||
440 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
441 | |||
442 | if (!mdesc) | ||
443 | return NULL; | ||
444 | |||
445 | hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, | ||
446 | value, dest, len, flags, | ||
447 | HIDMA_TRE_MEMSET); | ||
415 | 448 | ||
416 | /* Place descriptor in prepared list */ | 449 | /* Place descriptor in prepared list */ |
417 | spin_lock_irqsave(&mchan->lock, irqflags); | 450 | spin_lock_irqsave(&mchan->lock, irqflags); |
@@ -776,6 +809,7 @@ static int hidma_probe(struct platform_device *pdev) | |||
776 | pm_runtime_get_sync(dmadev->ddev.dev); | 809 | pm_runtime_get_sync(dmadev->ddev.dev); |
777 | 810 | ||
778 | dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask); | 811 | dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask); |
812 | dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask); | ||
779 | if (WARN_ON(!pdev->dev.dma_mask)) { | 813 | if (WARN_ON(!pdev->dev.dma_mask)) { |
780 | rc = -ENXIO; | 814 | rc = -ENXIO; |
781 | goto dmafree; | 815 | goto dmafree; |
@@ -786,6 +820,7 @@ static int hidma_probe(struct platform_device *pdev) | |||
786 | dmadev->dev_trca = trca; | 820 | dmadev->dev_trca = trca; |
787 | dmadev->trca_resource = trca_resource; | 821 | dmadev->trca_resource = trca_resource; |
788 | dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy; | 822 | dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy; |
823 | dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset; | ||
789 | dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources; | 824 | dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources; |
790 | dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources; | 825 | dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources; |
791 | dmadev->ddev.device_tx_status = hidma_tx_status; | 826 | dmadev->ddev.device_tx_status = hidma_tx_status; |
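hidma now sets DMA_MEMSET and wires device_prep_dma_memset, so a client can fill a DMA-mapped region with a single byte value through the generic wrapper. A sketch, assuming the caller has already mapped the destination buffer:

#include <linux/dmaengine.h>

/* Sketch: fill 'len' bytes at the DMA-mapped address 'dst' with 'value'. */
static int fill_region(struct dma_chan *chan, dma_addr_t dst, int value,
                       size_t len)
{
        struct dma_async_tx_descriptor *tx;

        tx = dmaengine_prep_dma_memset(chan, dst, value, len,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx)
                return -EINVAL;

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        return 0;
}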
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h index 41e0aa283828..5f9966e82c0b 100644 --- a/drivers/dma/qcom/hidma.h +++ b/drivers/dma/qcom/hidma.h | |||
@@ -28,6 +28,11 @@ | |||
28 | #define HIDMA_TRE_DEST_LOW_IDX 4 | 28 | #define HIDMA_TRE_DEST_LOW_IDX 4 |
29 | #define HIDMA_TRE_DEST_HI_IDX 5 | 29 | #define HIDMA_TRE_DEST_HI_IDX 5 |
30 | 30 | ||
31 | enum tre_type { | ||
32 | HIDMA_TRE_MEMCPY = 3, | ||
33 | HIDMA_TRE_MEMSET = 4, | ||
34 | }; | ||
35 | |||
31 | struct hidma_tre { | 36 | struct hidma_tre { |
32 | atomic_t allocated; /* if this channel is allocated */ | 37 | atomic_t allocated; /* if this channel is allocated */ |
33 | bool queued; /* flag whether this is pending */ | 38 | bool queued; /* flag whether this is pending */ |
@@ -150,7 +155,7 @@ void hidma_ll_start(struct hidma_lldev *llhndl); | |||
150 | int hidma_ll_disable(struct hidma_lldev *lldev); | 155 | int hidma_ll_disable(struct hidma_lldev *lldev); |
151 | int hidma_ll_enable(struct hidma_lldev *llhndl); | 156 | int hidma_ll_enable(struct hidma_lldev *llhndl); |
152 | void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch, | 157 | void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch, |
153 | dma_addr_t src, dma_addr_t dest, u32 len, u32 flags); | 158 | dma_addr_t src, dma_addr_t dest, u32 len, u32 flags, u32 txntype); |
154 | void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi); | 159 | void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi); |
155 | int hidma_ll_setup(struct hidma_lldev *lldev); | 160 | int hidma_ll_setup(struct hidma_lldev *lldev); |
156 | struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels, | 161 | struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels, |
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c index 1530a661518d..4999e266b2de 100644 --- a/drivers/dma/qcom/hidma_ll.c +++ b/drivers/dma/qcom/hidma_ll.c | |||
@@ -105,10 +105,6 @@ enum ch_state { | |||
105 | HIDMA_CH_STOPPED = 4, | 105 | HIDMA_CH_STOPPED = 4, |
106 | }; | 106 | }; |
107 | 107 | ||
108 | enum tre_type { | ||
109 | HIDMA_TRE_MEMCPY = 3, | ||
110 | }; | ||
111 | |||
112 | enum err_code { | 108 | enum err_code { |
113 | HIDMA_EVRE_STATUS_COMPLETE = 1, | 109 | HIDMA_EVRE_STATUS_COMPLETE = 1, |
114 | HIDMA_EVRE_STATUS_ERROR = 4, | 110 | HIDMA_EVRE_STATUS_ERROR = 4, |
@@ -174,8 +170,7 @@ int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name, | |||
174 | tre->err_info = 0; | 170 | tre->err_info = 0; |
175 | tre->lldev = lldev; | 171 | tre->lldev = lldev; |
176 | tre_local = &tre->tre_local[0]; | 172 | tre_local = &tre->tre_local[0]; |
177 | tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY; | 173 | tre_local[HIDMA_TRE_CFG_IDX] = (lldev->chidx & 0xFF) << 8; |
178 | tre_local[HIDMA_TRE_CFG_IDX] |= (lldev->chidx & 0xFF) << 8; | ||
179 | tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16); /* set IEOB */ | 174 | tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16); /* set IEOB */ |
180 | *tre_ch = i; | 175 | *tre_ch = i; |
181 | if (callback) | 176 | if (callback) |
@@ -607,7 +602,7 @@ int hidma_ll_disable(struct hidma_lldev *lldev) | |||
607 | 602 | ||
608 | void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch, | 603 | void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch, |
609 | dma_addr_t src, dma_addr_t dest, u32 len, | 604 | dma_addr_t src, dma_addr_t dest, u32 len, |
610 | u32 flags) | 605 | u32 flags, u32 txntype) |
611 | { | 606 | { |
612 | struct hidma_tre *tre; | 607 | struct hidma_tre *tre; |
613 | u32 *tre_local; | 608 | u32 *tre_local; |
@@ -626,6 +621,8 @@ void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch, | |||
626 | } | 621 | } |
627 | 622 | ||
628 | tre_local = &tre->tre_local[0]; | 623 | tre_local = &tre->tre_local[0]; |
624 | tre_local[HIDMA_TRE_CFG_IDX] &= ~GENMASK(7, 0); | ||
625 | tre_local[HIDMA_TRE_CFG_IDX] |= txntype; | ||
629 | tre_local[HIDMA_TRE_LEN_IDX] = len; | 626 | tre_local[HIDMA_TRE_LEN_IDX] = len; |
630 | tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src); | 627 | tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src); |
631 | tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src); | 628 | tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src); |
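In the low-level code the transaction type (HIDMA_TRE_MEMCPY = 3, HIDMA_TRE_MEMSET = 4) now occupies bits 7:0 of the TRE config word, the owning channel index sits in bits 15:8, and bit 16 still requests an interrupt on end-of-block; hidma_ll_set_transfer_params() clears and rewrites the type field on every prep. A sketch of that layout as the hunks imply it; any field not visible in this diff is an assumption:

#include <linux/bitops.h>
#include <linux/types.h>

/* Sketch of the TRE config word composition implied by the hunks above. */
static inline u32 hidma_tre_cfg(u32 txntype, u32 chidx, bool ieob)
{
        u32 cfg = 0;

        cfg |= txntype & GENMASK(7, 0);         /* 3 = memcpy, 4 = memset */
        cfg |= (chidx & 0xFF) << 8;             /* owning channel index */
        if (ieob)
                cfg |= BIT(16);                 /* interrupt on end of block */
        return cfg;
}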
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c index 5a0991bc4787..7335e2eb9b72 100644 --- a/drivers/dma/qcom/hidma_mgmt.c +++ b/drivers/dma/qcom/hidma_mgmt.c | |||
@@ -28,7 +28,7 @@ | |||
28 | 28 | ||
29 | #include "hidma_mgmt.h" | 29 | #include "hidma_mgmt.h" |
30 | 30 | ||
31 | #define HIDMA_QOS_N_OFFSET 0x300 | 31 | #define HIDMA_QOS_N_OFFSET 0x700 |
32 | #define HIDMA_CFG_OFFSET 0x400 | 32 | #define HIDMA_CFG_OFFSET 0x400 |
33 | #define HIDMA_MAX_BUS_REQ_LEN_OFFSET 0x41C | 33 | #define HIDMA_MAX_BUS_REQ_LEN_OFFSET 0x41C |
34 | #define HIDMA_MAX_XACTIONS_OFFSET 0x420 | 34 | #define HIDMA_MAX_XACTIONS_OFFSET 0x420 |
@@ -227,7 +227,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev) | |||
227 | goto out; | 227 | goto out; |
228 | } | 228 | } |
229 | 229 | ||
230 | if (max_write_request) { | 230 | if (max_write_request && |
231 | (max_write_request != mgmtdev->max_write_request)) { | ||
231 | dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n", | 232 | dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n", |
232 | max_write_request); | 233 | max_write_request); |
233 | mgmtdev->max_write_request = max_write_request; | 234 | mgmtdev->max_write_request = max_write_request; |
@@ -240,7 +241,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev) | |||
240 | dev_err(&pdev->dev, "max-read-burst-bytes missing\n"); | 241 | dev_err(&pdev->dev, "max-read-burst-bytes missing\n"); |
241 | goto out; | 242 | goto out; |
242 | } | 243 | } |
243 | if (max_read_request) { | 244 | if (max_read_request && |
245 | (max_read_request != mgmtdev->max_read_request)) { | ||
244 | dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n", | 246 | dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n", |
245 | max_read_request); | 247 | max_read_request); |
246 | mgmtdev->max_read_request = max_read_request; | 248 | mgmtdev->max_read_request = max_read_request; |
@@ -253,7 +255,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev) | |||
253 | dev_err(&pdev->dev, "max-write-transactions missing\n"); | 255 | dev_err(&pdev->dev, "max-write-transactions missing\n"); |
254 | goto out; | 256 | goto out; |
255 | } | 257 | } |
256 | if (max_wr_xactions) { | 258 | if (max_wr_xactions && |
259 | (max_wr_xactions != mgmtdev->max_wr_xactions)) { | ||
257 | dev_info(&pdev->dev, "overriding max-write-transactions: %d\n", | 260 | dev_info(&pdev->dev, "overriding max-write-transactions: %d\n", |
258 | max_wr_xactions); | 261 | max_wr_xactions); |
259 | mgmtdev->max_wr_xactions = max_wr_xactions; | 262 | mgmtdev->max_wr_xactions = max_wr_xactions; |
@@ -266,7 +269,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev) | |||
266 | dev_err(&pdev->dev, "max-read-transactions missing\n"); | 269 | dev_err(&pdev->dev, "max-read-transactions missing\n"); |
267 | goto out; | 270 | goto out; |
268 | } | 271 | } |
269 | if (max_rd_xactions) { | 272 | if (max_rd_xactions && |
273 | (max_rd_xactions != mgmtdev->max_rd_xactions)) { | ||
270 | dev_info(&pdev->dev, "overriding max-read-transactions: %d\n", | 274 | dev_info(&pdev->dev, "overriding max-read-transactions: %d\n", |
271 | max_rd_xactions); | 275 | max_rd_xactions); |
272 | mgmtdev->max_rd_xactions = max_rd_xactions; | 276 | mgmtdev->max_rd_xactions = max_rd_xactions; |
@@ -354,7 +358,7 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np) | |||
354 | struct platform_device_info pdevinfo; | 358 | struct platform_device_info pdevinfo; |
355 | struct of_phandle_args out_irq; | 359 | struct of_phandle_args out_irq; |
356 | struct device_node *child; | 360 | struct device_node *child; |
357 | struct resource *res; | 361 | struct resource *res = NULL; |
358 | const __be32 *cell; | 362 | const __be32 *cell; |
359 | int ret = 0, size, i, num; | 363 | int ret = 0, size, i, num; |
360 | u64 addr, addr_size; | 364 | u64 addr, addr_size; |
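The hidma_mgmt.c hunks repeat one pattern for all four limits (max write/read burst bytes, max write/read transactions): a module parameter now overrides the value read back from the hardware only when it is non-zero and actually different, so the "overriding ..." message is no longer printed on every probe. A runnable sketch of that guard, with printf standing in for dev_info():

    #include <stdio.h>

    /* Apply a module-parameter override only when it is set and differs
     * from the value the driver already read back from the device. */
    static void maybe_override(const char *name, unsigned int param,
                               unsigned int *cur)
    {
            if (param && param != *cur) {
                    printf("overriding %s: %u\n", name, param);
                    *cur = param;
            }
    }

    int main(void)
    {
            unsigned int max_write_request = 1024;

            maybe_override("max-write-burst-bytes", 1024, &max_write_request); /* silent  */
            maybe_override("max-write-burst-bytes", 2048, &max_write_request); /* printed */
            return 0;
    }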
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index ffcadca53243..2b2c7db3e480 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c | |||
@@ -1690,6 +1690,15 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, | |||
1690 | if (!irqname) | 1690 | if (!irqname) |
1691 | return -ENOMEM; | 1691 | return -ENOMEM; |
1692 | 1692 | ||
1693 | /* | ||
1694 | * Initialize the DMA engine channel and add it to the DMA engine | ||
1695 | * channels list. | ||
1696 | */ | ||
1697 | chan->device = &dmac->engine; | ||
1698 | dma_cookie_init(chan); | ||
1699 | |||
1700 | list_add_tail(&chan->device_node, &dmac->engine.channels); | ||
1701 | |||
1693 | ret = devm_request_threaded_irq(dmac->dev, rchan->irq, | 1702 | ret = devm_request_threaded_irq(dmac->dev, rchan->irq, |
1694 | rcar_dmac_isr_channel, | 1703 | rcar_dmac_isr_channel, |
1695 | rcar_dmac_isr_channel_thread, 0, | 1704 | rcar_dmac_isr_channel_thread, 0, |
@@ -1700,15 +1709,6 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, | |||
1700 | return ret; | 1709 | return ret; |
1701 | } | 1710 | } |
1702 | 1711 | ||
1703 | /* | ||
1704 | * Initialize the DMA engine channel and add it to the DMA engine | ||
1705 | * channels list. | ||
1706 | */ | ||
1707 | chan->device = &dmac->engine; | ||
1708 | dma_cookie_init(chan); | ||
1709 | |||
1710 | list_add_tail(&chan->device_node, &dmac->engine.channels); | ||
1711 | |||
1712 | return 0; | 1712 | return 0; |
1713 | } | 1713 | } |
1714 | 1714 | ||
@@ -1794,14 +1794,6 @@ static int rcar_dmac_probe(struct platform_device *pdev) | |||
1794 | if (!irqname) | 1794 | if (!irqname) |
1795 | return -ENOMEM; | 1795 | return -ENOMEM; |
1796 | 1796 | ||
1797 | ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0, | ||
1798 | irqname, dmac); | ||
1799 | if (ret) { | ||
1800 | dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n", | ||
1801 | irq, ret); | ||
1802 | return ret; | ||
1803 | } | ||
1804 | |||
1805 | /* Enable runtime PM and initialize the device. */ | 1797 | /* Enable runtime PM and initialize the device. */ |
1806 | pm_runtime_enable(&pdev->dev); | 1798 | pm_runtime_enable(&pdev->dev); |
1807 | ret = pm_runtime_get_sync(&pdev->dev); | 1799 | ret = pm_runtime_get_sync(&pdev->dev); |
@@ -1818,8 +1810,32 @@ static int rcar_dmac_probe(struct platform_device *pdev) | |||
1818 | goto error; | 1810 | goto error; |
1819 | } | 1811 | } |
1820 | 1812 | ||
1821 | /* Initialize the channels. */ | 1813 | /* Initialize engine */ |
1822 | INIT_LIST_HEAD(&dmac->engine.channels); | 1814 | engine = &dmac->engine; |
1815 | |||
1816 | dma_cap_set(DMA_MEMCPY, engine->cap_mask); | ||
1817 | dma_cap_set(DMA_SLAVE, engine->cap_mask); | ||
1818 | |||
1819 | engine->dev = &pdev->dev; | ||
1820 | engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE); | ||
1821 | |||
1822 | engine->src_addr_widths = widths; | ||
1823 | engine->dst_addr_widths = widths; | ||
1824 | engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); | ||
1825 | engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
1826 | |||
1827 | engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources; | ||
1828 | engine->device_free_chan_resources = rcar_dmac_free_chan_resources; | ||
1829 | engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy; | ||
1830 | engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg; | ||
1831 | engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic; | ||
1832 | engine->device_config = rcar_dmac_device_config; | ||
1833 | engine->device_terminate_all = rcar_dmac_chan_terminate_all; | ||
1834 | engine->device_tx_status = rcar_dmac_tx_status; | ||
1835 | engine->device_issue_pending = rcar_dmac_issue_pending; | ||
1836 | engine->device_synchronize = rcar_dmac_device_synchronize; | ||
1837 | |||
1838 | INIT_LIST_HEAD(&engine->channels); | ||
1823 | 1839 | ||
1824 | for (i = 0; i < dmac->n_channels; ++i) { | 1840 | for (i = 0; i < dmac->n_channels; ++i) { |
1825 | ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], | 1841 | ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], |
@@ -1828,6 +1844,14 @@ static int rcar_dmac_probe(struct platform_device *pdev) | |||
1828 | goto error; | 1844 | goto error; |
1829 | } | 1845 | } |
1830 | 1846 | ||
1847 | ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0, | ||
1848 | irqname, dmac); | ||
1849 | if (ret) { | ||
1850 | dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n", | ||
1851 | irq, ret); | ||
1852 | return ret; | ||
1853 | } | ||
1854 | |||
1831 | /* Register the DMAC as a DMA provider for DT. */ | 1855 | /* Register the DMAC as a DMA provider for DT. */ |
1832 | ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate, | 1856 | ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate, |
1833 | NULL); | 1857 | NULL); |
@@ -1839,29 +1863,6 @@ static int rcar_dmac_probe(struct platform_device *pdev) | |||
1839 | * | 1863 | * |
1840 | * Default transfer size of 32 bytes requires 32-byte alignment. | 1864 | * Default transfer size of 32 bytes requires 32-byte alignment. |
1841 | */ | 1865 | */ |
1842 | engine = &dmac->engine; | ||
1843 | dma_cap_set(DMA_MEMCPY, engine->cap_mask); | ||
1844 | dma_cap_set(DMA_SLAVE, engine->cap_mask); | ||
1845 | |||
1846 | engine->dev = &pdev->dev; | ||
1847 | engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE); | ||
1848 | |||
1849 | engine->src_addr_widths = widths; | ||
1850 | engine->dst_addr_widths = widths; | ||
1851 | engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); | ||
1852 | engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
1853 | |||
1854 | engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources; | ||
1855 | engine->device_free_chan_resources = rcar_dmac_free_chan_resources; | ||
1856 | engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy; | ||
1857 | engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg; | ||
1858 | engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic; | ||
1859 | engine->device_config = rcar_dmac_device_config; | ||
1860 | engine->device_terminate_all = rcar_dmac_chan_terminate_all; | ||
1861 | engine->device_tx_status = rcar_dmac_tx_status; | ||
1862 | engine->device_issue_pending = rcar_dmac_issue_pending; | ||
1863 | engine->device_synchronize = rcar_dmac_device_synchronize; | ||
1864 | |||
1865 | ret = dma_async_device_register(engine); | 1866 | ret = dma_async_device_register(engine); |
1866 | if (ret < 0) | 1867 | if (ret < 0) |
1867 | goto error; | 1868 | goto error; |
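The rcar-dmac reordering follows one rule: everything an interrupt handler may touch must be valid before the corresponding IRQ is requested, so the per-channel fields and the engine's channel list are set up before devm_request_threaded_irq(), and the error IRQ is requested only after all channels are probed and the dma_device callbacks are filled in. A driver-agnostic sketch of that rule; my_isr and the chan fields below are hypothetical names, not taken from this driver:

    /* Initialize all state the handler dereferences before the line can
     * fire (shared or spurious interrupts may arrive immediately). */
    spin_lock_init(&chan->lock);
    INIT_LIST_HEAD(&chan->desc_list);
    chan->dev = dev;

    ret = devm_request_irq(dev, irq, my_isr, 0, dev_name(dev), chan);
    if (ret)
            return ret;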
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index c3052fbfd092..c2b089af0420 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -79,7 +79,7 @@ static int dma40_memcpy_channels[] = { | |||
79 | }; | 79 | }; |
80 | 80 | ||
81 | /* Default configuration for physcial memcpy */ | 81 | /* Default configuration for physcial memcpy */ |
82 | static struct stedma40_chan_cfg dma40_memcpy_conf_phy = { | 82 | static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = { |
83 | .mode = STEDMA40_MODE_PHYSICAL, | 83 | .mode = STEDMA40_MODE_PHYSICAL, |
84 | .dir = DMA_MEM_TO_MEM, | 84 | .dir = DMA_MEM_TO_MEM, |
85 | 85 | ||
@@ -93,7 +93,7 @@ static struct stedma40_chan_cfg dma40_memcpy_conf_phy = { | |||
93 | }; | 93 | }; |
94 | 94 | ||
95 | /* Default configuration for logical memcpy */ | 95 | /* Default configuration for logical memcpy */ |
96 | static struct stedma40_chan_cfg dma40_memcpy_conf_log = { | 96 | static const struct stedma40_chan_cfg dma40_memcpy_conf_log = { |
97 | .mode = STEDMA40_MODE_LOGICAL, | 97 | .mode = STEDMA40_MODE_LOGICAL, |
98 | .dir = DMA_MEM_TO_MEM, | 98 | .dir = DMA_MEM_TO_MEM, |
99 | 99 | ||
@@ -2485,19 +2485,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
2485 | } | 2485 | } |
2486 | 2486 | ||
2487 | static struct dma_async_tx_descriptor * | 2487 | static struct dma_async_tx_descriptor * |
2488 | d40_prep_memcpy_sg(struct dma_chan *chan, | ||
2489 | struct scatterlist *dst_sg, unsigned int dst_nents, | ||
2490 | struct scatterlist *src_sg, unsigned int src_nents, | ||
2491 | unsigned long dma_flags) | ||
2492 | { | ||
2493 | if (dst_nents != src_nents) | ||
2494 | return NULL; | ||
2495 | |||
2496 | return d40_prep_sg(chan, src_sg, dst_sg, src_nents, | ||
2497 | DMA_MEM_TO_MEM, dma_flags); | ||
2498 | } | ||
2499 | |||
2500 | static struct dma_async_tx_descriptor * | ||
2501 | d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 2488 | d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
2502 | unsigned int sg_len, enum dma_transfer_direction direction, | 2489 | unsigned int sg_len, enum dma_transfer_direction direction, |
2503 | unsigned long dma_flags, void *context) | 2490 | unsigned long dma_flags, void *context) |
@@ -2821,9 +2808,6 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev) | |||
2821 | dev->copy_align = DMAENGINE_ALIGN_4_BYTES; | 2808 | dev->copy_align = DMAENGINE_ALIGN_4_BYTES; |
2822 | } | 2809 | } |
2823 | 2810 | ||
2824 | if (dma_has_cap(DMA_SG, dev->cap_mask)) | ||
2825 | dev->device_prep_dma_sg = d40_prep_memcpy_sg; | ||
2826 | |||
2827 | if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) | 2811 | if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) |
2828 | dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; | 2812 | dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; |
2829 | 2813 | ||
@@ -2865,7 +2849,6 @@ static int __init d40_dmaengine_init(struct d40_base *base, | |||
2865 | 2849 | ||
2866 | dma_cap_zero(base->dma_memcpy.cap_mask); | 2850 | dma_cap_zero(base->dma_memcpy.cap_mask); |
2867 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); | 2851 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); |
2868 | dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask); | ||
2869 | 2852 | ||
2870 | d40_ops_init(base, &base->dma_memcpy); | 2853 | d40_ops_init(base, &base->dma_memcpy); |
2871 | 2854 | ||
@@ -2883,7 +2866,6 @@ static int __init d40_dmaengine_init(struct d40_base *base, | |||
2883 | dma_cap_zero(base->dma_both.cap_mask); | 2866 | dma_cap_zero(base->dma_both.cap_mask); |
2884 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); | 2867 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); |
2885 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); | 2868 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); |
2886 | dma_cap_set(DMA_SG, base->dma_both.cap_mask); | ||
2887 | dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); | 2869 | dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); |
2888 | 2870 | ||
2889 | d40_ops_init(base, &base->dma_both); | 2871 | d40_ops_init(base, &base->dma_both); |
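ste_dma40 drops its DMA_SG support here, and the same removal runs through the xgene and zynqmp drivers and the dmaengine core later in this diff (DMA_SG, device_prep_dma_sg and dmaengine_prep_dma_sg all go away). A client that still needs memory-to-memory scatter-gather copies can express the same work as one memcpy descriptor per segment pair; a hedged kernel-style sketch, assuming both lists are already DMA-mapped and pairwise of equal length, with error handling and completion callbacks omitted:

    struct scatterlist *dsg, *ssg;
    struct dma_async_tx_descriptor *tx;
    int i;

    /* One DMA_MEMCPY descriptor per (dst, src) segment pair. */
    for (dsg = dst_sg, ssg = src_sg, i = 0; i < nents;
         dsg = sg_next(dsg), ssg = sg_next(ssg), i++) {
            tx = dmaengine_prep_dma_memcpy(chan, sg_dma_address(dsg),
                                           sg_dma_address(ssg),
                                           sg_dma_len(ssg),
                                           DMA_PREP_INTERRUPT);
            if (!tx)
                    break;
            dmaengine_submit(tx);
    }
    dma_async_issue_pending(chan);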
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index a2358780ab2c..bcd496edc70f 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c | |||
@@ -101,6 +101,17 @@ struct sun6i_dma_config { | |||
101 | u32 nr_max_channels; | 101 | u32 nr_max_channels; |
102 | u32 nr_max_requests; | 102 | u32 nr_max_requests; |
103 | u32 nr_max_vchans; | 103 | u32 nr_max_vchans; |
104 | /* | ||
105 | * In the datasheets/user manuals of newer Allwinner SoCs, a special | ||
106 | * bit (bit 2 at register 0x20) is present. | ||
107 | * It's named "DMA MCLK interface circuit auto gating bit" in the | ||
108 | * documents, and the footnote of this register says that this bit | ||
109 | * should be set up when initializing the DMA controller. | ||
110 | * Allwinner A23/A33 user manuals do not have this bit documented, | ||
111 | * however these SoCs really have and need this bit, as seen in the | ||
112 | * BSP kernel source code. | ||
113 | */ | ||
114 | bool gate_needed; | ||
104 | }; | 115 | }; |
105 | 116 | ||
106 | /* | 117 | /* |
@@ -1009,6 +1020,7 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = { | |||
1009 | .nr_max_channels = 8, | 1020 | .nr_max_channels = 8, |
1010 | .nr_max_requests = 24, | 1021 | .nr_max_requests = 24, |
1011 | .nr_max_vchans = 37, | 1022 | .nr_max_vchans = 37, |
1023 | .gate_needed = true, | ||
1012 | }; | 1024 | }; |
1013 | 1025 | ||
1014 | static struct sun6i_dma_config sun8i_a83t_dma_cfg = { | 1026 | static struct sun6i_dma_config sun8i_a83t_dma_cfg = { |
@@ -1028,11 +1040,24 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = { | |||
1028 | .nr_max_vchans = 34, | 1040 | .nr_max_vchans = 34, |
1029 | }; | 1041 | }; |
1030 | 1042 | ||
1043 | /* | ||
1044 | * The V3s have only 8 physical channels, a maximum DRQ port id of 23, | ||
1045 | * and a total of 24 usable source and destination endpoints. | ||
1046 | */ | ||
1047 | |||
1048 | static struct sun6i_dma_config sun8i_v3s_dma_cfg = { | ||
1049 | .nr_max_channels = 8, | ||
1050 | .nr_max_requests = 23, | ||
1051 | .nr_max_vchans = 24, | ||
1052 | .gate_needed = true, | ||
1053 | }; | ||
1054 | |||
1031 | static const struct of_device_id sun6i_dma_match[] = { | 1055 | static const struct of_device_id sun6i_dma_match[] = { |
1032 | { .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg }, | 1056 | { .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg }, |
1033 | { .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg }, | 1057 | { .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg }, |
1034 | { .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg }, | 1058 | { .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg }, |
1035 | { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, | 1059 | { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, |
1060 | { .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg }, | ||
1036 | { /* sentinel */ } | 1061 | { /* sentinel */ } |
1037 | }; | 1062 | }; |
1038 | MODULE_DEVICE_TABLE(of, sun6i_dma_match); | 1063 | MODULE_DEVICE_TABLE(of, sun6i_dma_match); |
@@ -1174,13 +1199,7 @@ static int sun6i_dma_probe(struct platform_device *pdev) | |||
1174 | goto err_dma_unregister; | 1199 | goto err_dma_unregister; |
1175 | } | 1200 | } |
1176 | 1201 | ||
1177 | /* | 1202 | if (sdc->cfg->gate_needed) |
1178 | * sun8i variant requires us to toggle a dma gating register, | ||
1179 | * as seen in Allwinner's SDK. This register is not documented | ||
1180 | * in the A23 user manual. | ||
1181 | */ | ||
1182 | if (of_device_is_compatible(pdev->dev.of_node, | ||
1183 | "allwinner,sun8i-a23-dma")) | ||
1184 | writel(SUN8I_DMA_GATE_ENABLE, sdc->base + SUN8I_DMA_GATE); | 1203 | writel(SUN8I_DMA_GATE_ENABLE, sdc->base + SUN8I_DMA_GATE); |
1185 | 1204 | ||
1186 | return 0; | 1205 | return 0; |
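The sun6i change replaces the old compatible-string check with a per-SoC gate_needed flag, so the A23/A33 and the newly added V3s configs simply set .gate_needed = true and the probe path writes the "DMA MCLK interface circuit auto gating bit" described in the comment above. A hypothetical new variant would only need a config entry plus a match-table line; the compatible string and limits below are made up for illustration:

    /* Hypothetical new SoC: the gating write in probe is enabled purely
     * by this flag, no compatible-string special case needed. */
    static struct sun6i_dma_config sun8i_newsoc_dma_cfg = {
            .nr_max_channels = 8,
            .nr_max_requests = 24,
            .nr_max_vchans   = 30,
            .gate_needed     = true,
    };

    /* added to sun6i_dma_match[]: */
    { .compatible = "allwinner,sun8i-newsoc-dma", .data = &sun8i_newsoc_dma_cfg },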
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index 2403475a37cf..2f65a8fde21d 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c | |||
@@ -308,7 +308,7 @@ static const struct of_device_id ti_dra7_master_match[] = { | |||
308 | static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p) | 308 | static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p) |
309 | { | 309 | { |
310 | for (; len > 0; len--) | 310 | for (; len > 0; len--) |
311 | clear_bit(offset + (len - 1), p); | 311 | set_bit(offset + (len - 1), p); |
312 | } | 312 | } |
313 | 313 | ||
314 | static int ti_dra7_xbar_probe(struct platform_device *pdev) | 314 | static int ti_dra7_xbar_probe(struct platform_device *pdev) |
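The ti-dma-crossbar fix is a one-word bug: ti_dra7_xbar_reserve() is supposed to mark reserved crossbar routes as used in the in-use bitmap so the allocator, which judging by this map's use hands out free routes by searching for clear bits, skips them; clear_bit() did the opposite and left reserved routes allocatable. A small runnable illustration of the difference, modelling the bitmap as a plain word:

    #include <stdio.h>

    #define NROUTES 16

    /* Allocator model: hand out the first route whose bit is clear. */
    static int first_free(unsigned int inuse)
    {
            for (int i = 0; i < NROUTES; i++)
                    if (!(inuse & (1u << i)))
                            return i;
            return -1;
    }

    int main(void)
    {
            unsigned int inuse = 0;

            /* Reserve routes 0..3 the fixed way: set their bits. */
            for (int i = 0; i < 4; i++)
                    inuse |= 1u << i;

            printf("first allocatable route: %d\n", first_free(inuse)); /* 4 */
            return 0;
    }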
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index 8b693b712d0f..1d5988849aa6 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c | |||
@@ -391,11 +391,6 @@ static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, | |||
391 | *paddr += nbytes; | 391 | *paddr += nbytes; |
392 | } | 392 | } |
393 | 393 | ||
394 | static void xgene_dma_invalidate_buffer(__le64 *ext8) | ||
395 | { | ||
396 | *ext8 |= cpu_to_le64(XGENE_DMA_INVALID_LEN_CODE); | ||
397 | } | ||
398 | |||
399 | static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx) | 394 | static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx) |
400 | { | 395 | { |
401 | switch (idx) { | 396 | switch (idx) { |
@@ -425,48 +420,6 @@ static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc, | |||
425 | XGENE_DMA_DESC_HOENQ_NUM_POS); | 420 | XGENE_DMA_DESC_HOENQ_NUM_POS); |
426 | } | 421 | } |
427 | 422 | ||
428 | static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan, | ||
429 | struct xgene_dma_desc_sw *desc_sw, | ||
430 | dma_addr_t dst, dma_addr_t src, | ||
431 | size_t len) | ||
432 | { | ||
433 | struct xgene_dma_desc_hw *desc1, *desc2; | ||
434 | int i; | ||
435 | |||
436 | /* Get 1st descriptor */ | ||
437 | desc1 = &desc_sw->desc1; | ||
438 | xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num); | ||
439 | |||
440 | /* Set destination address */ | ||
441 | desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT); | ||
442 | desc1->m3 |= cpu_to_le64(dst); | ||
443 | |||
444 | /* Set 1st source address */ | ||
445 | xgene_dma_set_src_buffer(&desc1->m1, &len, &src); | ||
446 | |||
447 | if (!len) | ||
448 | return; | ||
449 | |||
450 | /* | ||
451 | * We need to split this source buffer, | ||
452 | * and need to use 2nd descriptor | ||
453 | */ | ||
454 | desc2 = &desc_sw->desc2; | ||
455 | desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT); | ||
456 | |||
457 | /* Set 2nd to 5th source address */ | ||
458 | for (i = 0; i < 4 && len; i++) | ||
459 | xgene_dma_set_src_buffer(xgene_dma_lookup_ext8(desc2, i), | ||
460 | &len, &src); | ||
461 | |||
462 | /* Invalidate unused source address field */ | ||
463 | for (; i < 4; i++) | ||
464 | xgene_dma_invalidate_buffer(xgene_dma_lookup_ext8(desc2, i)); | ||
465 | |||
466 | /* Updated flag that we have prepared 64B descriptor */ | ||
467 | desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC; | ||
468 | } | ||
469 | |||
470 | static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan, | 423 | static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan, |
471 | struct xgene_dma_desc_sw *desc_sw, | 424 | struct xgene_dma_desc_sw *desc_sw, |
472 | dma_addr_t *dst, dma_addr_t *src, | 425 | dma_addr_t *dst, dma_addr_t *src, |
@@ -891,114 +844,6 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan) | |||
891 | chan->desc_pool = NULL; | 844 | chan->desc_pool = NULL; |
892 | } | 845 | } |
893 | 846 | ||
894 | static struct dma_async_tx_descriptor *xgene_dma_prep_sg( | ||
895 | struct dma_chan *dchan, struct scatterlist *dst_sg, | ||
896 | u32 dst_nents, struct scatterlist *src_sg, | ||
897 | u32 src_nents, unsigned long flags) | ||
898 | { | ||
899 | struct xgene_dma_desc_sw *first = NULL, *new = NULL; | ||
900 | struct xgene_dma_chan *chan; | ||
901 | size_t dst_avail, src_avail; | ||
902 | dma_addr_t dst, src; | ||
903 | size_t len; | ||
904 | |||
905 | if (unlikely(!dchan)) | ||
906 | return NULL; | ||
907 | |||
908 | if (unlikely(!dst_nents || !src_nents)) | ||
909 | return NULL; | ||
910 | |||
911 | if (unlikely(!dst_sg || !src_sg)) | ||
912 | return NULL; | ||
913 | |||
914 | chan = to_dma_chan(dchan); | ||
915 | |||
916 | /* Get prepared for the loop */ | ||
917 | dst_avail = sg_dma_len(dst_sg); | ||
918 | src_avail = sg_dma_len(src_sg); | ||
919 | dst_nents--; | ||
920 | src_nents--; | ||
921 | |||
922 | /* Run until we are out of scatterlist entries */ | ||
923 | while (true) { | ||
924 | /* Create the largest transaction possible */ | ||
925 | len = min_t(size_t, src_avail, dst_avail); | ||
926 | len = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT); | ||
927 | if (len == 0) | ||
928 | goto fetch; | ||
929 | |||
930 | dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail; | ||
931 | src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail; | ||
932 | |||
933 | /* Allocate the link descriptor from DMA pool */ | ||
934 | new = xgene_dma_alloc_descriptor(chan); | ||
935 | if (!new) | ||
936 | goto fail; | ||
937 | |||
938 | /* Prepare DMA descriptor */ | ||
939 | xgene_dma_prep_cpy_desc(chan, new, dst, src, len); | ||
940 | |||
941 | if (!first) | ||
942 | first = new; | ||
943 | |||
944 | new->tx.cookie = 0; | ||
945 | async_tx_ack(&new->tx); | ||
946 | |||
947 | /* update metadata */ | ||
948 | dst_avail -= len; | ||
949 | src_avail -= len; | ||
950 | |||
951 | /* Insert the link descriptor to the LD ring */ | ||
952 | list_add_tail(&new->node, &first->tx_list); | ||
953 | |||
954 | fetch: | ||
955 | /* fetch the next dst scatterlist entry */ | ||
956 | if (dst_avail == 0) { | ||
957 | /* no more entries: we're done */ | ||
958 | if (dst_nents == 0) | ||
959 | break; | ||
960 | |||
961 | /* fetch the next entry: if there are no more: done */ | ||
962 | dst_sg = sg_next(dst_sg); | ||
963 | if (!dst_sg) | ||
964 | break; | ||
965 | |||
966 | dst_nents--; | ||
967 | dst_avail = sg_dma_len(dst_sg); | ||
968 | } | ||
969 | |||
970 | /* fetch the next src scatterlist entry */ | ||
971 | if (src_avail == 0) { | ||
972 | /* no more entries: we're done */ | ||
973 | if (src_nents == 0) | ||
974 | break; | ||
975 | |||
976 | /* fetch the next entry: if there are no more: done */ | ||
977 | src_sg = sg_next(src_sg); | ||
978 | if (!src_sg) | ||
979 | break; | ||
980 | |||
981 | src_nents--; | ||
982 | src_avail = sg_dma_len(src_sg); | ||
983 | } | ||
984 | } | ||
985 | |||
986 | if (!new) | ||
987 | return NULL; | ||
988 | |||
989 | new->tx.flags = flags; /* client is in control of this ack */ | ||
990 | new->tx.cookie = -EBUSY; | ||
991 | list_splice(&first->tx_list, &new->tx_list); | ||
992 | |||
993 | return &new->tx; | ||
994 | fail: | ||
995 | if (!first) | ||
996 | return NULL; | ||
997 | |||
998 | xgene_dma_free_desc_list(chan, &first->tx_list); | ||
999 | return NULL; | ||
1000 | } | ||
1001 | |||
1002 | static struct dma_async_tx_descriptor *xgene_dma_prep_xor( | 847 | static struct dma_async_tx_descriptor *xgene_dma_prep_xor( |
1003 | struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src, | 848 | struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src, |
1004 | u32 src_cnt, size_t len, unsigned long flags) | 849 | u32 src_cnt, size_t len, unsigned long flags) |
@@ -1653,7 +1498,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan, | |||
1653 | dma_cap_zero(dma_dev->cap_mask); | 1498 | dma_cap_zero(dma_dev->cap_mask); |
1654 | 1499 | ||
1655 | /* Set DMA device capability */ | 1500 | /* Set DMA device capability */ |
1656 | dma_cap_set(DMA_SG, dma_dev->cap_mask); | ||
1657 | 1501 | ||
1658 | /* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR | 1502 | /* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR |
1659 | * and channel 1 supports XOR, PQ both. First thing here is we have | 1503 | * and channel 1 supports XOR, PQ both. First thing here is we have |
@@ -1679,7 +1523,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan, | |||
1679 | dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources; | 1523 | dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources; |
1680 | dma_dev->device_issue_pending = xgene_dma_issue_pending; | 1524 | dma_dev->device_issue_pending = xgene_dma_issue_pending; |
1681 | dma_dev->device_tx_status = xgene_dma_tx_status; | 1525 | dma_dev->device_tx_status = xgene_dma_tx_status; |
1682 | dma_dev->device_prep_dma_sg = xgene_dma_prep_sg; | ||
1683 | 1526 | ||
1684 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | 1527 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { |
1685 | dma_dev->device_prep_dma_xor = xgene_dma_prep_xor; | 1528 | dma_dev->device_prep_dma_xor = xgene_dma_prep_xor; |
@@ -1731,8 +1574,7 @@ static int xgene_dma_async_register(struct xgene_dma *pdma, int id) | |||
1731 | 1574 | ||
1732 | /* DMA capability info */ | 1575 | /* DMA capability info */ |
1733 | dev_info(pdma->dev, | 1576 | dev_info(pdma->dev, |
1734 | "%s: CAPABILITY ( %s%s%s)\n", dma_chan_name(&chan->dma_chan), | 1577 | "%s: CAPABILITY ( %s%s)\n", dma_chan_name(&chan->dma_chan), |
1735 | dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "", | ||
1736 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "", | 1578 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "", |
1737 | dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : ""); | 1579 | dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : ""); |
1738 | 1580 | ||
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 8cf87b1a284b..8722bcba489d 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c | |||
@@ -2124,7 +2124,7 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, | |||
2124 | *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); | 2124 | *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); |
2125 | if (IS_ERR(*axi_clk)) { | 2125 | if (IS_ERR(*axi_clk)) { |
2126 | err = PTR_ERR(*axi_clk); | 2126 | err = PTR_ERR(*axi_clk); |
2127 | dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err); | 2127 | dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err); |
2128 | return err; | 2128 | return err; |
2129 | } | 2129 | } |
2130 | 2130 | ||
@@ -2142,25 +2142,25 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, | |||
2142 | 2142 | ||
2143 | err = clk_prepare_enable(*axi_clk); | 2143 | err = clk_prepare_enable(*axi_clk); |
2144 | if (err) { | 2144 | if (err) { |
2145 | dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); | 2145 | dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); |
2146 | return err; | 2146 | return err; |
2147 | } | 2147 | } |
2148 | 2148 | ||
2149 | err = clk_prepare_enable(*tx_clk); | 2149 | err = clk_prepare_enable(*tx_clk); |
2150 | if (err) { | 2150 | if (err) { |
2151 | dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); | 2151 | dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); |
2152 | goto err_disable_axiclk; | 2152 | goto err_disable_axiclk; |
2153 | } | 2153 | } |
2154 | 2154 | ||
2155 | err = clk_prepare_enable(*rx_clk); | 2155 | err = clk_prepare_enable(*rx_clk); |
2156 | if (err) { | 2156 | if (err) { |
2157 | dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); | 2157 | dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); |
2158 | goto err_disable_txclk; | 2158 | goto err_disable_txclk; |
2159 | } | 2159 | } |
2160 | 2160 | ||
2161 | err = clk_prepare_enable(*sg_clk); | 2161 | err = clk_prepare_enable(*sg_clk); |
2162 | if (err) { | 2162 | if (err) { |
2163 | dev_err(&pdev->dev, "failed to enable sg_clk (%u)\n", err); | 2163 | dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err); |
2164 | goto err_disable_rxclk; | 2164 | goto err_disable_rxclk; |
2165 | } | 2165 | } |
2166 | 2166 | ||
@@ -2189,26 +2189,26 @@ static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, | |||
2189 | *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); | 2189 | *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); |
2190 | if (IS_ERR(*axi_clk)) { | 2190 | if (IS_ERR(*axi_clk)) { |
2191 | err = PTR_ERR(*axi_clk); | 2191 | err = PTR_ERR(*axi_clk); |
2192 | dev_err(&pdev->dev, "failed to get axi_clk (%u)\n", err); | 2192 | dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err); |
2193 | return err; | 2193 | return err; |
2194 | } | 2194 | } |
2195 | 2195 | ||
2196 | *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); | 2196 | *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); |
2197 | if (IS_ERR(*dev_clk)) { | 2197 | if (IS_ERR(*dev_clk)) { |
2198 | err = PTR_ERR(*dev_clk); | 2198 | err = PTR_ERR(*dev_clk); |
2199 | dev_err(&pdev->dev, "failed to get dev_clk (%u)\n", err); | 2199 | dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err); |
2200 | return err; | 2200 | return err; |
2201 | } | 2201 | } |
2202 | 2202 | ||
2203 | err = clk_prepare_enable(*axi_clk); | 2203 | err = clk_prepare_enable(*axi_clk); |
2204 | if (err) { | 2204 | if (err) { |
2205 | dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); | 2205 | dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); |
2206 | return err; | 2206 | return err; |
2207 | } | 2207 | } |
2208 | 2208 | ||
2209 | err = clk_prepare_enable(*dev_clk); | 2209 | err = clk_prepare_enable(*dev_clk); |
2210 | if (err) { | 2210 | if (err) { |
2211 | dev_err(&pdev->dev, "failed to enable dev_clk (%u)\n", err); | 2211 | dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err); |
2212 | goto err_disable_axiclk; | 2212 | goto err_disable_axiclk; |
2213 | } | 2213 | } |
2214 | 2214 | ||
@@ -2229,7 +2229,7 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, | |||
2229 | *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); | 2229 | *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); |
2230 | if (IS_ERR(*axi_clk)) { | 2230 | if (IS_ERR(*axi_clk)) { |
2231 | err = PTR_ERR(*axi_clk); | 2231 | err = PTR_ERR(*axi_clk); |
2232 | dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err); | 2232 | dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err); |
2233 | return err; | 2233 | return err; |
2234 | } | 2234 | } |
2235 | 2235 | ||
@@ -2251,31 +2251,31 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, | |||
2251 | 2251 | ||
2252 | err = clk_prepare_enable(*axi_clk); | 2252 | err = clk_prepare_enable(*axi_clk); |
2253 | if (err) { | 2253 | if (err) { |
2254 | dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); | 2254 | dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); |
2255 | return err; | 2255 | return err; |
2256 | } | 2256 | } |
2257 | 2257 | ||
2258 | err = clk_prepare_enable(*tx_clk); | 2258 | err = clk_prepare_enable(*tx_clk); |
2259 | if (err) { | 2259 | if (err) { |
2260 | dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); | 2260 | dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); |
2261 | goto err_disable_axiclk; | 2261 | goto err_disable_axiclk; |
2262 | } | 2262 | } |
2263 | 2263 | ||
2264 | err = clk_prepare_enable(*txs_clk); | 2264 | err = clk_prepare_enable(*txs_clk); |
2265 | if (err) { | 2265 | if (err) { |
2266 | dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err); | 2266 | dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err); |
2267 | goto err_disable_txclk; | 2267 | goto err_disable_txclk; |
2268 | } | 2268 | } |
2269 | 2269 | ||
2270 | err = clk_prepare_enable(*rx_clk); | 2270 | err = clk_prepare_enable(*rx_clk); |
2271 | if (err) { | 2271 | if (err) { |
2272 | dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); | 2272 | dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); |
2273 | goto err_disable_txsclk; | 2273 | goto err_disable_txsclk; |
2274 | } | 2274 | } |
2275 | 2275 | ||
2276 | err = clk_prepare_enable(*rxs_clk); | 2276 | err = clk_prepare_enable(*rxs_clk); |
2277 | if (err) { | 2277 | if (err) { |
2278 | dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err); | 2278 | dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err); |
2279 | goto err_disable_rxclk; | 2279 | goto err_disable_rxclk; |
2280 | } | 2280 | } |
2281 | 2281 | ||
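The xilinx_dma hunks only change format specifiers: err holds a negative errno from PTR_ERR() or clk_prepare_enable(), so %u rendered it as a huge unsigned number while %d prints the actual error code. A short runnable illustration, using -517 (-EPROBE_DEFER, a value devm_clk_get() commonly returns) as an example:

    #include <stdio.h>

    int main(void)
    {
            int err = -517;                 /* e.g. -EPROBE_DEFER */

            printf("old: failed to get axi_aclk (%u)\n", (unsigned int)err); /* 4294966779 */
            printf("new: failed to get axi_aclk (%d)\n", err);               /* -517       */
            return 0;
    }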
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 47f64192d2fd..1ee1241ca797 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c | |||
@@ -830,98 +830,6 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy( | |||
830 | } | 830 | } |
831 | 831 | ||
832 | /** | 832 | /** |
833 | * zynqmp_dma_prep_slave_sg - prepare descriptors for a memory sg transaction | ||
834 | * @dchan: DMA channel | ||
835 | * @dst_sg: Destination scatter list | ||
836 | * @dst_sg_len: Number of entries in destination scatter list | ||
837 | * @src_sg: Source scatter list | ||
838 | * @src_sg_len: Number of entries in source scatter list | ||
839 | * @flags: transfer ack flags | ||
840 | * | ||
841 | * Return: Async transaction descriptor on success and NULL on failure | ||
842 | */ | ||
843 | static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg( | ||
844 | struct dma_chan *dchan, struct scatterlist *dst_sg, | ||
845 | unsigned int dst_sg_len, struct scatterlist *src_sg, | ||
846 | unsigned int src_sg_len, unsigned long flags) | ||
847 | { | ||
848 | struct zynqmp_dma_desc_sw *new, *first = NULL; | ||
849 | struct zynqmp_dma_chan *chan = to_chan(dchan); | ||
850 | void *desc = NULL, *prev = NULL; | ||
851 | size_t len, dst_avail, src_avail; | ||
852 | dma_addr_t dma_dst, dma_src; | ||
853 | u32 desc_cnt = 0, i; | ||
854 | struct scatterlist *sg; | ||
855 | |||
856 | for_each_sg(src_sg, sg, src_sg_len, i) | ||
857 | desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), | ||
858 | ZYNQMP_DMA_MAX_TRANS_LEN); | ||
859 | |||
860 | spin_lock_bh(&chan->lock); | ||
861 | if (desc_cnt > chan->desc_free_cnt) { | ||
862 | spin_unlock_bh(&chan->lock); | ||
863 | dev_dbg(chan->dev, "chan %p descs are not available\n", chan); | ||
864 | return NULL; | ||
865 | } | ||
866 | chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt; | ||
867 | spin_unlock_bh(&chan->lock); | ||
868 | |||
869 | dst_avail = sg_dma_len(dst_sg); | ||
870 | src_avail = sg_dma_len(src_sg); | ||
871 | |||
872 | /* Run until we are out of scatterlist entries */ | ||
873 | while (true) { | ||
874 | /* Allocate and populate the descriptor */ | ||
875 | new = zynqmp_dma_get_descriptor(chan); | ||
876 | desc = (struct zynqmp_dma_desc_ll *)new->src_v; | ||
877 | len = min_t(size_t, src_avail, dst_avail); | ||
878 | len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN); | ||
879 | if (len == 0) | ||
880 | goto fetch; | ||
881 | dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - | ||
882 | dst_avail; | ||
883 | dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - | ||
884 | src_avail; | ||
885 | |||
886 | zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst, | ||
887 | len, prev); | ||
888 | prev = desc; | ||
889 | dst_avail -= len; | ||
890 | src_avail -= len; | ||
891 | |||
892 | if (!first) | ||
893 | first = new; | ||
894 | else | ||
895 | list_add_tail(&new->node, &first->tx_list); | ||
896 | fetch: | ||
897 | /* Fetch the next dst scatterlist entry */ | ||
898 | if (dst_avail == 0) { | ||
899 | if (dst_sg_len == 0) | ||
900 | break; | ||
901 | dst_sg = sg_next(dst_sg); | ||
902 | if (dst_sg == NULL) | ||
903 | break; | ||
904 | dst_sg_len--; | ||
905 | dst_avail = sg_dma_len(dst_sg); | ||
906 | } | ||
907 | /* Fetch the next src scatterlist entry */ | ||
908 | if (src_avail == 0) { | ||
909 | if (src_sg_len == 0) | ||
910 | break; | ||
911 | src_sg = sg_next(src_sg); | ||
912 | if (src_sg == NULL) | ||
913 | break; | ||
914 | src_sg_len--; | ||
915 | src_avail = sg_dma_len(src_sg); | ||
916 | } | ||
917 | } | ||
918 | |||
919 | zynqmp_dma_desc_config_eod(chan, desc); | ||
920 | first->async_tx.flags = flags; | ||
921 | return &first->async_tx; | ||
922 | } | ||
923 | |||
924 | /** | ||
925 | * zynqmp_dma_chan_remove - Channel remove function | 833 | * zynqmp_dma_chan_remove - Channel remove function |
926 | * @chan: ZynqMP DMA channel pointer | 834 | * @chan: ZynqMP DMA channel pointer |
927 | */ | 835 | */ |
@@ -1064,11 +972,9 @@ static int zynqmp_dma_probe(struct platform_device *pdev) | |||
1064 | INIT_LIST_HEAD(&zdev->common.channels); | 972 | INIT_LIST_HEAD(&zdev->common.channels); |
1065 | 973 | ||
1066 | dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); | 974 | dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); |
1067 | dma_cap_set(DMA_SG, zdev->common.cap_mask); | ||
1068 | dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask); | 975 | dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask); |
1069 | 976 | ||
1070 | p = &zdev->common; | 977 | p = &zdev->common; |
1071 | p->device_prep_dma_sg = zynqmp_dma_prep_sg; | ||
1072 | p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy; | 978 | p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy; |
1073 | p->device_terminate_all = zynqmp_dma_device_terminate_all; | 979 | p->device_terminate_all = zynqmp_dma_device_terminate_all; |
1074 | p->device_issue_pending = zynqmp_dma_issue_pending; | 980 | p->device_issue_pending = zynqmp_dma_issue_pending; |
diff --git a/include/linux/dma/qcom_bam_dma.h b/include/linux/dma/qcom_bam_dma.h new file mode 100644 index 000000000000..077d43a358e5 --- /dev/null +++ b/include/linux/dma/qcom_bam_dma.h | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This software is licensed under the terms of the GNU General Public | ||
5 | * License version 2, as published by the Free Software Foundation, and | ||
6 | * may be copied, distributed, and modified under those terms. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #ifndef _QCOM_BAM_DMA_H | ||
15 | #define _QCOM_BAM_DMA_H | ||
16 | |||
17 | #include <asm/byteorder.h> | ||
18 | |||
19 | /* | ||
20 | * This data type corresponds to the native Command Element | ||
21 | * supported by BAM DMA Engine. | ||
22 | * | ||
23 | * @cmd_and_addr - upper 8 bits command and lower 24 bits register address. | ||
24 | * @data - for write command: content to be written into peripheral register. | ||
25 | * for read command: dest addr to write peripheral register value. | ||
26 | * @mask - register mask. | ||
27 | * @reserved - for future usage. | ||
28 | * | ||
29 | */ | ||
30 | struct bam_cmd_element { | ||
31 | __le32 cmd_and_addr; | ||
32 | __le32 data; | ||
33 | __le32 mask; | ||
34 | __le32 reserved; | ||
35 | }; | ||
36 | |||
37 | /* | ||
38 | * This enum indicates the command type in a command element | ||
39 | */ | ||
40 | enum bam_command_type { | ||
41 | BAM_WRITE_COMMAND = 0, | ||
42 | BAM_READ_COMMAND, | ||
43 | }; | ||
44 | |||
45 | /* | ||
46 | * prep_bam_ce_le32 - Wrapper function to prepare a single BAM command | ||
47 | * element with the data already in le32 format. | ||
48 | * | ||
49 | * @bam_ce: bam command element | ||
50 | * @addr: target address | ||
51 | * @cmd: BAM command | ||
52 | * @data: actual data for write and dest addr for read in le32 | ||
53 | */ | ||
54 | static inline void | ||
55 | bam_prep_ce_le32(struct bam_cmd_element *bam_ce, u32 addr, | ||
56 | enum bam_command_type cmd, __le32 data) | ||
57 | { | ||
58 | bam_ce->cmd_and_addr = | ||
59 | cpu_to_le32((addr & 0xffffff) | ((cmd & 0xff) << 24)); | ||
60 | bam_ce->data = data; | ||
61 | bam_ce->mask = cpu_to_le32(0xffffffff); | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * bam_prep_ce - Wrapper function to prepare a single BAM command element | ||
66 | * with the data. | ||
67 | * | ||
68 | * @bam_ce: BAM command element | ||
69 | * @addr: target address | ||
70 | * @cmd: BAM command | ||
71 | * @data: actual data for write and dest addr for read | ||
72 | */ | ||
73 | static inline void | ||
74 | bam_prep_ce(struct bam_cmd_element *bam_ce, u32 addr, | ||
75 | enum bam_command_type cmd, u32 data) | ||
76 | { | ||
77 | bam_prep_ce_le32(bam_ce, addr, cmd, cpu_to_le32(data)); | ||
78 | } | ||
79 | #endif | ||
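The new qcom_bam_dma.h header lets peripheral drivers build BAM command elements (register read/write commands) themselves; the DMA_PREP_CMD flag added to dmaengine.h below is presumably how such a buffer is then marked as command data when handed to the BAM channel. A hedged usage sketch from a hypothetical peripheral driver; the register offset, the value and the use of dmaengine_prep_slave_sg() with DMA_PREP_CMD are assumptions, not taken from this diff:

    #include <linux/dma/qcom_bam_dma.h>
    #include <linux/dmaengine.h>

    /* One command element that writes 0x1 to the peripheral register at
     * offset 0x24, submitted to the BAM channel as command data.
     * 'ce' is assumed to be already DMA-mapped and described by 'sg'. */
    static int send_one_cmd(struct dma_chan *chan, struct bam_cmd_element *ce,
                            struct scatterlist *sg)
    {
            struct dma_async_tx_descriptor *tx;

            bam_prep_ce(ce, 0x24, BAM_WRITE_COMMAND, 0x1);

            tx = dmaengine_prep_slave_sg(chan, sg, 1, DMA_MEM_TO_DEV,
                                         DMA_PREP_CMD);
            if (!tx)
                    return -EIO;

            dmaengine_submit(tx);
            dma_async_issue_pending(chan);
            return 0;
    }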
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 533680860865..8319101170fc 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -68,7 +68,6 @@ enum dma_transaction_type { | |||
68 | DMA_MEMSET, | 68 | DMA_MEMSET, |
69 | DMA_MEMSET_SG, | 69 | DMA_MEMSET_SG, |
70 | DMA_INTERRUPT, | 70 | DMA_INTERRUPT, |
71 | DMA_SG, | ||
72 | DMA_PRIVATE, | 71 | DMA_PRIVATE, |
73 | DMA_ASYNC_TX, | 72 | DMA_ASYNC_TX, |
74 | DMA_SLAVE, | 73 | DMA_SLAVE, |
@@ -186,6 +185,9 @@ struct dma_interleaved_template { | |||
186 | * on the result of this operation | 185 | * on the result of this operation |
187 | * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till | 186 | * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till |
188 | * cleared or freed | 187 | * cleared or freed |
188 | * @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command | ||
189 | * data and the descriptor should be in different format from normal | ||
190 | * data descriptors. | ||
189 | */ | 191 | */ |
190 | enum dma_ctrl_flags { | 192 | enum dma_ctrl_flags { |
191 | DMA_PREP_INTERRUPT = (1 << 0), | 193 | DMA_PREP_INTERRUPT = (1 << 0), |
@@ -195,6 +197,7 @@ enum dma_ctrl_flags { | |||
195 | DMA_PREP_CONTINUE = (1 << 4), | 197 | DMA_PREP_CONTINUE = (1 << 4), |
196 | DMA_PREP_FENCE = (1 << 5), | 198 | DMA_PREP_FENCE = (1 << 5), |
197 | DMA_CTRL_REUSE = (1 << 6), | 199 | DMA_CTRL_REUSE = (1 << 6), |
200 | DMA_PREP_CMD = (1 << 7), | ||
198 | }; | 201 | }; |
199 | 202 | ||
200 | /** | 203 | /** |
@@ -771,11 +774,6 @@ struct dma_device { | |||
771 | unsigned int nents, int value, unsigned long flags); | 774 | unsigned int nents, int value, unsigned long flags); |
772 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( | 775 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( |
773 | struct dma_chan *chan, unsigned long flags); | 776 | struct dma_chan *chan, unsigned long flags); |
774 | struct dma_async_tx_descriptor *(*device_prep_dma_sg)( | ||
775 | struct dma_chan *chan, | ||
776 | struct scatterlist *dst_sg, unsigned int dst_nents, | ||
777 | struct scatterlist *src_sg, unsigned int src_nents, | ||
778 | unsigned long flags); | ||
779 | 777 | ||
780 | struct dma_async_tx_descriptor *(*device_prep_slave_sg)( | 778 | struct dma_async_tx_descriptor *(*device_prep_slave_sg)( |
781 | struct dma_chan *chan, struct scatterlist *sgl, | 779 | struct dma_chan *chan, struct scatterlist *sgl, |
@@ -905,19 +903,6 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy( | |||
905 | len, flags); | 903 | len, flags); |
906 | } | 904 | } |
907 | 905 | ||
908 | static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( | ||
909 | struct dma_chan *chan, | ||
910 | struct scatterlist *dst_sg, unsigned int dst_nents, | ||
911 | struct scatterlist *src_sg, unsigned int src_nents, | ||
912 | unsigned long flags) | ||
913 | { | ||
914 | if (!chan || !chan->device || !chan->device->device_prep_dma_sg) | ||
915 | return NULL; | ||
916 | |||
917 | return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents, | ||
918 | src_sg, src_nents, flags); | ||
919 | } | ||
920 | |||
921 | /** | 906 | /** |
922 | * dmaengine_terminate_all() - Terminate all active DMA transfers | 907 | * dmaengine_terminate_all() - Terminate all active DMA transfers |
923 | * @chan: The channel for which to terminate the transfers | 908 | * @chan: The channel for which to terminate the transfers |