author | Vinod Koul <vinod.koul@intel.com> | 2018-04-09 23:25:26 -0400 |
---|---|---|
committer | Vinod Koul <vinod.koul@intel.com> | 2018-04-09 23:25:26 -0400 |
commit | c21bd0a86789ed54cf7dfb948c590766484a585c (patch) | |
tree | 726a95e33d5736f3dffda829a87044302ad2a12f | |
parent | ab2528c1b19e6e3b5a3713dfe6b054c672b4a498 (diff) | |
parent | e10734e5a0285220f46a37e8bbdfb241acebb04b (diff) |
Merge branch 'topic/mtek' into for-linus
-rw-r--r-- | Documentation/devicetree/bindings/dma/mtk-hsdma.txt | 33 | ||||
-rw-r--r-- | MAINTAINERS | 9 | ||||
-rw-r--r-- | drivers/dma/Kconfig | 2 | ||||
-rw-r--r-- | drivers/dma/Makefile | 1 | ||||
-rw-r--r-- | drivers/dma/mediatek/Kconfig | 13 | ||||
-rw-r--r-- | drivers/dma/mediatek/Makefile | 1 | ||||
-rw-r--r-- | drivers/dma/mediatek/mtk-hsdma.c | 1056 |
7 files changed, 1115 insertions, 0 deletions
diff --git a/Documentation/devicetree/bindings/dma/mtk-hsdma.txt b/Documentation/devicetree/bindings/dma/mtk-hsdma.txt new file mode 100644 index 000000000000..4bb317359dc6 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/mtk-hsdma.txt | |||
@@ -0,0 +1,33 @@ | |||
1 | MediaTek High-Speed DMA Controller | ||
2 | ================================== | ||
3 | |||
4 | This device follows the generic DMA bindings defined in dma/dma.txt. | ||
5 | |||
6 | Required properties: | ||
7 | |||
8 | - compatible: Must be one of | ||
9 | "mediatek,mt7622-hsdma": for MT7622 SoC | ||
10 | "mediatek,mt7623-hsdma": for MT7623 SoC | ||
11 | - reg: Should contain the base address and length of the registers. | ||
12 | - interrupts: Should contain a reference to the interrupt used by this | ||
13 | device. | ||
14 | - clocks: Should contain the clock specifier corresponding to the entry | ||
15 | in the clock-names property. | ||
16 | - clock-names: Should contain a "hsdma" entry. | ||
17 | - power-domains: Phandle to the power domain that the device is part of. | ||
18 | - #dma-cells: The length of the DMA specifier, must be <1>. This one cell | ||
19 | in dmas property of a client device represents the channel | ||
20 | number. | ||
21 | Example: | ||
22 | |||
23 | hsdma: dma-controller@1b007000 { | ||
24 | compatible = "mediatek,mt7623-hsdma"; | ||
25 | reg = <0 0x1b007000 0 0x1000>; | ||
26 | interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_LOW>; | ||
27 | clocks = <ðsys CLK_ETHSYS_HSDMA>; | ||
28 | clock-names = "hsdma"; | ||
29 | power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>; | ||
30 | #dma-cells = <1>; | ||
31 | }; | ||
32 | |||
33 | DMA clients must use the format described in the dma/dma.txt file. | ||
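For reference, a minimal client fragment following the one-cell specifier described above; the consumer node, channel number and request name are illustrative assumptions, not taken from this commit:

```dts
/* Hypothetical DMA client: requests channel 0 of the HSDMA above */
client {
	dmas = <&hsdma 0>;	/* the one cell selects the channel number */
	dma-names = "memcpy";
};
```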
diff --git a/MAINTAINERS b/MAINTAINERS index b31bfdb8a09e..bccced5d131d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -8785,6 +8785,15 @@ M: Sean Wang <sean.wang@mediatek.com> | |||
8785 | S: Maintained | 8785 | S: Maintained |
8786 | F: drivers/media/rc/mtk-cir.c | 8786 | F: drivers/media/rc/mtk-cir.c |
8787 | 8787 | ||
8788 | MEDIATEK DMA DRIVER | ||
8789 | M: Sean Wang <sean.wang@mediatek.com> | ||
8790 | L: dmaengine@vger.kernel.org | ||
8791 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
8792 | L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) | ||
8793 | S: Maintained | ||
8794 | F: Documentation/devicetree/bindings/dma/mtk-* | ||
8795 | F: drivers/dma/mediatek/ | ||
8796 | |||
8788 | MEDIATEK PMIC LED DRIVER | 8797 | MEDIATEK PMIC LED DRIVER |
8789 | M: Sean Wang <sean.wang@mediatek.com> | 8798 | M: Sean Wang <sean.wang@mediatek.com> |
8790 | S: Maintained | 8799 | S: Maintained |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index c36272aa7c09..6d61cd023633 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -643,6 +643,8 @@ config ZX_DMA | |||
643 | # driver files | 643 | # driver files |
644 | source "drivers/dma/bestcomm/Kconfig" | 644 | source "drivers/dma/bestcomm/Kconfig" |
645 | 645 | ||
646 | source "drivers/dma/mediatek/Kconfig" | ||
647 | |||
646 | source "drivers/dma/qcom/Kconfig" | 648 | source "drivers/dma/qcom/Kconfig" |
647 | 649 | ||
648 | source "drivers/dma/dw/Kconfig" | 650 | source "drivers/dma/dw/Kconfig" |
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index c242a5e8906b..0f62a4d49aab 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
@@ -76,5 +76,6 @@ obj-$(CONFIG_XGENE_DMA) += xgene-dma.o | |||
76 | obj-$(CONFIG_ZX_DMA) += zx_dma.o | 76 | obj-$(CONFIG_ZX_DMA) += zx_dma.o |
77 | obj-$(CONFIG_ST_FDMA) += st_fdma.o | 77 | obj-$(CONFIG_ST_FDMA) += st_fdma.o |
78 | 78 | ||
79 | obj-y += mediatek/ | ||
79 | obj-y += qcom/ | 80 | obj-y += qcom/ |
80 | obj-y += xilinx/ | 81 | obj-y += xilinx/ |
diff --git a/drivers/dma/mediatek/Kconfig b/drivers/dma/mediatek/Kconfig new file mode 100644 index 000000000000..27bac0bba09e --- /dev/null +++ b/drivers/dma/mediatek/Kconfig | |||
@@ -0,0 +1,13 @@ | |||
1 | |||
2 | config MTK_HSDMA | ||
3 | tristate "MediaTek High-Speed DMA controller support" | ||
4 | depends on ARCH_MEDIATEK || COMPILE_TEST | ||
5 | select DMA_ENGINE | ||
6 | select DMA_VIRTUAL_CHANNELS | ||
7 | ---help--- | ||
8 | Enable support for High-Speed DMA controller on MediaTek | ||
9 | SoCs. | ||
10 | |||
11 | This controller provides channels dedicated to memory-to- | ||
12 | memory transfer, offloading such copies from the CPU through | ||
13 | ring-based descriptor management. | ||
diff --git a/drivers/dma/mediatek/Makefile b/drivers/dma/mediatek/Makefile new file mode 100644 index 000000000000..6e778f842f01 --- /dev/null +++ b/drivers/dma/mediatek/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o | |||
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c new file mode 100644 index 000000000000..b7ec56ae02a6 --- /dev/null +++ b/drivers/dma/mediatek/mtk-hsdma.c | |||
@@ -0,0 +1,1056 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | // Copyright (c) 2017-2018 MediaTek Inc. | ||
3 | |||
4 | /* | ||
5 | * Driver for MediaTek High-Speed DMA Controller | ||
6 | * | ||
7 | * Author: Sean Wang <sean.wang@mediatek.com> | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #include <linux/bitops.h> | ||
12 | #include <linux/clk.h> | ||
13 | #include <linux/dmaengine.h> | ||
14 | #include <linux/dma-mapping.h> | ||
15 | #include <linux/err.h> | ||
16 | #include <linux/iopoll.h> | ||
17 | #include <linux/list.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/of.h> | ||
20 | #include <linux/of_device.h> | ||
21 | #include <linux/of_dma.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/pm_runtime.h> | ||
24 | #include <linux/refcount.h> | ||
25 | #include <linux/slab.h> | ||
26 | |||
27 | #include "../virt-dma.h" | ||
28 | |||
29 | #define MTK_HSDMA_USEC_POLL 20 | ||
30 | #define MTK_HSDMA_TIMEOUT_POLL 200000 | ||
31 | #define MTK_HSDMA_DMA_BUSWIDTHS BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
32 | |||
33 | /* The default number of virtual channels */ | ||
34 | #define MTK_HSDMA_NR_VCHANS 3 | ||
35 | |||
36 | /* Only one physical channel supported */ | ||
37 | #define MTK_HSDMA_NR_MAX_PCHANS 1 | ||
38 | |||
39 | /* Macros for physical descriptor (PD) manipulation */ | ||
40 | /* The number of PDs, which must be a power of 2 */ | ||
41 | #define MTK_DMA_SIZE 64 | ||
42 | #define MTK_HSDMA_NEXT_DESP_IDX(x, y) (((x) + 1) & ((y) - 1)) | ||
43 | #define MTK_HSDMA_LAST_DESP_IDX(x, y) (((x) - 1) & ((y) - 1)) | ||
44 | #define MTK_HSDMA_MAX_LEN 0x3f80 | ||
45 | #define MTK_HSDMA_ALIGN_SIZE 4 | ||
46 | #define MTK_HSDMA_PLEN_MASK 0x3fff | ||
47 | #define MTK_HSDMA_DESC_PLEN(x) (((x) & MTK_HSDMA_PLEN_MASK) << 16) | ||
48 | #define MTK_HSDMA_DESC_PLEN_GET(x) (((x) >> 16) & MTK_HSDMA_PLEN_MASK) | ||
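As a quick illustration of the ring-index and packed-length macros above, a standalone C sketch (userspace, with illustrative sample values) showing the power-of-2 wrap-around and the PLEN round trip:

```c
#include <assert.h>
#include <stdint.h>

#define MTK_DMA_SIZE			64	/* must be a power of 2 */
#define MTK_HSDMA_NEXT_DESP_IDX(x, y)	(((x) + 1) & ((y) - 1))
#define MTK_HSDMA_LAST_DESP_IDX(x, y)	(((x) - 1) & ((y) - 1))
#define MTK_HSDMA_PLEN_MASK		0x3fff
#define MTK_HSDMA_DESC_PLEN(x)		(((x) & MTK_HSDMA_PLEN_MASK) << 16)
#define MTK_HSDMA_DESC_PLEN_GET(x)	(((x) >> 16) & MTK_HSDMA_PLEN_MASK)

int main(void)
{
	/* Masking with (size - 1) wraps the index around the ring */
	assert(MTK_HSDMA_NEXT_DESP_IDX(63, MTK_DMA_SIZE) == 0);
	assert(MTK_HSDMA_LAST_DESP_IDX(0, MTK_DMA_SIZE) == 63);

	/* PLEN packs a 14-bit length into bits [29:16] of desc2 */
	uint32_t desc2 = MTK_HSDMA_DESC_PLEN(0x3f80);
	assert(MTK_HSDMA_DESC_PLEN_GET(desc2) == 0x3f80);

	return 0;
}
```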
49 | |||
50 | /* Registers for underlying ring manipulation */ | ||
51 | #define MTK_HSDMA_TX_BASE 0x0 | ||
52 | #define MTK_HSDMA_TX_CNT 0x4 | ||
53 | #define MTK_HSDMA_TX_CPU 0x8 | ||
54 | #define MTK_HSDMA_TX_DMA 0xc | ||
55 | #define MTK_HSDMA_RX_BASE 0x100 | ||
56 | #define MTK_HSDMA_RX_CNT 0x104 | ||
57 | #define MTK_HSDMA_RX_CPU 0x108 | ||
58 | #define MTK_HSDMA_RX_DMA 0x10c | ||
59 | |||
60 | /* Registers for global setup */ | ||
61 | #define MTK_HSDMA_GLO 0x204 | ||
62 | #define MTK_HSDMA_GLO_MULTI_DMA BIT(10) | ||
63 | #define MTK_HSDMA_TX_WB_DDONE BIT(6) | ||
64 | #define MTK_HSDMA_BURST_64BYTES (0x2 << 4) | ||
65 | #define MTK_HSDMA_GLO_RX_BUSY BIT(3) | ||
66 | #define MTK_HSDMA_GLO_RX_DMA BIT(2) | ||
67 | #define MTK_HSDMA_GLO_TX_BUSY BIT(1) | ||
68 | #define MTK_HSDMA_GLO_TX_DMA BIT(0) | ||
69 | #define MTK_HSDMA_GLO_DMA (MTK_HSDMA_GLO_TX_DMA | \ | ||
70 | MTK_HSDMA_GLO_RX_DMA) | ||
71 | #define MTK_HSDMA_GLO_BUSY (MTK_HSDMA_GLO_RX_BUSY | \ | ||
72 | MTK_HSDMA_GLO_TX_BUSY) | ||
73 | #define MTK_HSDMA_GLO_DEFAULT (MTK_HSDMA_GLO_TX_DMA | \ | ||
74 | MTK_HSDMA_GLO_RX_DMA | \ | ||
75 | MTK_HSDMA_TX_WB_DDONE | \ | ||
76 | MTK_HSDMA_BURST_64BYTES | \ | ||
77 | MTK_HSDMA_GLO_MULTI_DMA) | ||
78 | |||
79 | /* Registers for reset */ | ||
80 | #define MTK_HSDMA_RESET 0x208 | ||
81 | #define MTK_HSDMA_RST_TX BIT(0) | ||
82 | #define MTK_HSDMA_RST_RX BIT(16) | ||
83 | |||
84 | /* Registers for interrupt control */ | ||
85 | #define MTK_HSDMA_DLYINT 0x20c | ||
86 | #define MTK_HSDMA_RXDLY_INT_EN BIT(15) | ||
87 | |||
88 | /* Interrupt fires when the number of pending PDs exceeds the threshold */ | ||
89 | #define MTK_HSDMA_RXMAX_PINT(x) (((x) & 0x7f) << 8) | ||
90 | |||
91 | /* Interrupt fires when PDs stay pending longer than the threshold, in 20 us units */ | ||
92 | #define MTK_HSDMA_RXMAX_PTIME(x) ((x) & 0x7f) | ||
93 | #define MTK_HSDMA_DLYINT_DEFAULT (MTK_HSDMA_RXDLY_INT_EN | \ | ||
94 | MTK_HSDMA_RXMAX_PINT(20) | \ | ||
95 | MTK_HSDMA_RXMAX_PTIME(20)) | ||
96 | #define MTK_HSDMA_INT_STATUS 0x220 | ||
97 | #define MTK_HSDMA_INT_ENABLE 0x228 | ||
98 | #define MTK_HSDMA_INT_RXDONE BIT(16) | ||
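A worked reading of the delay-interrupt defaults above (an interpretation of the two comments, not taken from a datasheet): MTK_HSDMA_DLYINT_DEFAULT enables the delayed RX interrupt and raises it once 20 PDs are pending, or once a PD has been pending for 20 * 20 us = 400 us, whichever comes first.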
99 | |||
100 | enum mtk_hsdma_vdesc_flag { | ||
101 | MTK_HSDMA_VDESC_FINISHED = 0x01, | ||
102 | }; | ||
103 | |||
104 | #define IS_MTK_HSDMA_VDESC_FINISHED(x) ((x) == MTK_HSDMA_VDESC_FINISHED) | ||
105 | |||
106 | /** | ||
107 | * struct mtk_hsdma_pdesc - This is the struct holding info describing a | ||
108 | * physical descriptor (PD); its placement must be | ||
109 | * kept at 4-byte alignment, in little-endian order. | ||
110 | * @desc[1-4]: The control words used to tell the hardware how to | ||
111 | * handle the descriptor, such as the source and | ||
112 | * destination addresses and the data length. The | ||
113 | * maximum data length each pdesc can handle is 0x3f80 bytes. | ||
114 | */ | ||
115 | struct mtk_hsdma_pdesc { | ||
116 | __le32 desc1; | ||
117 | __le32 desc2; | ||
118 | __le32 desc3; | ||
119 | __le32 desc4; | ||
120 | } __packed __aligned(4); | ||
121 | |||
122 | /** | ||
123 | * struct mtk_hsdma_vdesc - This is the struct holding info describing virtual | ||
124 | * descriptor (VD) | ||
125 | * @vd: An instance for struct virt_dma_desc | ||
126 | * @len: The total data size device wants to move | ||
127 | * @residue: The remaining data size device will move | ||
128 | * @dest: The destination address device wants to move to | ||
129 | * @src: The source address device wants to move from | ||
130 | */ | ||
131 | struct mtk_hsdma_vdesc { | ||
132 | struct virt_dma_desc vd; | ||
133 | size_t len; | ||
134 | size_t residue; | ||
135 | dma_addr_t dest; | ||
136 | dma_addr_t src; | ||
137 | }; | ||
138 | |||
139 | /** | ||
140 | * struct mtk_hsdma_cb - This is the struct holding extra info required for the | ||
141 | * RX ring to know which VD the PD is being | ||
142 | * mapped to. | ||
143 | * @vd: Pointer to the relevant VD. | ||
144 | * @flag: Flag indicating what action should be taken when VD | ||
145 | * is completed. | ||
146 | */ | ||
147 | struct mtk_hsdma_cb { | ||
148 | struct virt_dma_desc *vd; | ||
149 | enum mtk_hsdma_vdesc_flag flag; | ||
150 | }; | ||
151 | |||
152 | /** | ||
153 | * struct mtk_hsdma_ring - This struct holds info describing underlying ring | ||
154 | * space | ||
155 | * @txd: The descriptor TX ring which describes DMA source | ||
156 | * information | ||
157 | * @rxd: The descriptor RX ring which describes DMA | ||
158 | * destination information | ||
159 | * @cb: The extra information pointed at by RX ring | ||
160 | * @tphys: The physical addr of TX ring | ||
161 | * @rphys: The physical addr of RX ring | ||
162 | * @cur_tptr: Pointer to the next free descriptor used by the host | ||
163 | * @cur_rptr: Pointer to the last done descriptor by the device | ||
164 | */ | ||
165 | struct mtk_hsdma_ring { | ||
166 | struct mtk_hsdma_pdesc *txd; | ||
167 | struct mtk_hsdma_pdesc *rxd; | ||
168 | struct mtk_hsdma_cb *cb; | ||
169 | dma_addr_t tphys; | ||
170 | dma_addr_t rphys; | ||
171 | u16 cur_tptr; | ||
172 | u16 cur_rptr; | ||
173 | }; | ||
174 | |||
175 | /** | ||
176 | * struct mtk_hsdma_pchan - This is the struct holding info describing physical | ||
177 | * channel (PC) | ||
178 | * @ring: An instance for the underlying ring | ||
179 | * @sz_ring: Total size allocated for the ring | ||
180 | * @nr_free: Total number of free rooms in the ring. It is | ||
181 | * accessed and updated frequently in both IRQ | ||
182 | * context and user context to reflect whether the | ||
183 | * ring can accept requests from a VD. | ||
184 | */ | ||
185 | struct mtk_hsdma_pchan { | ||
186 | struct mtk_hsdma_ring ring; | ||
187 | size_t sz_ring; | ||
188 | atomic_t nr_free; | ||
189 | }; | ||
190 | |||
191 | /** | ||
192 | * struct mtk_hsdma_vchan - This is the struct holding info describing virtual | ||
193 | * channel (VC) | ||
194 | * @vc: An instance for struct virt_dma_chan | ||
195 | * @issue_completion: Completion used to wait for all issued descriptors | ||
196 | * @issue_synchronize: Bool indicating channel synchronization has started | ||
197 | * @desc_hw_processing: List of descriptors the hardware is processing, | ||
198 | * protected by vc.lock | ||
199 | */ | ||
200 | struct mtk_hsdma_vchan { | ||
201 | struct virt_dma_chan vc; | ||
202 | struct completion issue_completion; | ||
203 | bool issue_synchronize; | ||
204 | struct list_head desc_hw_processing; | ||
205 | }; | ||
206 | |||
207 | /** | ||
208 | * struct mtk_hsdma_soc - This is the struct holding differences among SoCs | ||
209 | * @ddone: Bit mask for DDONE | ||
210 | * @ls0: Bit mask for LS0 | ||
211 | */ | ||
212 | struct mtk_hsdma_soc { | ||
213 | __le32 ddone; | ||
214 | __le32 ls0; | ||
215 | }; | ||
216 | |||
217 | /** | ||
218 | * struct mtk_hsdma_device - This is the struct holding info describing HSDMA | ||
219 | * device | ||
220 | * @ddev: An instance for struct dma_device | ||
221 | * @base: The mapped register I/O base | ||
222 | * @clk: The clock the device uses internally | ||
223 | * @irq: The IRQ the device is using | ||
224 | * @dma_requests: The number of VCs the device supports | ||
225 | * @vc: The pointer to all available VCs | ||
226 | * @pc: The pointer to the underlying PC | ||
227 | * @pc_refcnt: Track how many VCs are using the PC | ||
228 | * @lock: Lock protecting against multiple VCs accessing the PC | ||
229 | * @soc: The pointer to the area holding the differences among | ||
230 | * various platforms | ||
231 | */ | ||
232 | struct mtk_hsdma_device { | ||
233 | struct dma_device ddev; | ||
234 | void __iomem *base; | ||
235 | struct clk *clk; | ||
236 | u32 irq; | ||
237 | |||
238 | u32 dma_requests; | ||
239 | struct mtk_hsdma_vchan *vc; | ||
240 | struct mtk_hsdma_pchan *pc; | ||
241 | refcount_t pc_refcnt; | ||
242 | |||
243 | /* Lock used to protect against multiple VCs accessing the PC */ | ||
244 | spinlock_t lock; | ||
245 | |||
246 | const struct mtk_hsdma_soc *soc; | ||
247 | }; | ||
248 | |||
249 | static struct mtk_hsdma_device *to_hsdma_dev(struct dma_chan *chan) | ||
250 | { | ||
251 | return container_of(chan->device, struct mtk_hsdma_device, ddev); | ||
252 | } | ||
253 | |||
254 | static inline struct mtk_hsdma_vchan *to_hsdma_vchan(struct dma_chan *chan) | ||
255 | { | ||
256 | return container_of(chan, struct mtk_hsdma_vchan, vc.chan); | ||
257 | } | ||
258 | |||
259 | static struct mtk_hsdma_vdesc *to_hsdma_vdesc(struct virt_dma_desc *vd) | ||
260 | { | ||
261 | return container_of(vd, struct mtk_hsdma_vdesc, vd); | ||
262 | } | ||
263 | |||
264 | static struct device *hsdma2dev(struct mtk_hsdma_device *hsdma) | ||
265 | { | ||
266 | return hsdma->ddev.dev; | ||
267 | } | ||
268 | |||
269 | static u32 mtk_dma_read(struct mtk_hsdma_device *hsdma, u32 reg) | ||
270 | { | ||
271 | return readl(hsdma->base + reg); | ||
272 | } | ||
273 | |||
274 | static void mtk_dma_write(struct mtk_hsdma_device *hsdma, u32 reg, u32 val) | ||
275 | { | ||
276 | writel(val, hsdma->base + reg); | ||
277 | } | ||
278 | |||
279 | static void mtk_dma_rmw(struct mtk_hsdma_device *hsdma, u32 reg, | ||
280 | u32 mask, u32 set) | ||
281 | { | ||
282 | u32 val; | ||
283 | |||
284 | val = mtk_dma_read(hsdma, reg); | ||
285 | val &= ~mask; | ||
286 | val |= set; | ||
287 | mtk_dma_write(hsdma, reg, val); | ||
288 | } | ||
289 | |||
290 | static void mtk_dma_set(struct mtk_hsdma_device *hsdma, u32 reg, u32 val) | ||
291 | { | ||
292 | mtk_dma_rmw(hsdma, reg, 0, val); | ||
293 | } | ||
294 | |||
295 | static void mtk_dma_clr(struct mtk_hsdma_device *hsdma, u32 reg, u32 val) | ||
296 | { | ||
297 | mtk_dma_rmw(hsdma, reg, val, 0); | ||
298 | } | ||
299 | |||
300 | static void mtk_hsdma_vdesc_free(struct virt_dma_desc *vd) | ||
301 | { | ||
302 | kfree(container_of(vd, struct mtk_hsdma_vdesc, vd)); | ||
303 | } | ||
304 | |||
305 | static int mtk_hsdma_busy_wait(struct mtk_hsdma_device *hsdma) | ||
306 | { | ||
307 | u32 status = 0; | ||
308 | |||
309 | return readl_poll_timeout(hsdma->base + MTK_HSDMA_GLO, status, | ||
310 | !(status & MTK_HSDMA_GLO_BUSY), | ||
311 | MTK_HSDMA_USEC_POLL, | ||
312 | MTK_HSDMA_TIMEOUT_POLL); | ||
313 | } | ||
314 | |||
315 | static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma, | ||
316 | struct mtk_hsdma_pchan *pc) | ||
317 | { | ||
318 | struct mtk_hsdma_ring *ring = &pc->ring; | ||
319 | int err; | ||
320 | |||
321 | memset(pc, 0, sizeof(*pc)); | ||
322 | |||
323 | /* | ||
324 | * Allocate ring space where [0 ... MTK_DMA_SIZE - 1] is for TX ring | ||
325 | * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring. | ||
326 | */ | ||
327 | pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd); | ||
328 | ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring, | ||
329 | &ring->tphys, GFP_NOWAIT); | ||
330 | if (!ring->txd) | ||
331 | return -ENOMEM; | ||
332 | |||
333 | ring->rxd = &ring->txd[MTK_DMA_SIZE]; | ||
334 | ring->rphys = ring->tphys + MTK_DMA_SIZE * sizeof(*ring->txd); | ||
335 | ring->cur_tptr = 0; | ||
336 | ring->cur_rptr = MTK_DMA_SIZE - 1; | ||
337 | |||
338 | ring->cb = kcalloc(MTK_DMA_SIZE, sizeof(*ring->cb), GFP_NOWAIT); | ||
339 | if (!ring->cb) { | ||
340 | err = -ENOMEM; | ||
341 | goto err_free_dma; | ||
342 | } | ||
343 | |||
344 | atomic_set(&pc->nr_free, MTK_DMA_SIZE - 1); | ||
345 | |||
346 | /* Disable HSDMA and wait for the completion */ | ||
347 | mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA); | ||
348 | err = mtk_hsdma_busy_wait(hsdma); | ||
349 | if (err) | ||
350 | goto err_free_cb; | ||
351 | |||
352 | /* Reset */ | ||
353 | mtk_dma_set(hsdma, MTK_HSDMA_RESET, | ||
354 | MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX); | ||
355 | mtk_dma_clr(hsdma, MTK_HSDMA_RESET, | ||
356 | MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX); | ||
357 | |||
358 | /* Setup HSDMA initial pointer in the ring */ | ||
359 | mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, ring->tphys); | ||
360 | mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, MTK_DMA_SIZE); | ||
361 | mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr); | ||
362 | mtk_dma_write(hsdma, MTK_HSDMA_TX_DMA, 0); | ||
363 | mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, ring->rphys); | ||
364 | mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, MTK_DMA_SIZE); | ||
365 | mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, ring->cur_rptr); | ||
366 | mtk_dma_write(hsdma, MTK_HSDMA_RX_DMA, 0); | ||
367 | |||
368 | /* Enable HSDMA */ | ||
369 | mtk_dma_set(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA); | ||
370 | |||
371 | /* Setup delayed interrupt */ | ||
372 | mtk_dma_write(hsdma, MTK_HSDMA_DLYINT, MTK_HSDMA_DLYINT_DEFAULT); | ||
373 | |||
374 | /* Enable interrupt */ | ||
375 | mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE); | ||
376 | |||
377 | return 0; | ||
378 | |||
379 | err_free_cb: | ||
380 | kfree(ring->cb); | ||
381 | |||
382 | err_free_dma: | ||
383 | dma_free_coherent(hsdma2dev(hsdma), | ||
384 | pc->sz_ring, ring->txd, ring->tphys); | ||
385 | return err; | ||
386 | } | ||
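A quick sizing check on the allocation above: struct mtk_hsdma_pdesc packs four __le32 words, i.e. 16 bytes per PD, so with MTK_DMA_SIZE = 64 the call reserves pc->sz_ring = 2 * 64 * 16 = 2048 bytes of coherent memory; the TX ring occupies the first 1024 bytes starting at tphys, and the RX ring the following 1024 bytes starting at rphys = tphys + 1024.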
387 | |||
388 | static void mtk_hsdma_free_pchan(struct mtk_hsdma_device *hsdma, | ||
389 | struct mtk_hsdma_pchan *pc) | ||
390 | { | ||
391 | struct mtk_hsdma_ring *ring = &pc->ring; | ||
392 | |||
393 | /* Disable HSDMA and then wait for the completion */ | ||
394 | mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA); | ||
395 | mtk_hsdma_busy_wait(hsdma); | ||
396 | |||
397 | /* Reset pointer in the ring */ | ||
398 | mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE); | ||
399 | mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, 0); | ||
400 | mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, 0); | ||
401 | mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, 0); | ||
402 | mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, 0); | ||
403 | mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, 0); | ||
404 | mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, MTK_DMA_SIZE - 1); | ||
405 | |||
406 | kfree(ring->cb); | ||
407 | |||
408 | dma_free_coherent(hsdma2dev(hsdma), | ||
409 | pc->sz_ring, ring->txd, ring->tphys); | ||
410 | } | ||
411 | |||
412 | static int mtk_hsdma_issue_pending_vdesc(struct mtk_hsdma_device *hsdma, | ||
413 | struct mtk_hsdma_pchan *pc, | ||
414 | struct mtk_hsdma_vdesc *hvd) | ||
415 | { | ||
416 | struct mtk_hsdma_ring *ring = &pc->ring; | ||
417 | struct mtk_hsdma_pdesc *txd, *rxd; | ||
418 | u16 reserved, prev, tlen, num_sgs; | ||
419 | unsigned long flags; | ||
420 | |||
421 | /* Protect against the PC being accessed by multiple VCs simultaneously */ | ||
422 | spin_lock_irqsave(&hsdma->lock, flags); | ||
423 | |||
424 | /* | ||
425 | * Reserve rooms; pc->nr_free tracks how many free rooms remain in | ||
426 | * the ring and is updated in both user and IRQ context. | ||
427 | */ | ||
428 | num_sgs = DIV_ROUND_UP(hvd->len, MTK_HSDMA_MAX_LEN); | ||
429 | reserved = min_t(u16, num_sgs, atomic_read(&pc->nr_free)); | ||
430 | |||
431 | if (!reserved) { | ||
432 | spin_unlock_irqrestore(&hsdma->lock, flags); | ||
433 | return -ENOSPC; | ||
434 | } | ||
435 | |||
436 | atomic_sub(reserved, &pc->nr_free); | ||
437 | |||
438 | while (reserved--) { | ||
439 | /* Limit the size to the PD's maximum transfer capability */ | ||
440 | tlen = (hvd->len > MTK_HSDMA_MAX_LEN) ? | ||
441 | MTK_HSDMA_MAX_LEN : hvd->len; | ||
442 | |||
443 | /* | ||
444 | * Set up PDs using the remaining VD info mapped on those | ||
445 | * reserved rooms. Since the RXD is shared memory between the | ||
446 | * host and the device, allocated by the dma_alloc_coherent | ||
447 | * call, the helper macro WRITE_ONCE ensures the data written | ||
448 | * to RAM really happens. | ||
449 | */ | ||
450 | txd = &ring->txd[ring->cur_tptr]; | ||
451 | WRITE_ONCE(txd->desc1, hvd->src); | ||
452 | WRITE_ONCE(txd->desc2, | ||
453 | hsdma->soc->ls0 | MTK_HSDMA_DESC_PLEN(tlen)); | ||
454 | |||
455 | rxd = &ring->rxd[ring->cur_tptr]; | ||
456 | WRITE_ONCE(rxd->desc1, hvd->dest); | ||
457 | WRITE_ONCE(rxd->desc2, MTK_HSDMA_DESC_PLEN(tlen)); | ||
458 | |||
459 | /* Associate the PD with the VD it belongs to */ | ||
460 | ring->cb[ring->cur_tptr].vd = &hvd->vd; | ||
461 | |||
462 | /* Move forward the pointer of TX ring */ | ||
463 | ring->cur_tptr = MTK_HSDMA_NEXT_DESP_IDX(ring->cur_tptr, | ||
464 | MTK_DMA_SIZE); | ||
465 | |||
466 | /* Update VD with remaining data */ | ||
467 | hvd->src += tlen; | ||
468 | hvd->dest += tlen; | ||
469 | hvd->len -= tlen; | ||
470 | } | ||
471 | |||
472 | /* | ||
473 | * Tag the last PD for the VD; it will be responsible for | ||
474 | * completing the VD. | ||
475 | */ | ||
476 | if (!hvd->len) { | ||
477 | prev = MTK_HSDMA_LAST_DESP_IDX(ring->cur_tptr, MTK_DMA_SIZE); | ||
478 | ring->cb[prev].flag = MTK_HSDMA_VDESC_FINISHED; | ||
479 | } | ||
480 | |||
481 | /* Ensure all changes are done before we go on */ | ||
482 | wmb(); | ||
483 | |||
484 | /* | ||
485 | * Updating the TX ring pointer in hardware lets the HSDMA take | ||
486 | * action on those pending PDs. | ||
487 | */ | ||
488 | mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr); | ||
489 | |||
490 | spin_unlock_irqrestore(&hsdma->lock, flags); | ||
491 | |||
492 | return 0; | ||
493 | } | ||
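A worked instance of the splitting performed above (the copy length is an illustrative value): for a 102400-byte (100 KiB) VD with MTK_HSDMA_MAX_LEN = 0x3f80 = 16256 bytes, num_sgs = DIV_ROUND_UP(102400, 16256) = 7, so the VD consumes up to seven PDs; the first six each move 16256 bytes, the last moves the remaining 102400 - 6 * 16256 = 4864 bytes, and only that last PD's callback slot is tagged MTK_HSDMA_VDESC_FINISHED.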
494 | |||
495 | static void mtk_hsdma_issue_vchan_pending(struct mtk_hsdma_device *hsdma, | ||
496 | struct mtk_hsdma_vchan *hvc) | ||
497 | { | ||
498 | struct virt_dma_desc *vd, *vd2; | ||
499 | int err; | ||
500 | |||
501 | lockdep_assert_held(&hvc->vc.lock); | ||
502 | |||
503 | list_for_each_entry_safe(vd, vd2, &hvc->vc.desc_issued, node) { | ||
504 | struct mtk_hsdma_vdesc *hvd; | ||
505 | |||
506 | hvd = to_hsdma_vdesc(vd); | ||
507 | |||
508 | /* Map the VD into the PC; all VCs share a single PC */ | ||
509 | err = mtk_hsdma_issue_pending_vdesc(hsdma, hsdma->pc, hvd); | ||
510 | |||
511 | /* | ||
512 | * Move the VD from desc_issued to desc_hw_processing when the | ||
513 | * entire VD fits into the available PDs. Otherwise, the | ||
514 | * uncompleted VDs stay in the desc_issued list and the | ||
515 | * processing restarts as soon as possible once underlying | ||
516 | * ring space is freed. | ||
517 | */ | ||
518 | if (err == -ENOSPC || hvd->len > 0) | ||
519 | break; | ||
520 | |||
521 | /* | ||
522 | * The extra list desc_hw_processing is used because the | ||
523 | * hardware can't provide enough information to know which | ||
524 | * VDs are still in flight on the underlying ring. The | ||
525 | * additional list helps us implement terminate_all, residue | ||
526 | * calculation and anything else that needs detailed | ||
527 | * descriptor status from the hardware. | ||
528 | */ | ||
529 | list_move_tail(&vd->node, &hvc->desc_hw_processing); | ||
530 | } | ||
531 | } | ||
532 | |||
533 | static void mtk_hsdma_free_rooms_in_ring(struct mtk_hsdma_device *hsdma) | ||
534 | { | ||
535 | struct mtk_hsdma_vchan *hvc; | ||
536 | struct mtk_hsdma_pdesc *rxd; | ||
537 | struct mtk_hsdma_vdesc *hvd; | ||
538 | struct mtk_hsdma_pchan *pc; | ||
539 | struct mtk_hsdma_cb *cb; | ||
540 | int i = MTK_DMA_SIZE; | ||
541 | __le32 desc2; | ||
542 | u32 status; | ||
543 | u16 next; | ||
544 | |||
545 | /* Read IRQ status */ | ||
546 | status = mtk_dma_read(hsdma, MTK_HSDMA_INT_STATUS); | ||
547 | if (unlikely(!(status & MTK_HSDMA_INT_RXDONE))) | ||
548 | goto rx_done; | ||
549 | |||
550 | pc = hsdma->pc; | ||
551 | |||
552 | /* | ||
553 | * Use a fail-safe loop with up to MTK_DMA_SIZE iterations to | ||
554 | * reclaim the finished descriptors: the maximum number of PDs the | ||
555 | * ISR can handle at one time can't exceed MTK_DMA_SIZE, so we take | ||
556 | * it as the iteration limit instead of using a dangerous infinite | ||
557 | * poll. | ||
558 | */ | ||
559 | while (i--) { | ||
560 | next = MTK_HSDMA_NEXT_DESP_IDX(pc->ring.cur_rptr, | ||
561 | MTK_DMA_SIZE); | ||
562 | rxd = &pc->ring.rxd[next]; | ||
563 | |||
564 | /* | ||
565 | * If MTK_HSDMA_DESC_DDONE is not set, data movement for the | ||
566 | * PD is still ongoing. | ||
567 | */ | ||
568 | desc2 = READ_ONCE(rxd->desc2); | ||
569 | if (!(desc2 & hsdma->soc->ddone)) | ||
570 | break; | ||
571 | |||
572 | cb = &pc->ring.cb[next]; | ||
573 | if (unlikely(!cb->vd)) { | ||
574 | dev_err(hsdma2dev(hsdma), "cb->vd cannot be null\n"); | ||
575 | break; | ||
576 | } | ||
577 | |||
578 | /* Update the residue of the VD the PD belongs to */ | ||
579 | hvd = to_hsdma_vdesc(cb->vd); | ||
580 | hvd->residue -= MTK_HSDMA_DESC_PLEN_GET(rxd->desc2); | ||
581 | |||
582 | /* Complete the VD only when its last PD is finished */ | ||
583 | if (IS_MTK_HSDMA_VDESC_FINISHED(cb->flag)) { | ||
584 | hvc = to_hsdma_vchan(cb->vd->tx.chan); | ||
585 | |||
586 | spin_lock(&hvc->vc.lock); | ||
587 | |||
588 | /* Remove VD from list desc_hw_processing */ | ||
589 | list_del(&cb->vd->node); | ||
590 | |||
591 | /* Add VD into list desc_completed */ | ||
592 | vchan_cookie_complete(cb->vd); | ||
593 | |||
594 | if (hvc->issue_synchronize && | ||
595 | list_empty(&hvc->desc_hw_processing)) { | ||
596 | complete(&hvc->issue_completion); | ||
597 | hvc->issue_synchronize = false; | ||
598 | } | ||
599 | spin_unlock(&hvc->vc.lock); | ||
600 | |||
601 | cb->flag = 0; | ||
602 | } | ||
603 | |||
604 | cb->vd = NULL; | ||
605 | |||
606 | /* | ||
607 | * Recycle the RXD with the helper macro WRITE_ONCE, ensuring | ||
608 | * the data written into RAM really happens. | ||
609 | */ | ||
610 | WRITE_ONCE(rxd->desc1, 0); | ||
611 | WRITE_ONCE(rxd->desc2, 0); | ||
612 | pc->ring.cur_rptr = next; | ||
613 | |||
614 | /* Release rooms */ | ||
615 | atomic_inc(&pc->nr_free); | ||
616 | } | ||
617 | |||
618 | /* Ensure all changes are done before we go on */ | ||
619 | wmb(); | ||
620 | |||
621 | /* Update CPU pointer for those completed PDs */ | ||
622 | mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, pc->ring.cur_rptr); | ||
623 | |||
624 | /* | ||
625 | * Acking the pending IRQ lets the hardware stop keeping the IRQ | ||
626 | * line in its trigger state once software has handled all the | ||
627 | * finished physical descriptors. | ||
628 | */ | ||
629 | if (atomic_read(&pc->nr_free) >= MTK_DMA_SIZE - 1) | ||
630 | mtk_dma_write(hsdma, MTK_HSDMA_INT_STATUS, status); | ||
631 | |||
632 | /* Handle pending VDs in all VCs ASAP after freeing some rooms */ | ||
633 | for (i = 0; i < hsdma->dma_requests; i++) { | ||
634 | hvc = &hsdma->vc[i]; | ||
635 | spin_lock(&hvc->vc.lock); | ||
636 | mtk_hsdma_issue_vchan_pending(hsdma, hvc); | ||
637 | spin_unlock(&hvc->vc.lock); | ||
638 | } | ||
639 | |||
640 | rx_done: | ||
641 | /* All completed PDs are cleaned up, so enable interrupt again */ | ||
642 | mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE); | ||
643 | } | ||
644 | |||
645 | static irqreturn_t mtk_hsdma_irq(int irq, void *devid) | ||
646 | { | ||
647 | struct mtk_hsdma_device *hsdma = devid; | ||
648 | |||
649 | /* | ||
650 | * Disable interrupt until all completed PDs are cleaned up in | ||
651 | * the mtk_hsdma_free_rooms_in_ring call. | ||
652 | */ | ||
653 | mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE); | ||
654 | |||
655 | mtk_hsdma_free_rooms_in_ring(hsdma); | ||
656 | |||
657 | return IRQ_HANDLED; | ||
658 | } | ||
659 | |||
660 | static struct virt_dma_desc *mtk_hsdma_find_active_desc(struct dma_chan *c, | ||
661 | dma_cookie_t cookie) | ||
662 | { | ||
663 | struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c); | ||
664 | struct virt_dma_desc *vd; | ||
665 | |||
666 | list_for_each_entry(vd, &hvc->desc_hw_processing, node) | ||
667 | if (vd->tx.cookie == cookie) | ||
668 | return vd; | ||
669 | |||
670 | list_for_each_entry(vd, &hvc->vc.desc_issued, node) | ||
671 | if (vd->tx.cookie == cookie) | ||
672 | return vd; | ||
673 | |||
674 | return NULL; | ||
675 | } | ||
676 | |||
677 | static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c, | ||
678 | dma_cookie_t cookie, | ||
679 | struct dma_tx_state *txstate) | ||
680 | { | ||
681 | struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c); | ||
682 | struct mtk_hsdma_vdesc *hvd; | ||
683 | struct virt_dma_desc *vd; | ||
684 | enum dma_status ret; | ||
685 | unsigned long flags; | ||
686 | size_t bytes = 0; | ||
687 | |||
688 | ret = dma_cookie_status(c, cookie, txstate); | ||
689 | if (ret == DMA_COMPLETE || !txstate) | ||
690 | return ret; | ||
691 | |||
692 | spin_lock_irqsave(&hvc->vc.lock, flags); | ||
693 | vd = mtk_hsdma_find_active_desc(c, cookie); | ||
694 | spin_unlock_irqrestore(&hvc->vc.lock, flags); | ||
695 | |||
696 | if (vd) { | ||
697 | hvd = to_hsdma_vdesc(vd); | ||
698 | bytes = hvd->residue; | ||
699 | } | ||
700 | |||
701 | dma_set_residue(txstate, bytes); | ||
702 | |||
703 | return ret; | ||
704 | } | ||
705 | |||
706 | static void mtk_hsdma_issue_pending(struct dma_chan *c) | ||
707 | { | ||
708 | struct mtk_hsdma_device *hsdma = to_hsdma_dev(c); | ||
709 | struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c); | ||
710 | unsigned long flags; | ||
711 | |||
712 | spin_lock_irqsave(&hvc->vc.lock, flags); | ||
713 | |||
714 | if (vchan_issue_pending(&hvc->vc)) | ||
715 | mtk_hsdma_issue_vchan_pending(hsdma, hvc); | ||
716 | |||
717 | spin_unlock_irqrestore(&hvc->vc.lock, flags); | ||
718 | } | ||
719 | |||
720 | static struct dma_async_tx_descriptor * | ||
721 | mtk_hsdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, | ||
722 | dma_addr_t src, size_t len, unsigned long flags) | ||
723 | { | ||
724 | struct mtk_hsdma_vdesc *hvd; | ||
725 | |||
726 | hvd = kzalloc(sizeof(*hvd), GFP_NOWAIT); | ||
727 | if (!hvd) | ||
728 | return NULL; | ||
729 | |||
730 | hvd->len = len; | ||
731 | hvd->residue = len; | ||
732 | hvd->src = src; | ||
733 | hvd->dest = dest; | ||
734 | |||
735 | return vchan_tx_prep(to_virt_chan(c), &hvd->vd, flags); | ||
736 | } | ||
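For context, a minimal sketch of how a generic dmaengine client could drive this memcpy channel; the function name and surrounding plumbing are illustrative assumptions, while the dmaengine calls themselves are the standard kernel API:

```c
#include <linux/dmaengine.h>

/*
 * Sketch only: a memcpy between two pre-mapped DMA addresses.
 * Error handling and channel reuse are simplified for brevity.
 */
static int example_hsdma_copy(dma_addr_t dest, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Grab any channel advertising DMA_MEMCPY, e.g. an HSDMA VC */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dest, src, len,
				       DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* Polling helper; a real client would use a completion callback */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) {
		dma_release_channel(chan);
		return -EIO;
	}

	dma_release_channel(chan);
	return 0;
}
```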
737 | |||
738 | static int mtk_hsdma_free_inactive_desc(struct dma_chan *c) | ||
739 | { | ||
740 | struct virt_dma_chan *vc = to_virt_chan(c); | ||
741 | unsigned long flags; | ||
742 | LIST_HEAD(head); | ||
743 | |||
744 | spin_lock_irqsave(&vc->lock, flags); | ||
745 | list_splice_tail_init(&vc->desc_allocated, &head); | ||
746 | list_splice_tail_init(&vc->desc_submitted, &head); | ||
747 | list_splice_tail_init(&vc->desc_issued, &head); | ||
748 | spin_unlock_irqrestore(&vc->lock, flags); | ||
749 | |||
750 | /* At this point, we don't expect users to put descriptors into the VC again */ | ||
751 | vchan_dma_desc_free_list(vc, &head); | ||
752 | |||
753 | return 0; | ||
754 | } | ||
755 | |||
756 | static void mtk_hsdma_free_active_desc(struct dma_chan *c) | ||
757 | { | ||
758 | struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c); | ||
759 | bool sync_needed = false; | ||
760 | |||
761 | /* | ||
762 | * Once issue_synchronize is set, the synchronization must be | ||
763 | * notified as soon as the hardware has consumed all descriptors | ||
764 | * for the channel in the ring. | ||
765 | */ | ||
766 | spin_lock(&hvc->vc.lock); | ||
767 | if (!list_empty(&hvc->desc_hw_processing)) { | ||
768 | hvc->issue_synchronize = true; | ||
769 | sync_needed = true; | ||
770 | } | ||
771 | spin_unlock(&hvc->vc.lock); | ||
772 | |||
773 | if (sync_needed) | ||
774 | wait_for_completion(&hvc->issue_completion); | ||
775 | /* | ||
776 | * At this point, we expect that all remaining descriptors in the | ||
777 | * ring for the channel have been completely processed. | ||
778 | */ | ||
779 | WARN_ONCE(!list_empty(&hvc->desc_hw_processing), | ||
780 | "Desc pending still in list desc_hw_processing\n"); | ||
781 | |||
782 | /* Free all descriptors in list desc_completed */ | ||
783 | vchan_synchronize(&hvc->vc); | ||
784 | |||
785 | WARN_ONCE(!list_empty(&hvc->vc.desc_completed), | ||
786 | "Desc pending still in list desc_completed\n"); | ||
787 | } | ||
788 | |||
789 | static int mtk_hsdma_terminate_all(struct dma_chan *c) | ||
790 | { | ||
791 | /* | ||
792 | * Free pending descriptors that were previously submitted to the | ||
793 | * channel but have not yet been processed by the hardware. | ||
794 | */ | ||
795 | mtk_hsdma_free_inactive_desc(c); | ||
796 | |||
797 | /* | ||
798 | * However, the DMA engine doesn't provide any way to stop the | ||
799 | * descriptors currently being processed by the hardware. The only | ||
800 | * option is to wait until they are all processed completely, via | ||
801 | * the mtk_hsdma_free_active_desc call. | ||
802 | */ | ||
803 | mtk_hsdma_free_active_desc(c); | ||
804 | |||
805 | return 0; | ||
806 | } | ||
807 | |||
808 | static int mtk_hsdma_alloc_chan_resources(struct dma_chan *c) | ||
809 | { | ||
810 | struct mtk_hsdma_device *hsdma = to_hsdma_dev(c); | ||
811 | int err; | ||
812 | |||
813 | /* | ||
814 | * Since the HSDMA has only one PC, the resources for the PC are | ||
815 | * allocated when the first VC is created, and all other VCs run | ||
816 | * on the same PC. | ||
817 | */ | ||
818 | if (!refcount_read(&hsdma->pc_refcnt)) { | ||
819 | err = mtk_hsdma_alloc_pchan(hsdma, hsdma->pc); | ||
820 | if (err) | ||
821 | return err; | ||
822 | /* | ||
823 | * refcount_inc would complain about incrementing from 0, seeing it | ||
824 | * as a use-after-free; thus, we explicitly set it to 1 initially. | ||
825 | */ | ||
826 | refcount_set(&hsdma->pc_refcnt, 1); | ||
827 | } else { | ||
828 | refcount_inc(&hsdma->pc_refcnt); | ||
829 | } | ||
830 | |||
831 | return 0; | ||
832 | } | ||
833 | |||
834 | static void mtk_hsdma_free_chan_resources(struct dma_chan *c) | ||
835 | { | ||
836 | struct mtk_hsdma_device *hsdma = to_hsdma_dev(c); | ||
837 | |||
838 | /* Free all descriptors in all lists on the VC */ | ||
839 | mtk_hsdma_terminate_all(c); | ||
840 | |||
841 | /* The resource for PC is not freed until all the VCs are destroyed */ | ||
842 | if (!refcount_dec_and_test(&hsdma->pc_refcnt)) | ||
843 | return; | ||
844 | |||
845 | mtk_hsdma_free_pchan(hsdma, hsdma->pc); | ||
846 | } | ||
847 | |||
848 | static int mtk_hsdma_hw_init(struct mtk_hsdma_device *hsdma) | ||
849 | { | ||
850 | int err; | ||
851 | |||
852 | pm_runtime_enable(hsdma2dev(hsdma)); | ||
853 | pm_runtime_get_sync(hsdma2dev(hsdma)); | ||
854 | |||
855 | err = clk_prepare_enable(hsdma->clk); | ||
856 | if (err) | ||
857 | return err; | ||
858 | |||
859 | mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0); | ||
860 | mtk_dma_write(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DEFAULT); | ||
861 | |||
862 | return 0; | ||
863 | } | ||
864 | |||
865 | static int mtk_hsdma_hw_deinit(struct mtk_hsdma_device *hsdma) | ||
866 | { | ||
867 | mtk_dma_write(hsdma, MTK_HSDMA_GLO, 0); | ||
868 | |||
869 | clk_disable_unprepare(hsdma->clk); | ||
870 | |||
871 | pm_runtime_put_sync(hsdma2dev(hsdma)); | ||
872 | pm_runtime_disable(hsdma2dev(hsdma)); | ||
873 | |||
874 | return 0; | ||
875 | } | ||
876 | |||
877 | static const struct mtk_hsdma_soc mt7623_soc = { | ||
878 | .ddone = BIT(31), | ||
879 | .ls0 = BIT(30), | ||
880 | }; | ||
881 | |||
882 | static const struct mtk_hsdma_soc mt7622_soc = { | ||
883 | .ddone = BIT(15), | ||
884 | .ls0 = BIT(14), | ||
885 | }; | ||
886 | |||
887 | static const struct of_device_id mtk_hsdma_match[] = { | ||
888 | { .compatible = "mediatek,mt7623-hsdma", .data = &mt7623_soc}, | ||
889 | { .compatible = "mediatek,mt7622-hsdma", .data = &mt7622_soc}, | ||
890 | { /* sentinel */ } | ||
891 | }; | ||
892 | MODULE_DEVICE_TABLE(of, mtk_hsdma_match); | ||
893 | |||
894 | static int mtk_hsdma_probe(struct platform_device *pdev) | ||
895 | { | ||
896 | struct mtk_hsdma_device *hsdma; | ||
897 | struct mtk_hsdma_vchan *vc; | ||
898 | struct dma_device *dd; | ||
899 | struct resource *res; | ||
900 | int i, err; | ||
901 | |||
902 | hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL); | ||
903 | if (!hsdma) | ||
904 | return -ENOMEM; | ||
905 | |||
906 | dd = &hsdma->ddev; | ||
907 | |||
908 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
909 | hsdma->base = devm_ioremap_resource(&pdev->dev, res); | ||
910 | if (IS_ERR(hsdma->base)) | ||
911 | return PTR_ERR(hsdma->base); | ||
912 | |||
913 | hsdma->soc = of_device_get_match_data(&pdev->dev); | ||
914 | if (!hsdma->soc) { | ||
915 | dev_err(&pdev->dev, "No device match found\n"); | ||
916 | return -ENODEV; | ||
917 | } | ||
918 | |||
919 | hsdma->clk = devm_clk_get(&pdev->dev, "hsdma"); | ||
920 | if (IS_ERR(hsdma->clk)) { | ||
921 | dev_err(&pdev->dev, "No clock for %s\n", | ||
922 | dev_name(&pdev->dev)); | ||
923 | return PTR_ERR(hsdma->clk); | ||
924 | } | ||
925 | |||
926 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
927 | if (!res) { | ||
928 | dev_err(&pdev->dev, "No irq resource for %s\n", | ||
929 | dev_name(&pdev->dev)); | ||
930 | return -EINVAL; | ||
931 | } | ||
932 | hsdma->irq = res->start; | ||
933 | |||
934 | refcount_set(&hsdma->pc_refcnt, 0); | ||
935 | spin_lock_init(&hsdma->lock); | ||
936 | |||
937 | dma_cap_set(DMA_MEMCPY, dd->cap_mask); | ||
938 | |||
939 | dd->copy_align = MTK_HSDMA_ALIGN_SIZE; | ||
940 | dd->device_alloc_chan_resources = mtk_hsdma_alloc_chan_resources; | ||
941 | dd->device_free_chan_resources = mtk_hsdma_free_chan_resources; | ||
942 | dd->device_tx_status = mtk_hsdma_tx_status; | ||
943 | dd->device_issue_pending = mtk_hsdma_issue_pending; | ||
944 | dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy; | ||
945 | dd->device_terminate_all = mtk_hsdma_terminate_all; | ||
946 | dd->src_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS; | ||
947 | dd->dst_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS; | ||
948 | dd->directions = BIT(DMA_MEM_TO_MEM); | ||
949 | dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; | ||
950 | dd->dev = &pdev->dev; | ||
951 | INIT_LIST_HEAD(&dd->channels); | ||
952 | |||
953 | hsdma->dma_requests = MTK_HSDMA_NR_VCHANS; | ||
954 | if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, | ||
955 | "dma-requests", | ||
956 | &hsdma->dma_requests)) { | ||
957 | dev_info(&pdev->dev, | ||
958 | "Using %u as missing dma-requests property\n", | ||
959 | MTK_HSDMA_NR_VCHANS); | ||
960 | } | ||
961 | |||
962 | hsdma->pc = devm_kcalloc(&pdev->dev, MTK_HSDMA_NR_MAX_PCHANS, | ||
963 | sizeof(*hsdma->pc), GFP_KERNEL); | ||
964 | if (!hsdma->pc) | ||
965 | return -ENOMEM; | ||
966 | |||
967 | hsdma->vc = devm_kcalloc(&pdev->dev, hsdma->dma_requests, | ||
968 | sizeof(*hsdma->vc), GFP_KERNEL); | ||
969 | if (!hsdma->vc) | ||
970 | return -ENOMEM; | ||
971 | |||
972 | for (i = 0; i < hsdma->dma_requests; i++) { | ||
973 | vc = &hsdma->vc[i]; | ||
974 | vc->vc.desc_free = mtk_hsdma_vdesc_free; | ||
975 | vchan_init(&vc->vc, dd); | ||
976 | init_completion(&vc->issue_completion); | ||
977 | INIT_LIST_HEAD(&vc->desc_hw_processing); | ||
978 | } | ||
979 | |||
980 | err = dma_async_device_register(dd); | ||
981 | if (err) | ||
982 | return err; | ||
983 | |||
984 | err = of_dma_controller_register(pdev->dev.of_node, | ||
985 | of_dma_xlate_by_chan_id, hsdma); | ||
986 | if (err) { | ||
987 | dev_err(&pdev->dev, | ||
988 | "MediaTek HSDMA OF registration failed %d\n", err); | ||
989 | goto err_unregister; | ||
990 | } | ||
991 | |||
992 | mtk_hsdma_hw_init(hsdma); | ||
993 | |||
994 | err = devm_request_irq(&pdev->dev, hsdma->irq, | ||
995 | mtk_hsdma_irq, 0, | ||
996 | dev_name(&pdev->dev), hsdma); | ||
997 | if (err) { | ||
998 | dev_err(&pdev->dev, | ||
999 | "request_irq failed with err %d\n", err); | ||
1000 | goto err_unregister; | ||
1001 | } | ||
1002 | |||
1003 | platform_set_drvdata(pdev, hsdma); | ||
1004 | |||
1005 | dev_info(&pdev->dev, "MediaTek HSDMA driver registered\n"); | ||
1006 | |||
1007 | return 0; | ||
1008 | |||
1009 | err_unregister: | ||
1010 | dma_async_device_unregister(dd); | ||
1011 | |||
1012 | return err; | ||
1013 | } | ||
1014 | |||
1015 | static int mtk_hsdma_remove(struct platform_device *pdev) | ||
1016 | { | ||
1017 | struct mtk_hsdma_device *hsdma = platform_get_drvdata(pdev); | ||
1018 | struct mtk_hsdma_vchan *vc; | ||
1019 | int i; | ||
1020 | |||
1021 | /* Kill VC task */ | ||
1022 | for (i = 0; i < hsdma->dma_requests; i++) { | ||
1023 | vc = &hsdma->vc[i]; | ||
1024 | |||
1025 | list_del(&vc->vc.chan.device_node); | ||
1026 | tasklet_kill(&vc->vc.task); | ||
1027 | } | ||
1028 | |||
1029 | /* Disable DMA interrupt */ | ||
1030 | mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0); | ||
1031 | |||
1032 | /* Wait for any pending IRQ handlers to complete */ | ||
1033 | synchronize_irq(hsdma->irq); | ||
1034 | |||
1035 | /* Disable hardware */ | ||
1036 | mtk_hsdma_hw_deinit(hsdma); | ||
1037 | |||
1038 | dma_async_device_unregister(&hsdma->ddev); | ||
1039 | of_dma_controller_free(pdev->dev.of_node); | ||
1040 | |||
1041 | return 0; | ||
1042 | } | ||
1043 | |||
1044 | static struct platform_driver mtk_hsdma_driver = { | ||
1045 | .probe = mtk_hsdma_probe, | ||
1046 | .remove = mtk_hsdma_remove, | ||
1047 | .driver = { | ||
1048 | .name = KBUILD_MODNAME, | ||
1049 | .of_match_table = mtk_hsdma_match, | ||
1050 | }, | ||
1051 | }; | ||
1052 | module_platform_driver(mtk_hsdma_driver); | ||
1053 | |||
1054 | MODULE_DESCRIPTION("MediaTek High-Speed DMA Controller Driver"); | ||
1055 | MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); | ||
1056 | MODULE_LICENSE("GPL v2"); | ||