author     Geert Uytterhoeven <geert+renesas@glider.be>  2015-12-04 10:56:29 -0500
committer  Vinod Koul <vinod.koul@intel.com>             2015-12-09 23:13:33 -0500
commit     4d42e95fc789393d267bbab8b4684936c1529378
tree       06e0a7e1899d91053c1081cad2f304bb7975c52c
parent     0b2eed49875ec3605b7a71bdf05adc8c1cbd49fc
dmaengine: sh: Remove unused R-Car HPB-DMAC driver
As of commit 4baadb9e05c68962 ("ARM: shmobile: r8a7778: remove obsolete
setup code"), the Renesas R-Car HPB-DMAC driver is no longer used.
In theory it could still be used on R-Car Gen1 SoCs, but that requires
adding DT support to the driver, which is not planned.
Remove the driver; it can be resurrected from git history when needed.
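For example, both files could later be restored from the parent of this commit
with a plain "git checkout" (a minimal sketch, using the parent hash listed
above; adjust the revision and paths as needed):

    git checkout 0b2eed49875ec3605b7a71bdf05adc8c1cbd49fc -- \
        drivers/dma/sh/rcar-hpbdma.c \
        include/linux/platform_data/dma-rcar-hpbdma.h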
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Acked-by: Simon Horman <horms+renesas@verge.net.au>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
 drivers/dma/sh/Kconfig                        |   6 -
 drivers/dma/sh/Makefile                       |   1 -
 drivers/dma/sh/rcar-hpbdma.c                  | 669 -
 include/linux/platform_data/dma-rcar-hpbdma.h | 103 -
 4 files changed, 0 insertions(+), 779 deletions(-)
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 9fda65af841e..f32c430eb16c 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -47,12 +47,6 @@ config RCAR_DMAC
 	  This driver supports the general purpose DMA controller found in the
 	  Renesas R-Car second generation SoCs.
 
-config RCAR_HPB_DMAE
-	tristate "Renesas R-Car HPB DMAC support"
-	depends on SH_DMAE_BASE
-	help
-	  Enable support for the Renesas R-Car series DMA controllers.
-
 config RENESAS_USB_DMAC
 	tristate "Renesas USB-DMA Controller"
 	depends on ARCH_SHMOBILE || COMPILE_TEST
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index 0133e4658196..f1e2fd64f279 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -14,6 +14,5 @@ shdma-objs := $(shdma-y)
 obj-$(CONFIG_SH_DMAE) += shdma.o
 
 obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
-obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
 obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
 obj-$(CONFIG_SUDMAC) += sudmac.o
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
deleted file mode 100644
index 749f26ecd3b3..000000000000
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ /dev/null
@@ -1,669 +0,0 @@
-/*
- * Copyright (C) 2011-2013 Renesas Electronics Corporation
- * Copyright (C) 2013 Cogent Embedded, Inc.
- *
- * This file is based on the drivers/dma/sh/shdma.c
- *
- * Renesas SuperH DMA Engine support
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * - DMA of SuperH does not have Hardware DMA chain mode.
- * - max DMA size is 16MB.
- *
- */
-
-#include <linux/dmaengine.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/platform_data/dma-rcar-hpbdma.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/shdma-base.h>
-#include <linux/slab.h>
-
-/* DMA channel registers */
-#define HPB_DMAE_DSAR0	0x00
-#define HPB_DMAE_DDAR0	0x04
-#define HPB_DMAE_DTCR0	0x08
-#define HPB_DMAE_DSAR1	0x0C
-#define HPB_DMAE_DDAR1	0x10
-#define HPB_DMAE_DTCR1	0x14
-#define HPB_DMAE_DSASR	0x18
-#define HPB_DMAE_DDASR	0x1C
-#define HPB_DMAE_DTCSR	0x20
-#define HPB_DMAE_DPTR	0x24
-#define HPB_DMAE_DCR	0x28
-#define HPB_DMAE_DCMDR	0x2C
-#define HPB_DMAE_DSTPR	0x30
-#define HPB_DMAE_DSTSR	0x34
-#define HPB_DMAE_DDBGR	0x38
-#define HPB_DMAE_DDBGR2	0x3C
-#define HPB_DMAE_CHAN(n)	(0x40 * (n))
-
-/* DMA command register (DCMDR) bits */
-#define HPB_DMAE_DCMDR_BDOUT	BIT(7)
-#define HPB_DMAE_DCMDR_DQSPD	BIT(6)
-#define HPB_DMAE_DCMDR_DQSPC	BIT(5)
-#define HPB_DMAE_DCMDR_DMSPD	BIT(4)
-#define HPB_DMAE_DCMDR_DMSPC	BIT(3)
-#define HPB_DMAE_DCMDR_DQEND	BIT(2)
-#define HPB_DMAE_DCMDR_DNXT	BIT(1)
-#define HPB_DMAE_DCMDR_DMEN	BIT(0)
-
-/* DMA forced stop register (DSTPR) bits */
-#define HPB_DMAE_DSTPR_DMSTP	BIT(0)
-
-/* DMA status register (DSTSR) bits */
-#define HPB_DMAE_DSTSR_DQSTS	BIT(2)
-#define HPB_DMAE_DSTSR_DMSTS	BIT(0)
-
-/* DMA common registers */
-#define HPB_DMAE_DTIMR		0x00
-#define HPB_DMAE_DINTSR0	0x0C
-#define HPB_DMAE_DINTSR1	0x10
-#define HPB_DMAE_DINTCR0	0x14
-#define HPB_DMAE_DINTCR1	0x18
-#define HPB_DMAE_DINTMR0	0x1C
-#define HPB_DMAE_DINTMR1	0x20
-#define HPB_DMAE_DACTSR0	0x24
-#define HPB_DMAE_DACTSR1	0x28
-#define HPB_DMAE_HSRSTR(n)	(0x40 + (n) * 4)
-#define HPB_DMAE_HPB_DMASPR(n)	(0x140 + (n) * 4)
-#define HPB_DMAE_HPB_DMLVLR0	0x160
-#define HPB_DMAE_HPB_DMLVLR1	0x164
-#define HPB_DMAE_HPB_DMSHPT0	0x168
-#define HPB_DMAE_HPB_DMSHPT1	0x16C
-
-#define HPB_DMA_SLAVE_NUMBER	256
-#define HPB_DMA_TCR_MAX		0x01000000	/* 16 MiB */
-
-struct hpb_dmae_chan {
-	struct shdma_chan shdma_chan;
-	int xfer_mode;			/* DMA transfer mode */
-#define XFER_SINGLE	1
-#define XFER_DOUBLE	2
-	unsigned plane_idx;		/* current DMA information set */
-	bool first_desc;		/* first/next transfer */
-	int xmit_shift;			/* log_2(bytes_per_xfer) */
-	void __iomem *base;
-	const struct hpb_dmae_slave_config *cfg;
-	char dev_id[16];		/* unique name per DMAC of channel */
-	dma_addr_t slave_addr;
-};
-
-struct hpb_dmae_device {
-	struct shdma_dev shdma_dev;
-	spinlock_t reg_lock;		/* comm_reg operation lock */
-	struct hpb_dmae_pdata *pdata;
-	void __iomem *chan_reg;
-	void __iomem *comm_reg;
-	void __iomem *reset_reg;
-	void __iomem *mode_reg;
-};
-
-struct hpb_dmae_regs {
-	u32 sar; /* SAR / source address */
-	u32 dar; /* DAR / destination address */
-	u32 tcr; /* TCR / transfer count */
-};
-
-struct hpb_desc {
-	struct shdma_desc shdma_desc;
-	struct hpb_dmae_regs hw;
-	unsigned plane_idx;
-};
-
-#define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan)
-#define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc)
-#define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \
-				struct hpb_dmae_device, shdma_dev.dma_dev)
-
-static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg)
-{
-	iowrite32(data, hpb_dc->base + reg);
-}
-
-static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg)
-{
-	return ioread32(hpb_dc->base + reg);
-}
-
-static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
-{
-	iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR);
-}
-
-static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch)
-{
-	iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch));
-}
-
-static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch)
-{
-	u32 v;
-
-	if (ch < 32)
-		v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch;
-	else
-		v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32);
-	return v & 0x1;
-}
-
-static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch)
-{
-	if (ch < 32)
-		iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0);
-	else
-		iowrite32((0x1 << (ch - 32)),
-			  hpbdev->comm_reg + HPB_DMAE_DINTCR1);
-}
-
-static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
-{
-	iowrite32(data, hpbdev->mode_reg);
-}
-
-static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev)
-{
-	return ioread32(hpbdev->mode_reg);
-}
-
-static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch)
-{
-	u32 intreg;
-
-	spin_lock_irq(&hpbdev->reg_lock);
-	if (ch < 32) {
-		intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0);
-		iowrite32(BIT(ch) | intreg,
-			  hpbdev->comm_reg + HPB_DMAE_DINTMR0);
-	} else {
-		intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1);
-		iowrite32(BIT(ch - 32) | intreg,
-			  hpbdev->comm_reg + HPB_DMAE_DINTMR1);
-	}
-	spin_unlock_irq(&hpbdev->reg_lock);
-}
-
-static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data)
-{
-	u32 rstr;
-	int timeout = 10000;	/* 100 ms */
-
-	spin_lock(&hpbdev->reg_lock);
-	rstr = ioread32(hpbdev->reset_reg);
-	rstr |= data;
-	iowrite32(rstr, hpbdev->reset_reg);
-	do {
-		rstr = ioread32(hpbdev->reset_reg);
-		if ((rstr & data) == data)
-			break;
-		udelay(10);
-	} while (timeout--);
-
-	if (timeout < 0)
-		dev_err(hpbdev->shdma_dev.dma_dev.dev,
-			"%s timeout\n", __func__);
-
-	rstr &= ~data;
-	iowrite32(rstr, hpbdev->reset_reg);
-	spin_unlock(&hpbdev->reg_lock);
-}
-
-static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev,
-				    u32 mask, u32 data)
-{
-	u32 mode;
-
-	spin_lock_irq(&hpbdev->reg_lock);
-	mode = asyncmdr_read(hpbdev);
-	mode &= ~mask;
-	mode |= data;
-	asyncmdr_write(hpbdev, mode);
-	spin_unlock_irq(&hpbdev->reg_lock);
-}
-
-static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev)
-{
-	dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD);
-}
-
-static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev)
-{
-	u32 ch;
-
-	for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++)
-		hsrstr_write(hpbdev, ch);
-}
-
-static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan)
-{
-	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
-	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
-	int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR);
-	int i;
-
-	switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) {
-	case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT:
-	default:
-		i = XMIT_SZ_8BIT;
-		break;
-	case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT:
-		i = XMIT_SZ_16BIT;
-		break;
-	case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT:
-		i = XMIT_SZ_32BIT;
-		break;
-	}
-	return pdata->ts_shift[i];
-}
-
-static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan,
-			     struct hpb_dmae_regs *hw, unsigned plane)
-{
-	ch_reg_write(hpb_chan, hw->sar,
-		     plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0);
-	ch_reg_write(hpb_chan, hw->dar,
-		     plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0);
-	ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift,
-		     plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
-}
-
-static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next)
-{
-	ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) |
-		     HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR);
-}
-
-static void hpb_dmae_halt(struct shdma_chan *schan)
-{
-	struct hpb_dmae_chan *chan = to_chan(schan);
-
-	ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
-	ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
-
-	chan->plane_idx = 0;
-	chan->first_desc = true;
-}
-
-static const struct hpb_dmae_slave_config *
-hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id)
-{
-	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
-	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
-	int i;
-
-	if (slave_id >= HPB_DMA_SLAVE_NUMBER)
-		return NULL;
-
-	for (i = 0; i < pdata->num_slaves; i++)
-		if (pdata->slaves[i].id == slave_id)
-			return pdata->slaves + i;
-
-	return NULL;
-}
-
-static void hpb_dmae_start_xfer(struct shdma_chan *schan,
-				struct shdma_desc *sdesc)
-{
-	struct hpb_dmae_chan *chan = to_chan(schan);
-	struct hpb_dmae_device *hpbdev = to_dev(chan);
-	struct hpb_desc *desc = to_desc(sdesc);
-
-	if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET)
-		hpb_dmae_async_reset(hpbdev, chan->cfg->rstr);
-
-	desc->plane_idx = chan->plane_idx;
-	hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx);
-	hpb_dmae_start(chan, !chan->first_desc);
-
-	if (chan->xfer_mode == XFER_DOUBLE) {
-		chan->plane_idx ^= 1;
-		chan->first_desc = false;
-	}
-}
-
-static bool hpb_dmae_desc_completed(struct shdma_chan *schan,
-				    struct shdma_desc *sdesc)
-{
-	/*
-	 * This is correct since we always have at most single
-	 * outstanding DMA transfer per channel, and by the time
-	 * we get completion interrupt the transfer is completed.
-	 * This will change if we ever use alternating DMA
-	 * information sets and submit two descriptors at once.
-	 */
-	return true;
-}
-
-static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq)
-{
-	struct hpb_dmae_chan *chan = to_chan(schan);
-	struct hpb_dmae_device *hpbdev = to_dev(chan);
-	int ch = chan->cfg->dma_ch;
-
-	/* Check Complete DMA Transfer */
-	if (dintsr_read(hpbdev, ch)) {
-		/* Clear Interrupt status */
-		dintcr_write(hpbdev, ch);
-		return true;
-	}
-	return false;
-}
-
-static int hpb_dmae_desc_setup(struct shdma_chan *schan,
-			       struct shdma_desc *sdesc,
-			       dma_addr_t src, dma_addr_t dst, size_t *len)
-{
-	struct hpb_desc *desc = to_desc(sdesc);
-
-	if (*len > (size_t)HPB_DMA_TCR_MAX)
-		*len = (size_t)HPB_DMA_TCR_MAX;
-
-	desc->hw.sar = src;
-	desc->hw.dar = dst;
-	desc->hw.tcr = *len;
-
-	return 0;
-}
-
-static size_t hpb_dmae_get_partial(struct shdma_chan *schan,
-				   struct shdma_desc *sdesc)
-{
-	struct hpb_desc *desc = to_desc(sdesc);
-	struct hpb_dmae_chan *chan = to_chan(schan);
-	u32 tcr = ch_reg_read(chan, desc->plane_idx ?
-			      HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
-
-	return (desc->hw.tcr - tcr) << chan->xmit_shift;
-}
-
-static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
-{
-	struct hpb_dmae_chan *chan = to_chan(schan);
-	u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);
-
-	if (chan->xfer_mode == XFER_DOUBLE)
-		return dstsr & HPB_DMAE_DSTSR_DQSTS;
-	else
-		return dstsr & HPB_DMAE_DSTSR_DMSTS;
-}
-
-static int
-hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
-			      const struct hpb_dmae_slave_config *cfg)
-{
-	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
-	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
-	const struct hpb_dmae_channel *channel = pdata->channels;
-	int slave_id = cfg->id;
-	int i, err;
-
-	for (i = 0; i < pdata->num_channels; i++, channel++) {
-		if (channel->s_id == slave_id) {
-			struct device *dev = hpb_chan->shdma_chan.dev;
-
-			hpb_chan->base = hpbdev->chan_reg +
-				HPB_DMAE_CHAN(cfg->dma_ch);
-
-			dev_dbg(dev, "Detected Slave device\n");
-			dev_dbg(dev, " -- slave_id : 0x%x\n", slave_id);
-			dev_dbg(dev, " -- cfg->dma_ch : %d\n", cfg->dma_ch);
-			dev_dbg(dev, " -- channel->ch_irq: %d\n",
-				channel->ch_irq);
-			break;
-		}
-	}
-
-	err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq,
-				IRQF_SHARED, hpb_chan->dev_id);
-	if (err) {
-		dev_err(hpb_chan->shdma_chan.dev,
-			"DMA channel request_irq %d failed with error %d\n",
-			channel->ch_irq, err);
-		return err;
-	}
-
-	hpb_chan->plane_idx = 0;
-	hpb_chan->first_desc = true;
-
-	if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) {
-		hpb_chan->xfer_mode = XFER_SINGLE;
-	} else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) ==
-		   (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) {
-		hpb_chan->xfer_mode = XFER_DOUBLE;
-	} else {
-		dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
-		return -EINVAL;
-	}
-
-	if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE)
-		hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr);
-	ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR);
-	ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR);
-	hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan);
-	hpb_dmae_enable_int(hpbdev, cfg->dma_ch);
-
-	return 0;
-}
-
-static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
-			      dma_addr_t slave_addr, bool try)
-{
-	struct hpb_dmae_chan *chan = to_chan(schan);
-	const struct hpb_dmae_slave_config *sc =
-		hpb_dmae_find_slave(chan, slave_id);
-
-	if (!sc)
-		return -ENODEV;
-	if (try)
-		return 0;
-	chan->cfg = sc;
-	chan->slave_addr = slave_addr ? : sc->addr;
-	return hpb_dmae_alloc_chan_resources(chan, sc);
-}
-
-static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
-{
-}
-
-static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
-{
-	struct hpb_dmae_chan *chan = to_chan(schan);
-
-	return chan->slave_addr;
-}
-
-static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
-{
-	return &((struct hpb_desc *)buf)[i].shdma_desc;
-}
-
-static const struct shdma_ops hpb_dmae_ops = {
-	.desc_completed = hpb_dmae_desc_completed,
-	.halt_channel = hpb_dmae_halt,
-	.channel_busy = hpb_dmae_channel_busy,
-	.slave_addr = hpb_dmae_slave_addr,
-	.desc_setup = hpb_dmae_desc_setup,
-	.set_slave = hpb_dmae_set_slave,
-	.setup_xfer = hpb_dmae_setup_xfer,
-	.start_xfer = hpb_dmae_start_xfer,
-	.embedded_desc = hpb_dmae_embedded_desc,
-	.chan_irq = hpb_dmae_chan_irq,
-	.get_partial = hpb_dmae_get_partial,
-};
-
-static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
-{
-	struct shdma_dev *sdev = &hpbdev->shdma_dev;
-	struct platform_device *pdev =
-		to_platform_device(hpbdev->shdma_dev.dma_dev.dev);
-	struct hpb_dmae_chan *new_hpb_chan;
-	struct shdma_chan *schan;
-
-	/* Alloc channel */
-	new_hpb_chan = devm_kzalloc(&pdev->dev,
-				    sizeof(struct hpb_dmae_chan), GFP_KERNEL);
-	if (!new_hpb_chan) {
-		dev_err(hpbdev->shdma_dev.dma_dev.dev,
-			"No free memory for allocating DMA channels!\n");
-		return -ENOMEM;
-	}
-
-	schan = &new_hpb_chan->shdma_chan;
-	schan->max_xfer_len = HPB_DMA_TCR_MAX;
-
-	shdma_chan_probe(sdev, schan, id);
-
-	if (pdev->id >= 0)
-		snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
-			 "hpb-dmae%d.%d", pdev->id, id);
-	else
-		snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
-			 "hpb-dma.%d", id);
-
-	return 0;
-}
-
-static int hpb_dmae_probe(struct platform_device *pdev)
-{
-	const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
-		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES;
-	struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
-	struct hpb_dmae_device *hpbdev;
-	struct dma_device *dma_dev;
-	struct resource *chan, *comm, *rest, *mode, *irq_res;
-	int err, i;
-
-	/* Get platform data */
-	if (!pdata || !pdata->num_channels)
-		return -ENODEV;
-
-	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	comm = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	rest = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-	mode = platform_get_resource(pdev, IORESOURCE_MEM, 3);
-
-	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (!irq_res)
-		return -ENODEV;
-
-	hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device),
-			      GFP_KERNEL);
-	if (!hpbdev) {
-		dev_err(&pdev->dev, "Not enough memory\n");
-		return -ENOMEM;
-	}
-
-	hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
-	if (IS_ERR(hpbdev->chan_reg))
-		return PTR_ERR(hpbdev->chan_reg);
-
-	hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm);
-	if (IS_ERR(hpbdev->comm_reg))
-		return PTR_ERR(hpbdev->comm_reg);
-
-	hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest);
-	if (IS_ERR(hpbdev->reset_reg))
-		return PTR_ERR(hpbdev->reset_reg);
-
-	hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode);
-	if (IS_ERR(hpbdev->mode_reg))
-		return PTR_ERR(hpbdev->mode_reg);
-
-	dma_dev = &hpbdev->shdma_dev.dma_dev;
-
-	spin_lock_init(&hpbdev->reg_lock);
-
-	/* Platform data */
-	hpbdev->pdata = pdata;
-
-	pm_runtime_enable(&pdev->dev);
-	err = pm_runtime_get_sync(&pdev->dev);
-	if (err < 0)
-		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
-
-	/* Reset DMA controller */
-	hpb_dmae_reset(hpbdev);
-
-	pm_runtime_put(&pdev->dev);
-
-	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
-	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
-	dma_dev->src_addr_widths = widths;
-	dma_dev->dst_addr_widths = widths;
-	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
-	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
-
-	hpbdev->shdma_dev.ops = &hpb_dmae_ops;
-	hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
-	err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels);
-	if (err < 0)
-		goto error;
-
-	/* Create DMA channels */
-	for (i = 0; i < pdata->num_channels; i++)
-		hpb_dmae_chan_probe(hpbdev, i);
-
-	platform_set_drvdata(pdev, hpbdev);
-	err = dma_async_device_register(dma_dev);
-	if (!err)
-		return 0;
-
-	shdma_cleanup(&hpbdev->shdma_dev);
-error:
-	pm_runtime_disable(&pdev->dev);
-	return err;
-}
-
-static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
-{
-	struct shdma_chan *schan;
-	int i;
-
-	shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
-		BUG_ON(!schan);
-
-		shdma_chan_remove(schan);
-	}
-}
-
-static int hpb_dmae_remove(struct platform_device *pdev)
-{
-	struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
-
-	dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev);
-
-	pm_runtime_disable(&pdev->dev);
-
-	hpb_dmae_chan_remove(hpbdev);
-
-	return 0;
-}
-
-static void hpb_dmae_shutdown(struct platform_device *pdev)
-{
-	struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
-	hpb_dmae_ctl_stop(hpbdev);
-}
-
-static struct platform_driver hpb_dmae_driver = {
-	.probe		= hpb_dmae_probe,
-	.remove		= hpb_dmae_remove,
-	.shutdown	= hpb_dmae_shutdown,
-	.driver	= {
-		.name = "hpb-dma-engine",
-	},
-};
-module_platform_driver(hpb_dmae_driver);
-
-MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>");
-MODULE_DESCRIPTION("Renesas HPB DMA Engine driver");
-MODULE_LICENSE("GPL");
diff --git a/include/linux/platform_data/dma-rcar-hpbdma.h b/include/linux/platform_data/dma-rcar-hpbdma.h
deleted file mode 100644
index 648b8ea61a22..000000000000
--- a/include/linux/platform_data/dma-rcar-hpbdma.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (C) 2011-2013 Renesas Electronics Corporation
- * Copyright (C) 2013 Cogent Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- */
-
-#ifndef __DMA_RCAR_HPBDMA_H
-#define __DMA_RCAR_HPBDMA_H
-
-#include <linux/bitops.h>
-#include <linux/types.h>
-
-/* Transmit sizes and respective register values */
-enum {
-	XMIT_SZ_8BIT = 0,
-	XMIT_SZ_16BIT = 1,
-	XMIT_SZ_32BIT = 2,
-	XMIT_SZ_MAX
-};
-
-/* DMA control register (DCR) bits */
-#define HPB_DMAE_DCR_DTAMD		(1u << 26)
-#define HPB_DMAE_DCR_DTAC		(1u << 25)
-#define HPB_DMAE_DCR_DTAU		(1u << 24)
-#define HPB_DMAE_DCR_DTAU1		(1u << 23)
-#define HPB_DMAE_DCR_SWMD		(1u << 22)
-#define HPB_DMAE_DCR_BTMD		(1u << 21)
-#define HPB_DMAE_DCR_PKMD		(1u << 20)
-#define HPB_DMAE_DCR_CT			(1u << 18)
-#define HPB_DMAE_DCR_ACMD		(1u << 17)
-#define HPB_DMAE_DCR_DIP		(1u << 16)
-#define HPB_DMAE_DCR_SMDL		(1u << 13)
-#define HPB_DMAE_DCR_SPDAM		(1u << 12)
-#define HPB_DMAE_DCR_SDRMD_MASK		(3u << 10)
-#define HPB_DMAE_DCR_SDRMD_MOD		(0u << 10)
-#define HPB_DMAE_DCR_SDRMD_AUTO		(1u << 10)
-#define HPB_DMAE_DCR_SDRMD_TIMER	(2u << 10)
-#define HPB_DMAE_DCR_SPDS_MASK		(3u << 8)
-#define HPB_DMAE_DCR_SPDS_8BIT		(0u << 8)
-#define HPB_DMAE_DCR_SPDS_16BIT		(1u << 8)
-#define HPB_DMAE_DCR_SPDS_32BIT		(2u << 8)
-#define HPB_DMAE_DCR_DMDL		(1u << 5)
-#define HPB_DMAE_DCR_DPDAM		(1u << 4)
-#define HPB_DMAE_DCR_DDRMD_MASK		(3u << 2)
-#define HPB_DMAE_DCR_DDRMD_MOD		(0u << 2)
-#define HPB_DMAE_DCR_DDRMD_AUTO		(1u << 2)
-#define HPB_DMAE_DCR_DDRMD_TIMER	(2u << 2)
-#define HPB_DMAE_DCR_DPDS_MASK		(3u << 0)
-#define HPB_DMAE_DCR_DPDS_8BIT		(0u << 0)
-#define HPB_DMAE_DCR_DPDS_16BIT		(1u << 0)
-#define HPB_DMAE_DCR_DPDS_32BIT		(2u << 0)
-
-/* Asynchronous reset register (ASYNCRSTR) bits */
-#define HPB_DMAE_ASYNCRSTR_ASRST41	BIT(10)
-#define HPB_DMAE_ASYNCRSTR_ASRST40	BIT(9)
-#define HPB_DMAE_ASYNCRSTR_ASRST39	BIT(8)
-#define HPB_DMAE_ASYNCRSTR_ASRST27	BIT(7)
-#define HPB_DMAE_ASYNCRSTR_ASRST26	BIT(6)
-#define HPB_DMAE_ASYNCRSTR_ASRST25	BIT(5)
-#define HPB_DMAE_ASYNCRSTR_ASRST24	BIT(4)
-#define HPB_DMAE_ASYNCRSTR_ASRST23	BIT(3)
-#define HPB_DMAE_ASYNCRSTR_ASRST22	BIT(2)
-#define HPB_DMAE_ASYNCRSTR_ASRST21	BIT(1)
-#define HPB_DMAE_ASYNCRSTR_ASRST20	BIT(0)
-
-struct hpb_dmae_slave_config {
-	unsigned int id;
-	dma_addr_t addr;
-	u32 dcr;
-	u32 port;
-	u32 rstr;
-	u32 mdr;
-	u32 mdm;
-	u32 flags;
-#define HPB_DMAE_SET_ASYNC_RESET	BIT(0)
-#define HPB_DMAE_SET_ASYNC_MODE		BIT(1)
-	u32 dma_ch;
-};
-
-#define HPB_DMAE_CHANNEL(_irq, _s_id) \
-{ \
-	.ch_irq = _irq, \
-	.s_id = _s_id, \
-}
-
-struct hpb_dmae_channel {
-	unsigned int ch_irq;
-	unsigned int s_id;
-};
-
-struct hpb_dmae_pdata {
-	const struct hpb_dmae_slave_config *slaves;
-	int num_slaves;
-	const struct hpb_dmae_channel *channels;
-	int num_channels;
-	const unsigned int ts_shift[XMIT_SZ_MAX];
-	int num_hw_channels;
-};
-
-#endif