author      Max Filippov <max.filippov@cogentembedded.com>   2013-08-24 16:33:24 -0400
committer   Vinod Koul <vinod.koul@intel.com>                 2013-09-02 08:05:55 -0400
commit      c4f6c41ba790bbbfcebb4c47a709ac8ff1fe1af9
tree        b64ceb621ac27ec244321c3459046bbdfdae2509 /drivers/dma
parent      a576b7fe5e6027d02fe9c1320422c7e2b892f4d5
dma: add driver for R-Car HPB-DMAC
Add support for the HPB-DMAC found in Renesas R-Car SoCs, using the
'shdma-base' DMA driver framework.
Based on the original patch by Phil Edworthy <phil.edworthy@renesas.com>.
Signed-off-by: Max Filippov <max.filippov@cogentembedded.com>
[Sergei: removed useless #include, sorted #include's, fixed HPB_DMA_TCR_MAX,
fixed formats and removed line breaks in the dev_dbg() calls, rephrased and
added IRQ # to the shdma_request_irq() failure message, added MODULE_AUTHOR(),
removed '__init'/'__exit' annotations from the probe()/remove() methods, removed
'__initdata' annotation from 'hpb_dmae_driver', fixed guard macro name in the
header file, fixed #define ASYNCRSTR_ASRST20, added #define ASYNCRSTR_ASRST24,
added the necessary runtime PM calls to the probe() and remove() methods,
handled errors returned by dma_async_device_register(), beautified comments
and #define's.]
Signed-off-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
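
The driver below discovers its channels and slaves entirely from platform data
(struct hpb_dmae_pdata, struct hpb_dmae_channel and struct hpb_dmae_slave_config
from <linux/platform_data/dma-rcar-hpbdma.h>), and hpb_dmae_probe() expects four
MEM resources (channel, common, async-reset and async-mode register banks) plus
at least one IRQ resource. The following is a minimal, hypothetical sketch of how
board code might describe one slave channel; every numeric value is a placeholder
rather than data from a real board file, and only fields the driver actually reads
are filled in:

#include <linux/kernel.h>
#include <linux/platform_data/dma-rcar-hpbdma.h>

/*
 * Hypothetical board-code sketch for one HPB-DMAC slave channel.
 * All numbers are placeholders; only fields consumed by rcar-hpbdma.c
 * (hpb_dmae_find_slave(), hpb_dmae_alloc_chan_resources()) are set.
 */
static struct hpb_dmae_slave_config example_slaves[] = {
	{
		.id	= 0x20,		/* looked up by hpb_dmae_find_slave() */
		.addr	= 0xffe60018,	/* slave FIFO address (placeholder) */
		.dcr	= HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT,
		.port	= 0x0000,
		.dma_ch	= 21,		/* hardware channel, selects HPB_DMAE_CHAN(21) */
	},
};

static struct hpb_dmae_channel example_channels[] = {
	{
		.s_id	= 0x20,		/* must match a slave .id above */
		.ch_irq	= 93,		/* per-channel IRQ (placeholder) */
	},
};

static struct hpb_dmae_pdata example_pdata = {
	.slaves		 = example_slaves,
	.num_slaves	 = ARRAY_SIZE(example_slaves),
	.channels	 = example_channels,
	.num_channels	 = ARRAY_SIZE(example_channels),
	/*
	 * .ts_shift must also map XMIT_SZ_8BIT/16BIT/32BIT to the log2
	 * transfer sizes (0/1/2), as consumed by calc_xmit_shift();
	 * its exact declaration lives in the platform_data header.
	 */
	.num_hw_channels = 44,		/* channels reset by hpb_dmae_reset() */
};

How the corresponding platform device and its register/IRQ resources are declared
is board-specific and omitted here.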
Diffstat (limited to 'drivers/dma')
 -rw-r--r--   drivers/dma/sh/Kconfig          6
 -rw-r--r--   drivers/dma/sh/Makefile         1
 -rw-r--r--   drivers/dma/sh/rcar-hpbdma.c    655
 3 files changed, 662 insertions, 0 deletions
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 5c1dee20c13e..e2b94d16f41f 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -22,3 +22,9 @@ config SUDMAC
 	depends on SH_DMAE_BASE
 	help
 	  Enable support for the Renesas SUDMAC controllers.
+
+config RCAR_HPB_DMAE
+	tristate "Renesas R-Car HPB DMAC support"
+	depends on SH_DMAE_BASE
+	help
+	  Enable support for the Renesas R-Car series DMA controllers.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index c962138dde96..ccf17cb5af10 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o
 obj-$(CONFIG_SH_DMAE) += shdma.o
 obj-$(CONFIG_SUDMAC) += sudmac.o
+obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
new file mode 100644
index 000000000000..45a520281ce1
--- /dev/null
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -0,0 +1,655 @@
/*
 * Copyright (C) 2011-2013 Renesas Electronics Corporation
 * Copyright (C) 2013 Cogent Embedded, Inc.
 *
 * This file is based on the drivers/dma/sh/shdma.c
 *
 * Renesas SuperH DMA Engine support
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - max DMA size is 16MB.
 *
 */

#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_data/dma-rcar-hpbdma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shdma-base.h>
#include <linux/slab.h>

/* DMA channel registers */
#define HPB_DMAE_DSAR0	0x00
#define HPB_DMAE_DDAR0	0x04
#define HPB_DMAE_DTCR0	0x08
#define HPB_DMAE_DSAR1	0x0C
#define HPB_DMAE_DDAR1	0x10
#define HPB_DMAE_DTCR1	0x14
#define HPB_DMAE_DSASR	0x18
#define HPB_DMAE_DDASR	0x1C
#define HPB_DMAE_DTCSR	0x20
#define HPB_DMAE_DPTR	0x24
#define HPB_DMAE_DCR	0x28
#define HPB_DMAE_DCMDR	0x2C
#define HPB_DMAE_DSTPR	0x30
#define HPB_DMAE_DSTSR	0x34
#define HPB_DMAE_DDBGR	0x38
#define HPB_DMAE_DDBGR2	0x3C
#define HPB_DMAE_CHAN(n)	(0x40 * (n))

/* DMA command register (DCMDR) bits */
#define HPB_DMAE_DCMDR_BDOUT	BIT(7)
#define HPB_DMAE_DCMDR_DQSPD	BIT(6)
#define HPB_DMAE_DCMDR_DQSPC	BIT(5)
#define HPB_DMAE_DCMDR_DMSPD	BIT(4)
#define HPB_DMAE_DCMDR_DMSPC	BIT(3)
#define HPB_DMAE_DCMDR_DQEND	BIT(2)
#define HPB_DMAE_DCMDR_DNXT	BIT(1)
#define HPB_DMAE_DCMDR_DMEN	BIT(0)

/* DMA forced stop register (DSTPR) bits */
#define HPB_DMAE_DSTPR_DMSTP	BIT(0)

/* DMA status register (DSTSR) bits */
#define HPB_DMAE_DSTSR_DMSTS	BIT(0)

/* DMA common registers */
#define HPB_DMAE_DTIMR		0x00
#define HPB_DMAE_DINTSR0	0x0C
#define HPB_DMAE_DINTSR1	0x10
#define HPB_DMAE_DINTCR0	0x14
#define HPB_DMAE_DINTCR1	0x18
#define HPB_DMAE_DINTMR0	0x1C
#define HPB_DMAE_DINTMR1	0x20
#define HPB_DMAE_DACTSR0	0x24
#define HPB_DMAE_DACTSR1	0x28
#define HPB_DMAE_HSRSTR(n)	(0x40 + (n) * 4)
#define HPB_DMAE_HPB_DMASPR(n)	(0x140 + (n) * 4)
#define HPB_DMAE_HPB_DMLVLR0	0x160
#define HPB_DMAE_HPB_DMLVLR1	0x164
#define HPB_DMAE_HPB_DMSHPT0	0x168
#define HPB_DMAE_HPB_DMSHPT1	0x16C

#define HPB_DMA_SLAVE_NUMBER	256
#define HPB_DMA_TCR_MAX		0x01000000	/* 16 MiB */

struct hpb_dmae_chan {
	struct shdma_chan shdma_chan;
	int xfer_mode;			/* DMA transfer mode */
#define XFER_SINGLE	1
#define XFER_DOUBLE	2
	unsigned plane_idx;		/* current DMA information set */
	bool first_desc;		/* first/next transfer */
	int xmit_shift;			/* log_2(bytes_per_xfer) */
	void __iomem *base;
	const struct hpb_dmae_slave_config *cfg;
	char dev_id[16];		/* unique name per DMAC of channel */
};

struct hpb_dmae_device {
	struct shdma_dev shdma_dev;
	spinlock_t reg_lock;		/* comm_reg operation lock */
	struct hpb_dmae_pdata *pdata;
	void __iomem *chan_reg;
	void __iomem *comm_reg;
	void __iomem *reset_reg;
	void __iomem *mode_reg;
};

struct hpb_dmae_regs {
	u32 sar; /* SAR / source address */
	u32 dar; /* DAR / destination address */
	u32 tcr; /* TCR / transfer count */
};

struct hpb_desc {
	struct shdma_desc shdma_desc;
	struct hpb_dmae_regs hw;
	unsigned plane_idx;
};

#define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan)
#define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc)
#define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \
				struct hpb_dmae_device, shdma_dev.dma_dev)

static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg)
{
	iowrite32(data, hpb_dc->base + reg);
}

static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg)
{
	return ioread32(hpb_dc->base + reg);
}

static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
{
	iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR);
}

static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch)
{
	iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch));
}

static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch)
{
	u32 v;

	if (ch < 32)
		v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch;
	else
		v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32);
	return v & 0x1;
}

static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch)
{
	if (ch < 32)
		iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0);
	else
		iowrite32((0x1 << (ch - 32)),
			  hpbdev->comm_reg + HPB_DMAE_DINTCR1);
}

static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
{
	iowrite32(data, hpbdev->mode_reg);
}

static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev)
{
	return ioread32(hpbdev->mode_reg);
}

static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch)
{
	u32 intreg;

	spin_lock_irq(&hpbdev->reg_lock);
	if (ch < 32) {
		intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0);
		iowrite32(BIT(ch) | intreg,
			  hpbdev->comm_reg + HPB_DMAE_DINTMR0);
	} else {
		intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1);
		iowrite32(BIT(ch - 32) | intreg,
			  hpbdev->comm_reg + HPB_DMAE_DINTMR1);
	}
	spin_unlock_irq(&hpbdev->reg_lock);
}

static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data)
{
	u32 rstr;
	int timeout = 10000;	/* 100 ms */

	spin_lock(&hpbdev->reg_lock);
	rstr = ioread32(hpbdev->reset_reg);
	rstr |= data;
	iowrite32(rstr, hpbdev->reset_reg);
	do {
		rstr = ioread32(hpbdev->reset_reg);
		if ((rstr & data) == data)
			break;
		udelay(10);
	} while (timeout--);

	if (timeout < 0)
		dev_err(hpbdev->shdma_dev.dma_dev.dev,
			"%s timeout\n", __func__);

	rstr &= ~data;
	iowrite32(rstr, hpbdev->reset_reg);
	spin_unlock(&hpbdev->reg_lock);
}

static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev,
				    u32 mask, u32 data)
{
	u32 mode;

	spin_lock_irq(&hpbdev->reg_lock);
	mode = asyncmdr_read(hpbdev);
	mode &= ~mask;
	mode |= data;
	asyncmdr_write(hpbdev, mode);
	spin_unlock_irq(&hpbdev->reg_lock);
}

static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev)
{
	dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD);
}

static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev)
{
	u32 ch;

	for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++)
		hsrstr_write(hpbdev, ch);
}

static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan)
{
	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
	int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR);
	int i;

	switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) {
	case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT:
	default:
		i = XMIT_SZ_8BIT;
		break;
	case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT:
		i = XMIT_SZ_16BIT;
		break;
	case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT:
		i = XMIT_SZ_32BIT;
		break;
	}
	return pdata->ts_shift[i];
}

static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan,
			     struct hpb_dmae_regs *hw, unsigned plane)
{
	ch_reg_write(hpb_chan, hw->sar,
		     plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0);
	ch_reg_write(hpb_chan, hw->dar,
		     plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0);
	ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift,
		     plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
}

static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next)
{
	ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) |
		     HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR);
}

static void hpb_dmae_halt(struct shdma_chan *schan)
{
	struct hpb_dmae_chan *chan = to_chan(schan);

	ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
	ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
}

static const struct hpb_dmae_slave_config *
hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id)
{
	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
	int i;

	if (slave_id >= HPB_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->num_slaves; i++)
		if (pdata->slaves[i].id == slave_id)
			return pdata->slaves + i;

	return NULL;
}

static void hpb_dmae_start_xfer(struct shdma_chan *schan,
				struct shdma_desc *sdesc)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	struct hpb_dmae_device *hpbdev = to_dev(chan);
	struct hpb_desc *desc = to_desc(sdesc);

	if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET)
		hpb_dmae_async_reset(hpbdev, chan->cfg->rstr);

	desc->plane_idx = chan->plane_idx;
	hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx);
	hpb_dmae_start(chan, !chan->first_desc);

	if (chan->xfer_mode == XFER_DOUBLE) {
		chan->plane_idx ^= 1;
		chan->first_desc = false;
	}
}

static bool hpb_dmae_desc_completed(struct shdma_chan *schan,
				    struct shdma_desc *sdesc)
{
	/*
	 * This is correct since we always have at most single
	 * outstanding DMA transfer per channel, and by the time
	 * we get completion interrupt the transfer is completed.
	 * This will change if we ever use alternating DMA
	 * information sets and submit two descriptors at once.
	 */
	return true;
}

static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	struct hpb_dmae_device *hpbdev = to_dev(chan);
	int ch = chan->cfg->dma_ch;

	/* Check Complete DMA Transfer */
	if (dintsr_read(hpbdev, ch)) {
		/* Clear Interrupt status */
		dintcr_write(hpbdev, ch);
		return true;
	}
	return false;
}

static int hpb_dmae_desc_setup(struct shdma_chan *schan,
			       struct shdma_desc *sdesc,
			       dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct hpb_desc *desc = to_desc(sdesc);

	if (*len > (size_t)HPB_DMA_TCR_MAX)
		*len = (size_t)HPB_DMA_TCR_MAX;

	desc->hw.sar = src;
	desc->hw.dar = dst;
	desc->hw.tcr = *len;

	return 0;
}

static size_t hpb_dmae_get_partial(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct hpb_desc *desc = to_desc(sdesc);
	struct hpb_dmae_chan *chan = to_chan(schan);
	u32 tcr = ch_reg_read(chan, desc->plane_idx ?
				HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);

	return (desc->hw.tcr - tcr) << chan->xmit_shift;
}

static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);

	return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS;
}

static int
hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
			      const struct hpb_dmae_slave_config *cfg)
{
	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
	const struct hpb_dmae_channel *channel = pdata->channels;
	int slave_id = cfg->id;
	int i, err;

	for (i = 0; i < pdata->num_channels; i++, channel++) {
		if (channel->s_id == slave_id) {
			struct device *dev = hpb_chan->shdma_chan.dev;

			hpb_chan->base = hpbdev->chan_reg +
				HPB_DMAE_CHAN(cfg->dma_ch);

			dev_dbg(dev, "Detected Slave device\n");
			dev_dbg(dev, " -- slave_id : 0x%x\n", slave_id);
			dev_dbg(dev, " -- cfg->dma_ch : %d\n", cfg->dma_ch);
			dev_dbg(dev, " -- channel->ch_irq: %d\n",
				channel->ch_irq);
			break;
		}
	}

	err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq,
				IRQF_SHARED, hpb_chan->dev_id);
	if (err) {
		dev_err(hpb_chan->shdma_chan.dev,
			"DMA channel request_irq %d failed with error %d\n",
			channel->ch_irq, err);
		return err;
	}

	hpb_chan->plane_idx = 0;
	hpb_chan->first_desc = true;

	if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) {
		hpb_chan->xfer_mode = XFER_SINGLE;
	} else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) ==
		   (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) {
		hpb_chan->xfer_mode = XFER_DOUBLE;
	} else {
		dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
		shdma_free_irq(&hpb_chan->shdma_chan);
		return -EINVAL;
	}

	if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE)
		hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr);
	ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR);
	ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR);
	hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan);
	hpb_dmae_enable_int(hpbdev, cfg->dma_ch);

	return 0;
}

static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	const struct hpb_dmae_slave_config *sc =
		hpb_dmae_find_slave(chan, slave_id);

	if (!sc)
		return -ENODEV;
	if (try)
		return 0;
	chan->cfg = sc;
	return hpb_dmae_alloc_chan_resources(chan, sc);
}

static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
{
}

static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
{
	struct hpb_dmae_chan *chan = to_chan(schan);

	return chan->cfg->addr;
}

static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
{
	return &((struct hpb_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops hpb_dmae_ops = {
	.desc_completed = hpb_dmae_desc_completed,
	.halt_channel = hpb_dmae_halt,
	.channel_busy = hpb_dmae_channel_busy,
	.slave_addr = hpb_dmae_slave_addr,
	.desc_setup = hpb_dmae_desc_setup,
	.set_slave = hpb_dmae_set_slave,
	.setup_xfer = hpb_dmae_setup_xfer,
	.start_xfer = hpb_dmae_start_xfer,
	.embedded_desc = hpb_dmae_embedded_desc,
	.chan_irq = hpb_dmae_chan_irq,
	.get_partial = hpb_dmae_get_partial,
};

static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
{
	struct shdma_dev *sdev = &hpbdev->shdma_dev;
	struct platform_device *pdev =
		to_platform_device(hpbdev->shdma_dev.dma_dev.dev);
	struct hpb_dmae_chan *new_hpb_chan;
	struct shdma_chan *schan;

	/* Alloc channel */
	new_hpb_chan = devm_kzalloc(&pdev->dev,
				    sizeof(struct hpb_dmae_chan), GFP_KERNEL);
	if (!new_hpb_chan) {
		dev_err(hpbdev->shdma_dev.dma_dev.dev,
			"No free memory for allocating DMA channels!\n");
		return -ENOMEM;
	}

	schan = &new_hpb_chan->shdma_chan;
	shdma_chan_probe(sdev, schan, id);

	if (pdev->id >= 0)
		snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
			 "hpb-dmae%d.%d", pdev->id, id);
	else
		snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
			 "hpb-dma.%d", id);

	return 0;
}

static int hpb_dmae_probe(struct platform_device *pdev)
{
	struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
	struct hpb_dmae_device *hpbdev;
	struct dma_device *dma_dev;
	struct resource *chan, *comm, *rest, *mode, *irq_res;
	int err, i;

	/* Get platform data */
	if (!pdata || !pdata->num_channels)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	comm = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	rest = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	mode = platform_get_resource(pdev, IORESOURCE_MEM, 3);

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res)
		return -ENODEV;

	hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device),
			      GFP_KERNEL);
	if (!hpbdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}

	hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(hpbdev->chan_reg))
		return PTR_ERR(hpbdev->chan_reg);

	hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm);
	if (IS_ERR(hpbdev->comm_reg))
		return PTR_ERR(hpbdev->comm_reg);

	hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest);
	if (IS_ERR(hpbdev->reset_reg))
		return PTR_ERR(hpbdev->reset_reg);

	hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode);
	if (IS_ERR(hpbdev->mode_reg))
		return PTR_ERR(hpbdev->mode_reg);

	dma_dev = &hpbdev->shdma_dev.dma_dev;

	spin_lock_init(&hpbdev->reg_lock);

	/* Platform data */
	hpbdev->pdata = pdata;

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	/* Reset DMA controller */
	hpb_dmae_reset(hpbdev);

	pm_runtime_put(&pdev->dev);

	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	hpbdev->shdma_dev.ops = &hpb_dmae_ops;
	hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
	err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels);
	if (err < 0)
		goto error;

	/* Create DMA channels */
	for (i = 0; i < pdata->num_channels; i++)
		hpb_dmae_chan_probe(hpbdev, i);

	platform_set_drvdata(pdev, hpbdev);
	err = dma_async_device_register(dma_dev);
	if (!err)
		return 0;

	shdma_cleanup(&hpbdev->shdma_dev);
error:
	pm_runtime_disable(&pdev->dev);
	return err;
}

static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
{
	struct dma_device *dma_dev = &hpbdev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_free_irq(schan);
		shdma_chan_remove(schan);
	}
	dma_dev->chancnt = 0;
}

static int hpb_dmae_remove(struct platform_device *pdev)
{
	struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev);

	pm_runtime_disable(&pdev->dev);

	hpb_dmae_chan_remove(hpbdev);

	return 0;
}

static void hpb_dmae_shutdown(struct platform_device *pdev)
{
	struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
	hpb_dmae_ctl_stop(hpbdev);
}

static struct platform_driver hpb_dmae_driver = {
	.probe		= hpb_dmae_probe,
	.remove		= hpb_dmae_remove,
	.shutdown	= hpb_dmae_shutdown,
	.driver	= {
		.owner	= THIS_MODULE,
		.name	= "hpb-dma-engine",
	},
};
module_platform_driver(hpb_dmae_driver);

MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>");
MODULE_DESCRIPTION("Renesas HPB DMA Engine driver");
MODULE_LICENSE("GPL");
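
Once registered with the dmaengine core, the controller is reached through the
standard slave DMA API; with the shdma-base framework of this kernel generation,
a channel able to serve a given slave ID was typically obtained via
shdma_chan_filter(). Below is a rough, hypothetical client sketch, not taken
from any in-tree user: the slave ID, FIFO address and buffer are placeholders,
and the exact request/configuration sequence (in particular the use of
dma_slave_config.slave_id) depends on the client driver and kernel version.

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/shdma-base.h>

/* Hypothetical client: start one device-to-memory transfer on slave 0x20. */
static int example_start_rx(dma_addr_t dma_buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr  = 0xffe60018,	/* placeholder FIFO address */
		.slave_id  = 0x20,		/* placeholder HPB slave ID */
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* shdma_chan_filter() picks a channel that can serve this slave ID */
	chan = dma_request_channel(mask, shdma_chan_filter, (void *)0x20);
	if (!chan)
		return -ENODEV;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret) {
		dma_release_channel(chan);
		return ret;
	}

	desc = dmaengine_prep_slave_single(chan, dma_buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_release_channel(chan);
		return -EIO;
	}

	/* A real client would set desc->callback before submitting */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}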