Diffstat (limited to 'drivers/dma/sh/shdma.c')
-rw-r--r-- | drivers/dma/sh/shdma.c | 944
1 file changed, 0 insertions, 944 deletions
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
deleted file mode 100644
index 3b4bee933dd7..000000000000
--- a/drivers/dma/sh/shdma.c
+++ /dev/null
@@ -1,944 +0,0 @@
/*
 * Renesas SuperH DMA Engine support
 *
 * based on drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum DMA transfer size is 16 MB.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>

#include "../dmaengine.h"
#include "shdma.h"

#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE 2
#define SH_DMA_SLAVE_NUMBER 256
#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)

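/*
 * The hardware TCR register counts transfer units, while descriptor
 * lengths are kept in bytes; dmae_set_reg() below converts between the
 * two with ">> xmit_shift". Limiting byte lengths to 16 MB therefore
 * also keeps any unit count within the register's range.
 */
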
/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/*
 * Different DMAC implementations provide different ways to clear DMA channels:
 * (1) none - no CHCLR registers are available
 * (2) one CHCLR register per channel - 0 has to be written to it to clear
 *     channel buffers
 * (3) one CHCLR per several channels - 1 has to be written to the bit,
 *     corresponding to the specific channel to reset it
 */
static void channel_clear(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
		sh_dc->shdma_chan.id;
	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;

	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg);
}

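/*
 * DMAOR is a 16-bit register on most controllers; platforms that set
 * the dmaor_is_32bit flag in their platform data require 32-bit
 * accesses instead.
 */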
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset);
}

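/*
 * CHCR does not sit at a fixed offset on every controller: chcr_offset
 * is taken from the platform data in probe, falling back to the default
 * CHCR location.
 */
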
/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

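/*
 * Re-initialize DMAOR: clear the error/NMI flags, optionally clear all
 * channels, then apply the platform's dmaor_init value and read it back
 * to verify that the controller accepted it.
 */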
static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				channel_clear(sh_chan);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

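/*
 * The transfer-size field may be split between a low and a high part of
 * CHCR; the two helpers below convert between that encoded index into
 * the platform's ts_shift[] table and the log2 of the transfer unit
 * size in bytes.
 */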
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

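/*
 * The read-modify-write in dmae_set_dmars() below updates only this
 * channel's byte within the 16-bit DMARS register (dmars_bit selects
 * the byte), leaving the other channel's slave selection intact.
 */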
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	void __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* In the case of a missing DMARS resource, use the first memory window */
	if (!addr)
		addr = shdev->chan_reg;
	addr += chan_pdata->dmars;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

static void sh_dmae_start_xfer(struct shdma_chan *schan,
			       struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
	/* Get the ld start address from ld_queue */
	dmae_set_reg(sh_chan, &sh_desc->hw);
	dmae_start(sh_chan);
}

static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	return dmae_is_busy(sh_chan);
}

static void sh_dmae_setup_xfer(struct shdma_chan *schan,
			       int slave_id)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (slave_id >= 0) {
		const struct sh_dmae_slave_config *cfg =
			sh_chan->config;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}
}

/*
 * Find a slave channel configuration from the controller list by either a
 * slave ID in the non-DT case, or by a MID/RID value in the DT case
 */
static const struct sh_dmae_slave_config *dmae_find_slave(
	struct sh_dmae_chan *sh_chan, int match)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_slave_config *cfg;
	int i;

	if (!sh_chan->shdma_chan.dev->of_node) {
		if (match >= SH_DMA_SLAVE_NUMBER)
			return NULL;

		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->slave_id == match)
				return cfg;
	} else {
		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->mid_rid == match) {
				sh_chan->shdma_chan.slave_id = cfg->slave_id;
				return cfg;
			}
	}

	return NULL;
}

static int sh_dmae_set_slave(struct shdma_chan *schan,
			     int slave_id, bool try)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
	if (!cfg)
		return -ENXIO;

	if (!try)
		sh_chan->config = cfg;

	return 0;
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static int sh_dmae_desc_setup(struct shdma_chan *schan,
			      struct shdma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	sh_desc->hw.sar = src;
	sh_desc->hw.dar = dst;
	sh_desc->hw.tcr = *len;

	return 0;
}

static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	dmae_halt(sh_chan);
}

static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (!(chcr_read(sh_chan) & CHCR_TE))
		return false;

	/* DMA stop */
	dmae_halt(sh_chan);

	return true;
}

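/*
 * Return the number of bytes already transferred for the given
 * descriptor: its total byte length minus the TCR register's remaining
 * unit count, converted back to bytes.
 */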
static size_t sh_dmae_get_partial(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	/*
	 * hw.tcr is in bytes, the TCR register counts transfer units:
	 * scale the register value back to bytes before subtracting.
	 */
	return sh_desc->hw.tcr -
		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so have to reset all */
	ret = shdma_reset(&shdev->shdma_dev);

	sh_dmae_rst(shdev);

	return ret;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(shdev);
	return IRQ_HANDLED;
}

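/*
 * A descriptor is considered completed once the controller's current
 * address register (DAR for DEV_TO_MEM, SAR otherwise) has advanced to
 * the end of that descriptor's transfer window.
 */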
static bool sh_dmae_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	return (sdesc->direction == DMA_DEV_TO_MEM &&
		(sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
		(sdesc->direction != DMA_DEV_TO_MEM &&
		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call = sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority = 1,
};

static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
			      int irq, unsigned long flags)
{
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct shdma_dev *sdev = &shdev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sh_dmae_chan *sh_chan;
	struct shdma_chan *schan;
	int err;

	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
			       GFP_KERNEL);
	if (!sh_chan) {
		dev_err(sdev->dma_dev.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	schan = &sh_chan->shdma_chan;
	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

	shdma_chan_probe(sdev, schan, id);

	sh_chan->base = shdev->chan_reg + chan_pdata->offset;

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, id);
	else
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dma%d", id);

	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq error %d\n",
			id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
	dma_dev->chancnt = 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}

#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];

		if (!sh_chan->shdma_chan.desc_num)
			continue;

		if (sh_chan->shdma_chan.slave_id >= 0) {
			const struct sh_dmae_slave_config *cfg = sh_chan->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif

static const struct dev_pm_ops sh_dmae_pm = {
	.suspend = sh_dmae_suspend,
	.resume = sh_dmae_resume,
	.runtime_suspend = sh_dmae_runtime_suspend,
	.runtime_resume = sh_dmae_runtime_resume,
};


static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);

	/*
	 * Implicit BUG_ON(!sh_chan->config)
	 * This is an exclusive slave DMA operation, may only be called after a
	 * successful slave configuration.
	 */
	return sh_chan->config->addr;
}

static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}

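/*
 * These callbacks plug the controller-specific register handling above
 * into the generic shdma-base library, which implements the dmaengine
 * interface on top of them.
 */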
static const struct shdma_ops sh_dmae_shdma_ops = {
	.desc_completed = sh_dmae_desc_completed,
	.halt_channel = sh_dmae_halt,
	.channel_busy = sh_dmae_channel_busy,
	.slave_addr = sh_dmae_slave_addr,
	.desc_setup = sh_dmae_desc_setup,
	.set_slave = sh_dmae_set_slave,
	.setup_xfer = sh_dmae_setup_xfer,
	.start_xfer = sh_dmae_start_xfer,
	.embedded_desc = sh_dmae_embedded_desc,
	.chan_irq = sh_dmae_chan_irq,
	.get_partial = sh_dmae_get_partial,
};

static int sh_dmae_probe(struct platform_device *pdev)
{
	const struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAE_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct dma_device *dma_dev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
			     GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}

	dma_dev = &shdev->shdma_dev.dma_dev;

	shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(shdev->chan_reg))
		return PTR_ERR(shdev->chan_reg);
	if (dmars) {
		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
		if (IS_ERR(shdev->dmars))
			return PTR_ERR(shdev->dmars);
	}

	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	/* The default transfer size of 4 bytes requires 4-byte alignment */
	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;

	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
			 pdata->channel_num);
	if (err < 0)
		goto eshdma;

	/* platform data */
	shdev->pdata = pdata;

	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

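	/*
	 * On SH4 and SH-Mobile the first IRQ resource is the error
	 * interrupt; request it separately before distributing the
	 * remaining resources over the channels.
	 */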
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
			       "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

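	/*
	 * Distribute IRQs over the channels: either one shared IRQ for
	 * all of them (a single start == end resource and no second
	 * resource), or individual IRQs taken from one or more
	 * resources, possibly specified as ranges.
	 */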
	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
	if (err < 0)
		goto edmadevreg;

	return err;

edmadevreg:
	pm_runtime_get(&pdev->dev);

chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);
	shdma_cleanup(&shdev->shdma_dev);
eshdma:
	synchronize_rcu();

	return err;
}

static int sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;

	dma_async_device_unregister(dma_dev);

	/*
	 * The error IRQ was requested with devm_request_irq(): no explicit
	 * free_irq() is needed here (calling it would make the managed
	 * release path free the interrupt a second time).
	 */

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_disable(&pdev->dev);

	sh_dmae_chan_remove(shdev);
	shdma_cleanup(&shdev->shdma_dev);

	platform_set_drvdata(pdev, NULL);

	synchronize_rcu();

	return 0;
}

static const struct of_device_id sh_dmae_of_match[] = {
	{ .compatible = "renesas,shdma", },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_dmae_of_match);

static struct platform_driver sh_dmae_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.pm = &sh_dmae_pm,
		.name = SH_DMAE_DRV_NAME,
		.of_match_table = sh_dmae_of_match,
	},
	.remove = sh_dmae_remove,
	.shutdown = sh_dmae_shutdown,
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);