author	Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>	2015-04-01 02:22:45 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2015-04-02 03:27:32 -0400
commit	0c1c8ff32fa29e425b4938934c21afdb81104431 (patch)
tree	faf7202a17ebabad17fa449c6e300d93808af1ce
parent	a7267831565030ab94dab76eb1afe5d4b63a1009 (diff)
dmaengine: usb-dmac: Add Renesas USB DMA Controller (USB-DMAC) driver
This DMAC is the DMA controller of the Renesas USB high-speed module and supports slave transfers. The USB-DMAC has a register set similar to that of the R-Car Gen2 DMAC, but it also has registers specific to controlling USB transactions. Merging this code into the rcar-dmac driver would make that driver unreadable, so this driver is kept independent of rcar-dmac. The USB-DMAC driver uses the virt-dma infrastructure.

Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--  drivers/dma/sh/Kconfig    |   9
-rw-r--r--  drivers/dma/sh/Makefile   |   1
-rw-r--r--  drivers/dma/sh/usb-dmac.c | 910
3 files changed, 920 insertions(+), 0 deletions(-)
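To put the new driver in context, here is a minimal sketch of how a client driver (for example the USB high-speed controller driver) could drive one of these channels through the standard dmaengine slave API. This sketch is not part of the patch: the "tx" dma-names entry, the FIFO address, and the error handling are illustrative assumptions; only the 32-byte slave bus width and the single-cell channel specifier (the channel index, see usb_dmac_of_xlate() and usb_dmac_chan_filter() below) follow from the driver itself.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>

/*
 * Hypothetical client: queue one memory-to-device transfer on a USB-DMAC
 * channel.  The channel is described in DT with a single cell (the channel
 * index), matching usb_dmac_of_xlate()/usb_dmac_chan_filter(); "tx" is an
 * assumed dma-names entry in the client's node.
 */
static int example_usb_dmac_tx(struct device *dev, dma_addr_t fifo_addr,
			       void *buf, size_t len)
{
	struct dma_slave_config cfg = {
		.dst_addr = fifo_addr,
		/* The USB-DMAC only supports 32-byte accesses. */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_32_BYTES,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_addr_t dma_addr;
	int ret;

	chan = dma_request_slave_channel(dev, "tx");
	if (!chan)
		return -ENODEV;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto release;

	/* The hardware transfers in 32-byte units; buf should be 32-byte aligned. */
	dma_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		ret = -ENOMEM;
		goto release;
	}

	desc = dmaengine_prep_slave_single(chan, dma_addr, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EIO;
		goto unmap;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/*
	 * Completion would normally be signalled through desc->callback, and
	 * the remaining byte count queried with dmaengine_tx_status(); the
	 * buffer would be unmapped and the channel released from that path.
	 */
	return 0;

unmap:
	dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
release:
	dma_release_channel(chan);
	return ret;
}

Because the hardware address registers advance in 32-byte units, the residue reported by usb_dmac_tx_status() is at burst granularity, which is why the engine advertises DMA_RESIDUE_GRANULARITY_BURST.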
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 8190ad225a1b..f6ca002cb7c5 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -64,3 +64,12 @@ config RCAR_DMAC
 	help
 	  This driver supports the general purpose DMA controller found in the
 	  Renesas R-Car second generation SoCs.
+
+config RENESAS_USB_DMAC
+	tristate "Renesas USB-DMA Controller"
+	depends on ARCH_SHMOBILE || COMPILE_TEST
+	select RENESAS_DMA
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  This driver supports the USB-DMA controller found in the Renesas
+	  SoCs.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index 2852f9db61a4..221ab19b8f77 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_SUDMAC) += sudmac.o
 obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
 obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
 obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
+obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
new file mode 100644
index 000000000000..d5dad98bef0b
--- /dev/null
+++ b/drivers/dma/sh/usb-dmac.c
@@ -0,0 +1,910 @@
1/*
2 * Renesas USB DMA Controller Driver
3 *
4 * Copyright (C) 2015 Renesas Electronics Corporation
5 *
6 * based on rcar-dmac.c
7 * Copyright (C) 2014 Renesas Electronics Inc.
8 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
9 *
10 * This is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/delay.h>
16#include <linux/dma-mapping.h>
17#include <linux/dmaengine.h>
18#include <linux/interrupt.h>
19#include <linux/list.h>
20#include <linux/module.h>
21#include <linux/of.h>
22#include <linux/of_dma.h>
23#include <linux/of_platform.h>
24#include <linux/platform_device.h>
25#include <linux/pm_runtime.h>
26#include <linux/slab.h>
27#include <linux/spinlock.h>
28
29#include "../dmaengine.h"
30#include "../virt-dma.h"
31
32/*
33 * struct usb_dmac_sg - Descriptor for a hardware transfer
34 * @mem_addr: memory address
35 * @size: transfer size in bytes
36 */
37struct usb_dmac_sg {
38 dma_addr_t mem_addr;
39 u32 size;
40};
41
42/*
43 * struct usb_dmac_desc - USB DMA Transfer Descriptor
44 * @vd: base virtual channel DMA transaction descriptor
45 * @direction: direction of the DMA transfer
46 * @sg_allocated_len: number of allocated struct usb_dmac_sg entries
47 * @sg_len: number of entries used by the current transfer
48 * @sg_index: index of the entry currently being transferred
49 * @residue: residue after the DMAC completed a transfer
50 * @node: node for desc_got and desc_freed
51 * @done_cookie: cookie after the DMAC completed a transfer
52 * @sg: information for the transfer
53 */
54struct usb_dmac_desc {
55 struct virt_dma_desc vd;
56 enum dma_transfer_direction direction;
57 unsigned int sg_allocated_len;
58 unsigned int sg_len;
59 unsigned int sg_index;
60 u32 residue;
61 struct list_head node;
62 dma_cookie_t done_cookie;
63 struct usb_dmac_sg sg[0];
64};
65
66#define to_usb_dmac_desc(vd) container_of(vd, struct usb_dmac_desc, vd)
67
68/*
69 * struct usb_dmac_chan - USB DMA Controller Channel
70 * @vc: base virtual DMA channel object
71 * @iomem: channel I/O memory base
72 * @index: index of this channel in the controller
73 * @irq: irq number of this channel
74 * @desc: the current descriptor
75 * @descs_allocated: number of descriptors allocated
76 * @desc_got: descriptors in use for prepared/issued transfers
77 * @desc_freed: freed descriptors after the DMAC completed a transfer
78 */
79struct usb_dmac_chan {
80 struct virt_dma_chan vc;
81 void __iomem *iomem;
82 unsigned int index;
83 int irq;
84 struct usb_dmac_desc *desc;
85 int descs_allocated;
86 struct list_head desc_got;
87 struct list_head desc_freed;
88};
89
90#define to_usb_dmac_chan(c) container_of(c, struct usb_dmac_chan, vc.chan)
91
92/*
93 * struct usb_dmac - USB DMA Controller
94 * @engine: base DMA engine object
95 * @dev: the hardware device
96 * @iomem: remapped I/O memory base
97 * @n_channels: number of available channels
98 * @channels: array of DMAC channels
99 */
100struct usb_dmac {
101 struct dma_device engine;
102 struct device *dev;
103 void __iomem *iomem;
104
105 unsigned int n_channels;
106 struct usb_dmac_chan *channels;
107};
108
109#define to_usb_dmac(d) container_of(d, struct usb_dmac, engine)
110
111/* -----------------------------------------------------------------------------
112 * Registers
113 */
114
115#define USB_DMAC_CHAN_OFFSET(i) (0x20 + 0x20 * (i))
116
117#define USB_DMASWR 0x0008
118#define USB_DMASWR_SWR (1 << 0)
119#define USB_DMAOR 0x0060
120#define USB_DMAOR_AE (1 << 2)
121#define USB_DMAOR_DME (1 << 0)
122
123#define USB_DMASAR 0x0000
124#define USB_DMADAR 0x0004
125#define USB_DMATCR 0x0008
126#define USB_DMATCR_MASK 0x00ffffff
127#define USB_DMACHCR 0x0014
128#define USB_DMACHCR_FTE (1 << 24)
129#define USB_DMACHCR_NULLE (1 << 16)
130#define USB_DMACHCR_NULL (1 << 12)
131#define USB_DMACHCR_TS_8B ((0 << 7) | (0 << 6))
132#define USB_DMACHCR_TS_16B ((0 << 7) | (1 << 6))
133#define USB_DMACHCR_TS_32B ((1 << 7) | (0 << 6))
134#define USB_DMACHCR_IE (1 << 5)
135#define USB_DMACHCR_SP (1 << 2)
136#define USB_DMACHCR_TE (1 << 1)
137#define USB_DMACHCR_DE (1 << 0)
138#define USB_DMATEND 0x0018
139
140/* Hardcode the xfer_shift to 5 (32 bytes) */
141#define USB_DMAC_XFER_SHIFT 5
142#define USB_DMAC_XFER_SIZE (1 << USB_DMAC_XFER_SHIFT)
143#define USB_DMAC_CHCR_TS USB_DMACHCR_TS_32B
144#define USB_DMAC_SLAVE_BUSWIDTH DMA_SLAVE_BUSWIDTH_32_BYTES
145
146/* for descriptors */
147#define USB_DMAC_INITIAL_NR_DESC 16
148#define USB_DMAC_INITIAL_NR_SG 8
149
150/* -----------------------------------------------------------------------------
151 * Device access
152 */
153
154static void usb_dmac_write(struct usb_dmac *dmac, u32 reg, u32 data)
155{
156 writel(data, dmac->iomem + reg);
157}
158
159static u32 usb_dmac_read(struct usb_dmac *dmac, u32 reg)
160{
161 return readl(dmac->iomem + reg);
162}
163
164static u32 usb_dmac_chan_read(struct usb_dmac_chan *chan, u32 reg)
165{
166 return readl(chan->iomem + reg);
167}
168
169static void usb_dmac_chan_write(struct usb_dmac_chan *chan, u32 reg, u32 data)
170{
171 writel(data, chan->iomem + reg);
172}
173
174/* -----------------------------------------------------------------------------
175 * Initialization and configuration
176 */
177
178static bool usb_dmac_chan_is_busy(struct usb_dmac_chan *chan)
179{
180 u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
181
182 return (chcr & (USB_DMACHCR_DE | USB_DMACHCR_TE)) == USB_DMACHCR_DE;
183}
184
185static u32 usb_dmac_calc_tend(u32 size)
186{
187 /*
188 * Please refer to the Figure "Example of Final Transaction Valid
189 * Data Transfer Enable (EDTEN) Setting" in the data sheet.
190 */
191 return 0xffffffff << (32 - (size % USB_DMAC_XFER_SIZE ? :
192 USB_DMAC_XFER_SIZE));
193}
194
195/* The caller must hold vc.lock */
196static void usb_dmac_chan_start_sg(struct usb_dmac_chan *chan,
197 unsigned int index)
198{
199 struct usb_dmac_desc *desc = chan->desc;
200 struct usb_dmac_sg *sg = desc->sg + index;
201 dma_addr_t src_addr = 0, dst_addr = 0;
202
203 WARN_ON_ONCE(usb_dmac_chan_is_busy(chan));
204
205 if (desc->direction == DMA_DEV_TO_MEM)
206 dst_addr = sg->mem_addr;
207 else
208 src_addr = sg->mem_addr;
209
210 dev_dbg(chan->vc.chan.device->dev,
211 "chan%u: queue sg %p: %u@%pad -> %pad\n",
212 chan->index, sg, sg->size, &src_addr, &dst_addr);
213
214 usb_dmac_chan_write(chan, USB_DMASAR, src_addr & 0xffffffff);
215 usb_dmac_chan_write(chan, USB_DMADAR, dst_addr & 0xffffffff);
216 usb_dmac_chan_write(chan, USB_DMATCR,
217 DIV_ROUND_UP(sg->size, USB_DMAC_XFER_SIZE));
218 usb_dmac_chan_write(chan, USB_DMATEND, usb_dmac_calc_tend(sg->size));
219
220 usb_dmac_chan_write(chan, USB_DMACHCR, USB_DMAC_CHCR_TS |
221 USB_DMACHCR_NULLE | USB_DMACHCR_IE | USB_DMACHCR_DE);
222}
223
224/* The caller must hold vc.lock */
225static void usb_dmac_chan_start_desc(struct usb_dmac_chan *chan)
226{
227 struct virt_dma_desc *vd;
228
229 vd = vchan_next_desc(&chan->vc);
230 if (!vd) {
231 chan->desc = NULL;
232 return;
233 }
234
235 /*
236 * Remove this request from vc->desc_issued. Otherwise, this driver
237 * will get the previous value from vchan_next_desc() after a transfer
238 * was completed.
239 */
240 list_del(&vd->node);
241
242 chan->desc = to_usb_dmac_desc(vd);
243 chan->desc->sg_index = 0;
244 usb_dmac_chan_start_sg(chan, 0);
245}
246
247static int usb_dmac_init(struct usb_dmac *dmac)
248{
249 u16 dmaor;
250
251 /* Clear all channels and enable the DMAC globally. */
252 usb_dmac_write(dmac, USB_DMAOR, USB_DMAOR_DME);
253
254 dmaor = usb_dmac_read(dmac, USB_DMAOR);
255 if ((dmaor & (USB_DMAOR_AE | USB_DMAOR_DME)) != USB_DMAOR_DME) {
256 dev_warn(dmac->dev, "DMAOR initialization failed.\n");
257 return -EIO;
258 }
259
260 return 0;
261}
262
263/* -----------------------------------------------------------------------------
264 * Descriptors allocation and free
265 */
266static int usb_dmac_desc_alloc(struct usb_dmac_chan *chan, unsigned int sg_len,
267 gfp_t gfp)
268{
269 struct usb_dmac_desc *desc;
270 unsigned long flags;
271
272 desc = kzalloc(sizeof(*desc) + sg_len * sizeof(desc->sg[0]), gfp);
273 if (!desc)
274 return -ENOMEM;
275
276 desc->sg_allocated_len = sg_len;
277 INIT_LIST_HEAD(&desc->node);
278
279 spin_lock_irqsave(&chan->vc.lock, flags);
280 list_add_tail(&desc->node, &chan->desc_freed);
281 spin_unlock_irqrestore(&chan->vc.lock, flags);
282
283 return 0;
284}
285
286static void usb_dmac_desc_free(struct usb_dmac_chan *chan)
287{
288 struct usb_dmac_desc *desc, *_desc;
289 LIST_HEAD(list);
290
291 list_splice_init(&chan->desc_freed, &list);
292 list_splice_init(&chan->desc_got, &list);
293
294 list_for_each_entry_safe(desc, _desc, &list, node) {
295 list_del(&desc->node);
296 kfree(desc);
297 }
298 chan->descs_allocated = 0;
299}
300
301static struct usb_dmac_desc *usb_dmac_desc_get(struct usb_dmac_chan *chan,
302 unsigned int sg_len, gfp_t gfp)
303{
304 struct usb_dmac_desc *desc = NULL;
305 unsigned long flags;
306
307 /* Get a descriptor from the freed list */
308 spin_lock_irqsave(&chan->vc.lock, flags);
309 list_for_each_entry(desc, &chan->desc_freed, node) {
310 if (sg_len <= desc->sg_allocated_len) {
311 list_move_tail(&desc->node, &chan->desc_got);
312 spin_unlock_irqrestore(&chan->vc.lock, flags);
313 return desc;
314 }
315 }
316 spin_unlock_irqrestore(&chan->vc.lock, flags);
317
318 /* Allocate a new descriptor */
319 if (!usb_dmac_desc_alloc(chan, sg_len, gfp)) {
320 /* The new descriptor was added to the tail of the freed list */
321 spin_lock_irqsave(&chan->vc.lock, flags);
322 desc = list_last_entry(&chan->desc_freed, struct usb_dmac_desc,
323 node);
324 list_move_tail(&desc->node, &chan->desc_got);
325 spin_unlock_irqrestore(&chan->vc.lock, flags);
326 return desc;
327 }
328
329 return NULL;
330}
331
332static void usb_dmac_desc_put(struct usb_dmac_chan *chan,
333 struct usb_dmac_desc *desc)
334{
335 unsigned long flags;
336
337 spin_lock_irqsave(&chan->vc.lock, flags);
338 list_move_tail(&desc->node, &chan->desc_freed);
339 spin_unlock_irqrestore(&chan->vc.lock, flags);
340}
341
342/* -----------------------------------------------------------------------------
343 * Stop and reset
344 */
345
346static void usb_dmac_soft_reset(struct usb_dmac_chan *uchan)
347{
348 struct dma_chan *chan = &uchan->vc.chan;
349 struct usb_dmac *dmac = to_usb_dmac(chan->device);
350 int i;
351
352 /* Don't issue a soft reset if any of the channels is busy */
353 for (i = 0; i < dmac->n_channels; ++i) {
354 if (usb_dmac_chan_is_busy(&dmac->channels[i]))
355 return;
356 }
357
358 usb_dmac_write(dmac, USB_DMAOR, 0);
359 usb_dmac_write(dmac, USB_DMASWR, USB_DMASWR_SWR);
360 udelay(100);
361 usb_dmac_write(dmac, USB_DMASWR, 0);
362 usb_dmac_write(dmac, USB_DMAOR, USB_DMAOR_DME);
363}
364
365static void usb_dmac_chan_halt(struct usb_dmac_chan *chan)
366{
367 u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
368
369 chcr &= ~(USB_DMACHCR_IE | USB_DMACHCR_TE | USB_DMACHCR_DE);
370 usb_dmac_chan_write(chan, USB_DMACHCR, chcr);
371
372 usb_dmac_soft_reset(chan);
373}
374
375static void usb_dmac_stop(struct usb_dmac *dmac)
376{
377 usb_dmac_write(dmac, USB_DMAOR, 0);
378}
379
380/* -----------------------------------------------------------------------------
381 * DMA engine operations
382 */
383
384static int usb_dmac_alloc_chan_resources(struct dma_chan *chan)
385{
386 struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
387 int ret;
388
389 while (uchan->descs_allocated < USB_DMAC_INITIAL_NR_DESC) {
390 ret = usb_dmac_desc_alloc(uchan, USB_DMAC_INITIAL_NR_SG,
391 GFP_KERNEL);
392 if (ret < 0) {
393 usb_dmac_desc_free(uchan);
394 return ret;
395 }
396 uchan->descs_allocated++;
397 }
398
399 return pm_runtime_get_sync(chan->device->dev);
400}
401
402static void usb_dmac_free_chan_resources(struct dma_chan *chan)
403{
404 struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
405 unsigned long flags;
406
407 /* Protect against ISR */
408 spin_lock_irqsave(&uchan->vc.lock, flags);
409 usb_dmac_chan_halt(uchan);
410 spin_unlock_irqrestore(&uchan->vc.lock, flags);
411
412 usb_dmac_desc_free(uchan);
413 vchan_free_chan_resources(&uchan->vc);
414
415 pm_runtime_put(chan->device->dev);
416}
417
418static struct dma_async_tx_descriptor *
419usb_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
420 unsigned int sg_len, enum dma_transfer_direction dir,
421 unsigned long dma_flags, void *context)
422{
423 struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
424 struct usb_dmac_desc *desc;
425 struct scatterlist *sg;
426 int i;
427
428 if (!sg_len) {
429 dev_warn(chan->device->dev,
430 "%s: bad parameter: len=%d\n", __func__, sg_len);
431 return NULL;
432 }
433
434 desc = usb_dmac_desc_get(uchan, sg_len, GFP_NOWAIT);
435 if (!desc)
436 return NULL;
437
438 desc->direction = dir;
439 desc->sg_len = sg_len;
440 for_each_sg(sgl, sg, sg_len, i) {
441 desc->sg[i].mem_addr = sg_dma_address(sg);
442 desc->sg[i].size = sg_dma_len(sg);
443 }
444
445 return vchan_tx_prep(&uchan->vc, &desc->vd, dma_flags);
446}
447
448static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
449{
450 struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
451 struct usb_dmac_desc *desc;
452 unsigned long flags;
453 LIST_HEAD(head);
454 LIST_HEAD(list);
455
456 spin_lock_irqsave(&uchan->vc.lock, flags);
457 usb_dmac_chan_halt(uchan);
458 vchan_get_all_descriptors(&uchan->vc, &head);
459 if (uchan->desc)
460 uchan->desc = NULL;
461 list_splice_init(&uchan->desc_got, &list);
462 list_for_each_entry(desc, &list, node)
463 list_move_tail(&desc->node, &uchan->desc_freed);
464 spin_unlock_irqrestore(&uchan->vc.lock, flags);
465 vchan_dma_desc_free_list(&uchan->vc, &head);
466
467 return 0;
468}
469
470static unsigned int usb_dmac_get_current_residue(struct usb_dmac_chan *chan,
471 struct usb_dmac_desc *desc,
472 int sg_index)
473{
474 struct usb_dmac_sg *sg = desc->sg + sg_index;
475 u32 mem_addr = sg->mem_addr & 0xffffffff;
476 unsigned int residue = sg->size;
477
478 /*
479 * We cannot use USB_DMATCR to calculate the residue because its value
480 * is not suitable for such a calculation.
481 */
482 if (desc->direction == DMA_DEV_TO_MEM)
483 residue -= usb_dmac_chan_read(chan, USB_DMADAR) - mem_addr;
484 else
485 residue -= usb_dmac_chan_read(chan, USB_DMASAR) - mem_addr;
486
487 return residue;
488}
489
490static u32 usb_dmac_chan_get_residue_if_complete(struct usb_dmac_chan *chan,
491 dma_cookie_t cookie)
492{
493 struct usb_dmac_desc *desc;
494 u32 residue = 0;
495
496 list_for_each_entry_reverse(desc, &chan->desc_freed, node) {
497 if (desc->done_cookie == cookie) {
498 residue = desc->residue;
499 break;
500 }
501 }
502
503 return residue;
504}
505
506static u32 usb_dmac_chan_get_residue(struct usb_dmac_chan *chan,
507 dma_cookie_t cookie)
508{
509 u32 residue = 0;
510 struct virt_dma_desc *vd;
511 struct usb_dmac_desc *desc = chan->desc;
512 int i;
513
514 if (!desc) {
515 vd = vchan_find_desc(&chan->vc, cookie);
516 if (!vd)
517 return 0;
518 desc = to_usb_dmac_desc(vd);
519 }
520
521 /* Compute the size of all usb_dmac_sg still to be transferred */
522 for (i = desc->sg_index + 1; i < desc->sg_len; i++)
523 residue += desc->sg[i].size;
524
525 /* Add the residue for the current sg */
526 residue += usb_dmac_get_current_residue(chan, desc, desc->sg_index);
527
528 return residue;
529}
530
531static enum dma_status usb_dmac_tx_status(struct dma_chan *chan,
532 dma_cookie_t cookie,
533 struct dma_tx_state *txstate)
534{
535 struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
536 enum dma_status status;
537 unsigned int residue = 0;
538 unsigned long flags;
539
540 status = dma_cookie_status(chan, cookie, txstate);
541 /* A client driver can still query the residue after DMA_COMPLETE */
542 if (!txstate)
543 return status;
544
545 spin_lock_irqsave(&uchan->vc.lock, flags);
546 if (status == DMA_COMPLETE)
547 residue = usb_dmac_chan_get_residue_if_complete(uchan, cookie);
548 else
549 residue = usb_dmac_chan_get_residue(uchan, cookie);
550 spin_unlock_irqrestore(&uchan->vc.lock, flags);
551
552 dma_set_residue(txstate, residue);
553
554 return status;
555}
556
557static void usb_dmac_issue_pending(struct dma_chan *chan)
558{
559 struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
560 unsigned long flags;
561
562 spin_lock_irqsave(&uchan->vc.lock, flags);
563 if (vchan_issue_pending(&uchan->vc) && !uchan->desc)
564 usb_dmac_chan_start_desc(uchan);
565 spin_unlock_irqrestore(&uchan->vc.lock, flags);
566}
567
568static void usb_dmac_virt_desc_free(struct virt_dma_desc *vd)
569{
570 struct usb_dmac_desc *desc = to_usb_dmac_desc(vd);
571 struct usb_dmac_chan *chan = to_usb_dmac_chan(vd->tx.chan);
572
573 usb_dmac_desc_put(chan, desc);
574}
575
576/* -----------------------------------------------------------------------------
577 * IRQ handling
578 */
579
580static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan)
581{
582 struct usb_dmac_desc *desc = chan->desc;
583
584 BUG_ON(!desc);
585
586 if (++desc->sg_index < desc->sg_len) {
587 usb_dmac_chan_start_sg(chan, desc->sg_index);
588 } else {
589 desc->residue = usb_dmac_get_current_residue(chan, desc,
590 desc->sg_index - 1);
591 desc->done_cookie = desc->vd.tx.cookie;
592 vchan_cookie_complete(&desc->vd);
593
594 /* Start the next transfer if another descriptor is queued */
595 usb_dmac_chan_start_desc(chan);
596 }
597}
598
599static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
600{
601 struct usb_dmac_chan *chan = dev;
602 irqreturn_t ret = IRQ_NONE;
603 u32 mask = USB_DMACHCR_TE;
604 u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP;
605 u32 chcr;
606
607 spin_lock(&chan->vc.lock);
608
609 chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
610 if (chcr & check_bits)
611 mask |= USB_DMACHCR_DE | check_bits;
612 if (chcr & USB_DMACHCR_NULL) {
613 /* A TE interrupt will be raised after we set FTE */
614 mask |= USB_DMACHCR_NULL;
615 chcr |= USB_DMACHCR_FTE;
616 ret |= IRQ_HANDLED;
617 }
618 usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
619
620 if (chcr & check_bits) {
621 usb_dmac_isr_transfer_end(chan);
622 ret |= IRQ_HANDLED;
623 }
624
625 spin_unlock(&chan->vc.lock);
626
627 return ret;
628}
629
630/* -----------------------------------------------------------------------------
631 * OF xlate and channel filter
632 */
633
634static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg)
635{
636 struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
637 struct of_phandle_args *dma_spec = arg;
638
639 if (dma_spec->np != chan->device->dev->of_node)
640 return false;
641
642 /* Each channel is tied to a fixed FIFO of the USB controller */
643 if (uchan->index != dma_spec->args[0])
644 return false;
645
646 return true;
647}
648
649static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
650 struct of_dma *ofdma)
651{
652 struct usb_dmac_chan *uchan;
653 struct dma_chan *chan;
654 dma_cap_mask_t mask;
655
656 if (dma_spec->args_count != 1)
657 return NULL;
658
659 /* Only slave DMA channels can be allocated via DT */
660 dma_cap_zero(mask);
661 dma_cap_set(DMA_SLAVE, mask);
662
663 chan = dma_request_channel(mask, usb_dmac_chan_filter, dma_spec);
664 if (!chan)
665 return NULL;
666
667 uchan = to_usb_dmac_chan(chan);
668
669 return chan;
670}
671
672/* -----------------------------------------------------------------------------
673 * Power management
674 */
675
676static int usb_dmac_runtime_suspend(struct device *dev)
677{
678 struct usb_dmac *dmac = dev_get_drvdata(dev);
679 int i;
680
681 for (i = 0; i < dmac->n_channels; ++i)
682 usb_dmac_chan_halt(&dmac->channels[i]);
683
684 return 0;
685}
686
687static int usb_dmac_runtime_resume(struct device *dev)
688{
689 struct usb_dmac *dmac = dev_get_drvdata(dev);
690
691 return usb_dmac_init(dmac);
692}
693
694static const struct dev_pm_ops usb_dmac_pm = {
695 SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
696 NULL)
697};
698
699/* -----------------------------------------------------------------------------
700 * Probe and remove
701 */
702
703static int usb_dmac_chan_probe(struct usb_dmac *dmac,
704 struct usb_dmac_chan *uchan,
705 unsigned int index)
706{
707 struct platform_device *pdev = to_platform_device(dmac->dev);
708 char pdev_irqname[5];
709 char *irqname;
710 int ret;
711
712 uchan->index = index;
713 uchan->iomem = dmac->iomem + USB_DMAC_CHAN_OFFSET(index);
714
715 /* Request the channel interrupt. */
716 sprintf(pdev_irqname, "ch%u", index);
717 uchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
718 if (uchan->irq < 0) {
719 dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
720 return -ENODEV;
721 }
722
723 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
724 dev_name(dmac->dev), index);
725 if (!irqname)
726 return -ENOMEM;
727
728 ret = devm_request_irq(dmac->dev, uchan->irq, usb_dmac_isr_channel,
729 IRQF_SHARED, irqname, uchan);
730 if (ret) {
731 dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
732 uchan->irq, ret);
733 return ret;
734 }
735
736 uchan->vc.desc_free = usb_dmac_virt_desc_free;
737 vchan_init(&uchan->vc, &dmac->engine);
738 INIT_LIST_HEAD(&uchan->desc_freed);
739 INIT_LIST_HEAD(&uchan->desc_got);
740
741 return 0;
742}
743
744static int usb_dmac_parse_of(struct device *dev, struct usb_dmac *dmac)
745{
746 struct device_node *np = dev->of_node;
747 int ret;
748
749 ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
750 if (ret < 0) {
751 dev_err(dev, "unable to read dma-channels property\n");
752 return ret;
753 }
754
755 if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
756 dev_err(dev, "invalid number of channels %u\n",
757 dmac->n_channels);
758 return -EINVAL;
759 }
760
761 return 0;
762}
763
764static int usb_dmac_probe(struct platform_device *pdev)
765{
766 const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH;
767 struct dma_device *engine;
768 struct usb_dmac *dmac;
769 struct resource *mem;
770 unsigned int i;
771 int ret;
772
773 dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
774 if (!dmac)
775 return -ENOMEM;
776
777 dmac->dev = &pdev->dev;
778 platform_set_drvdata(pdev, dmac);
779
780 ret = usb_dmac_parse_of(&pdev->dev, dmac);
781 if (ret < 0)
782 return ret;
783
784 dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
785 sizeof(*dmac->channels), GFP_KERNEL);
786 if (!dmac->channels)
787 return -ENOMEM;
788
789 /* Request resources. */
790 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
791 dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
792 if (IS_ERR(dmac->iomem))
793 return PTR_ERR(dmac->iomem);
794
795 /* Enable runtime PM and initialize the device. */
796 pm_runtime_enable(&pdev->dev);
797 ret = pm_runtime_get_sync(&pdev->dev);
798 if (ret < 0) {
799 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
800 return ret;
801 }
802
803 ret = usb_dmac_init(dmac);
804 pm_runtime_put(&pdev->dev);
805
806 if (ret) {
807 dev_err(&pdev->dev, "failed to reset device\n");
808 goto error;
809 }
810
811 /* Initialize the channels. */
812 INIT_LIST_HEAD(&dmac->engine.channels);
813
814 for (i = 0; i < dmac->n_channels; ++i) {
815 ret = usb_dmac_chan_probe(dmac, &dmac->channels[i], i);
816 if (ret < 0)
817 goto error;
818 }
819
820 /* Register the DMAC as a DMA provider for DT. */
821 ret = of_dma_controller_register(pdev->dev.of_node, usb_dmac_of_xlate,
822 NULL);
823 if (ret < 0)
824 goto error;
825
826 /*
827 * Register the DMA engine device.
828 *
829 * Default transfer size of 32 bytes requires 32-byte alignment.
830 */
831 engine = &dmac->engine;
832 dma_cap_set(DMA_SLAVE, engine->cap_mask);
833
834 engine->dev = &pdev->dev;
835
836 engine->src_addr_widths = widths;
837 engine->dst_addr_widths = widths;
838 engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
839 engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
840
841 engine->device_alloc_chan_resources = usb_dmac_alloc_chan_resources;
842 engine->device_free_chan_resources = usb_dmac_free_chan_resources;
843 engine->device_prep_slave_sg = usb_dmac_prep_slave_sg;
844 engine->device_terminate_all = usb_dmac_chan_terminate_all;
845 engine->device_tx_status = usb_dmac_tx_status;
846 engine->device_issue_pending = usb_dmac_issue_pending;
847
848 ret = dma_async_device_register(engine);
849 if (ret < 0)
850 goto error;
851
852 return 0;
853
854error:
855 of_dma_controller_free(pdev->dev.of_node);
856 pm_runtime_disable(&pdev->dev);
857 return ret;
858}
859
860static void usb_dmac_chan_remove(struct usb_dmac *dmac,
861 struct usb_dmac_chan *uchan)
862{
863 usb_dmac_chan_halt(uchan);
864 devm_free_irq(dmac->dev, uchan->irq, uchan);
865}
866
867static int usb_dmac_remove(struct platform_device *pdev)
868{
869 struct usb_dmac *dmac = platform_get_drvdata(pdev);
870 int i;
871
872 for (i = 0; i < dmac->n_channels; ++i)
873 usb_dmac_chan_remove(dmac, &dmac->channels[i]);
874 of_dma_controller_free(pdev->dev.of_node);
875 dma_async_device_unregister(&dmac->engine);
876
877 pm_runtime_disable(&pdev->dev);
878
879 return 0;
880}
881
882static void usb_dmac_shutdown(struct platform_device *pdev)
883{
884 struct usb_dmac *dmac = platform_get_drvdata(pdev);
885
886 usb_dmac_stop(dmac);
887}
888
889static const struct of_device_id usb_dmac_of_ids[] = {
890 { .compatible = "renesas,usb-dmac", },
891 { /* Sentinel */ }
892};
893MODULE_DEVICE_TABLE(of, usb_dmac_of_ids);
894
895static struct platform_driver usb_dmac_driver = {
896 .driver = {
897 .pm = &usb_dmac_pm,
898 .name = "usb-dmac",
899 .of_match_table = usb_dmac_of_ids,
900 },
901 .probe = usb_dmac_probe,
902 .remove = usb_dmac_remove,
903 .shutdown = usb_dmac_shutdown,
904};
905
906module_platform_driver(usb_dmac_driver);
907
908MODULE_DESCRIPTION("Renesas USB DMA Controller Driver");
909MODULE_AUTHOR("Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>");
910MODULE_LICENSE("GPL v2");