author	Heiko Stuebner <heiko@sntech.de>	2013-10-07 17:42:10 -0400
committer	Kukjin Kim <kgene.kim@samsung.com>	2013-10-07 17:42:10 -0400
commit	ddeccb8d6b5bbe2c1e3a29f8c74b52f170c2207d (patch)
tree	808f494a72787c87c2504f6b93c8cd165489bda2 /drivers/dma/s3c24xx-dma.c
parent	0fa93b914d45ed8dd248bd621d4396e9f790817c (diff)
dmaengine: add driver for Samsung s3c24xx SoCs
This adds a new driver to support the s3c24xx dma using the dmaengine
and makes the old one in mach-s3c24xx obsolete in the long run.

Conceptually the s3c24xx-dma feels like a distant relative of the pl08x
with numerous virtual channels being mapped to a lot less physical ones.
The driver therefore borrows a lot from the amba-pl08x driver in this
regard. Functionality-wise the driver gains a memcpy ability in addition
to the slave_sg one.

The driver supports both the method for requesting the peripheral used
by SoCs before the S3C2443 and the different method for S3C2443 and
later. On earlier SoCs the set of hardware channels usable for a specific
peripheral is constrained, while on later SoCs all channels can be used
for any peripheral.

Tested on a s3c2416-based board, memcpy using the dmatest module and
slave_sg partially using the spi-s3c64xx driver.

Signed-off-by: Heiko Stuebner <heiko@sntech.de>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Kukjin Kim <kgene.kim@samsung.com>
Diffstat (limited to 'drivers/dma/s3c24xx-dma.c')
-rw-r--r--	drivers/dma/s3c24xx-dma.c	1340
1 file changed, 1340 insertions, 0 deletions
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
new file mode 100644
index 000000000000..56c92538bc4c
--- /dev/null
+++ b/drivers/dma/s3c24xx-dma.c
@@ -0,0 +1,1340 @@
/*
 * S3C24XX DMA handling
 *
 * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
 *
 * based on amba-pl08x.c
 *
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals
 * that can be routed to any of the 4 to 8 hardware channels.
 *
 * Therefore on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * Open items:
 * - bursts
 */

#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_data/dma-s3c24xx.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define MAX_DMA_CHANNELS	8

#define S3C24XX_DISRC			0x00
#define S3C24XX_DISRCC			0x04
#define S3C24XX_DISRCC_INC_INCREMENT	0
#define S3C24XX_DISRCC_INC_FIXED	BIT(0)
#define S3C24XX_DISRCC_LOC_AHB		0
#define S3C24XX_DISRCC_LOC_APB		BIT(1)

#define S3C24XX_DIDST			0x08
#define S3C24XX_DIDSTC			0x0c
#define S3C24XX_DIDSTC_INC_INCREMENT	0
#define S3C24XX_DIDSTC_INC_FIXED	BIT(0)
#define S3C24XX_DIDSTC_LOC_AHB		0
#define S3C24XX_DIDSTC_LOC_APB		BIT(1)
#define S3C24XX_DIDSTC_INT_TC0		0
#define S3C24XX_DIDSTC_INT_RELOAD	BIT(2)

#define S3C24XX_DCON			0x10

#define S3C24XX_DCON_TC_MASK		0xfffff
#define S3C24XX_DCON_DSZ_BYTE		(0 << 20)
#define S3C24XX_DCON_DSZ_HALFWORD	(1 << 20)
#define S3C24XX_DCON_DSZ_WORD		(2 << 20)
#define S3C24XX_DCON_DSZ_MASK		(3 << 20)
#define S3C24XX_DCON_DSZ_SHIFT		20
#define S3C24XX_DCON_AUTORELOAD		0
#define S3C24XX_DCON_NORELOAD		BIT(22)
#define S3C24XX_DCON_HWTRIG		BIT(23)
#define S3C24XX_DCON_HWSRC_SHIFT	24
#define S3C24XX_DCON_SERV_SINGLE	0
#define S3C24XX_DCON_SERV_WHOLE		BIT(27)
#define S3C24XX_DCON_TSZ_UNIT		0
#define S3C24XX_DCON_TSZ_BURST4		BIT(28)
#define S3C24XX_DCON_INT		BIT(29)
#define S3C24XX_DCON_SYNC_PCLK		0
#define S3C24XX_DCON_SYNC_HCLK		BIT(30)
#define S3C24XX_DCON_DEMAND		0
#define S3C24XX_DCON_HANDSHAKE		BIT(31)

#define S3C24XX_DSTAT			0x14
#define S3C24XX_DSTAT_STAT_BUSY		BIT(20)
#define S3C24XX_DSTAT_CURRTC_MASK	0xfffff

#define S3C24XX_DMASKTRIG		0x20
#define S3C24XX_DMASKTRIG_SWTRIG	BIT(0)
#define S3C24XX_DMASKTRIG_ON		BIT(1)
#define S3C24XX_DMASKTRIG_STOP		BIT(2)

#define S3C24XX_DMAREQSEL		0x24
#define S3C24XX_DMAREQSEL_HW		BIT(0)
/*
 * S3C2410, S3C2440 and S3C2442 SoCs cannot select any physical channel
 * for a DMA source. Instead only specific channels are valid.
 * All of these SoCs have 4 physical channels and the number of request
 * source bits is 3. Additionally we also need 1 bit to mark the channel
 * as valid.
 * Therefore we separate the chansel element of the channel data into 4
 * parts of 4 bits each, to hold the information if the channel is valid
 * and the hw request source to use.
 *
 * Example:
 * SDI is valid on channels 0, 2 and 3 - with varying hw request sources.
 * For it the chansel field would look like
 *
 * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1
 * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2
 * ((BIT(3) | 2) << 0 * 4)   // channel 0, with request source 2
 */
#define S3C24XX_CHANSEL_WIDTH		4
#define S3C24XX_CHANSEL_VALID		BIT(3)
#define S3C24XX_CHANSEL_REQ_MASK	7
/*
 * struct soc_data - vendor-specific config parameters for individual SoCs
 * @stride: spacing between the registers of each channel
 * @has_reqsel: does the controller use the newer request selection mechanism
 * @has_clocks: are controllable dma-clocks present
 */
struct soc_data {
	int stride;
	bool has_reqsel;
	bool has_clocks;
};

/*
 * enum s3c24xx_dma_chan_state - holds the virtual channel states
 * @S3C24XX_DMA_CHAN_IDLE: the channel is idle
 * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum s3c24xx_dma_chan_state {
	S3C24XX_DMA_CHAN_IDLE,
	S3C24XX_DMA_CHAN_RUNNING,
	S3C24XX_DMA_CHAN_WAITING,
};

/*
 * struct s3c24xx_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct s3c24xx_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/*
 * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of children sg's
 * @at: sg currently being transferred
 * @width: transfer width
 * @disrcc: value for source control register
 * @didstc: value for destination control register
 * @dcon: base value for dcon register
 */
struct s3c24xx_txd {
	struct virt_dma_desc vd;
	struct list_head dsg_list;
	struct list_head *at;
	u8 width;
	u32 disrcc;
	u32 didstc;
	u32 dcon;
};

struct s3c24xx_dma_chan;

/*
 * struct s3c24xx_dma_phy - holder for the physical channels
 * @id: physical index to this channel
 * @valid: does the channel have all required elements
 * @base: virtual memory base (remapped) for this channel
 * @irq: interrupt for this channel
 * @clk: clock for this channel
 * @lock: a lock to use when altering an instance of this struct
 * @serving: virtual channel currently being served by this physical channel
 * @host: a pointer to the host (internal use)
 */
struct s3c24xx_dma_phy {
	unsigned int id;
	bool valid;
	void __iomem *base;
	int irq;
	struct clk *clk;
	spinlock_t lock;
	struct s3c24xx_dma_chan *serving;
	struct s3c24xx_dma_engine *host;
};

/*
 * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
 * @id: the id of the channel
 * @name: name of the channel
 * @vc: wrapped virtual channel
 * @phy: the physical channel utilized by this channel, if there is one
 * @cfg: slave configuration set via the runtime config
 * @at: active transaction on this channel
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 */
struct s3c24xx_dma_chan {
	int id;
	const char *name;
	struct virt_dma_chan vc;
	struct s3c24xx_dma_phy *phy;
	struct dma_slave_config cfg;
	struct s3c24xx_txd *at;
	struct s3c24xx_dma_engine *host;
	enum s3c24xx_dma_chan_state state;
	bool slave;
};

/*
 * struct s3c24xx_dma_engine - the local state holder for the S3C24XX
 * @pdev: the corresponding platform device
 * @pdata: platform data passed in from the platform/machine
 * @sdata: the SoC-specific data for this controller variant
 * @base: virtual memory base (remapped)
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @phy_chans: array of data for the physical channels
 */
struct s3c24xx_dma_engine {
	struct platform_device *pdev;
	const struct s3c24xx_dma_platdata *pdata;
	struct soc_data *sdata;
	void __iomem *base;
	struct dma_device slave;
	struct dma_device memcpy;
	struct s3c24xx_dma_phy *phy_chans;
};

/*
 * Physical channel handling
 */

/*
 * Check whether a certain channel is busy or not.
 */
static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy)
{
	unsigned int val = readl(phy->base + S3C24XX_DSTAT);
	return val & S3C24XX_DSTAT_STAT_BUSY;
}

static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan,
				  struct s3c24xx_dma_phy *phy)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
	struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
	int phyvalid;

	/* every phy is valid for memcpy channels */
	if (!s3cchan->slave)
		return true;

	/* On newer variants all phys can be used for all virtual channels */
	if (s3cdma->sdata->has_reqsel)
		return true;

	phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH));
	return (phyvalid & S3C24XX_CHANSEL_VALID) ? true : false;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static
struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
	struct s3c24xx_dma_phy *phy = NULL;
	unsigned long flags;
	int i;
	int ret;

	for (i = 0; i < pdata->num_phy_channels; i++) {
		phy = &s3cdma->phy_chans[i];

		if (!phy->valid)
			continue;

		if (!s3c24xx_dma_phy_valid(s3cchan, phy))
			continue;

		spin_lock_irqsave(&phy->lock, flags);

		if (!phy->serving) {
			phy->serving = s3cchan;
			spin_unlock_irqrestore(&phy->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&phy->lock, flags);
	}

	/* No physical channel available, cope with it */
	if (i == pdata->num_phy_channels) {
		dev_warn(&s3cdma->pdev->dev, "no phy channel available\n");
		return NULL;
	}

	/* start the phy clock */
	if (s3cdma->sdata->has_clocks) {
		ret = clk_enable(phy->clk);
		if (ret) {
			dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n",
				phy->id, ret);
			phy->serving = NULL;
			return NULL;
		}
	}

	return phy;
}

/*
 * Mark the physical channel as free.
 *
 * This drops the link between the physical and virtual channel.
 */
static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy)
{
	struct s3c24xx_dma_engine *s3cdma = phy->host;

	if (s3cdma->sdata->has_clocks)
		clk_disable(phy->clk);

	phy->serving = NULL;
}

/*
 * Stops the channel by writing the stop bit.
 * This should not be used for an on-going transfer, but as a method of
 * shutting down a channel (eg, when it's no longer used) or terminating a
 * transfer.
 */
static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy)
{
	writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG);
}

/*
 * Virtual channel handling
 */

static inline
struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct s3c24xx_dma_chan, vc.chan);
}

static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_phy *phy = s3cchan->phy;
	struct s3c24xx_txd *txd = s3cchan->at;
	u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK;

	return tc * txd->width;
}

static int s3c24xx_dma_set_runtime_config(struct s3c24xx_dma_chan *s3cchan,
					  struct dma_slave_config *config)
{
	if (!s3cchan->slave)
		return -EINVAL;

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	s3cchan->cfg = *config;

	return 0;
}
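
/*
 * A slave driver would normally hand its parameters in through the generic
 * dmaengine_slave_config() helper before preparing transfers. A minimal
 * sketch (the peripheral FIFO address is purely hypothetical):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = 0x52000010,		// example FIFO address
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */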

/*
 * Transfer handling
 */

static inline
struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct s3c24xx_txd, vd.tx);
}

static struct s3c24xx_txd *s3c24xx_dma_get_txd(void)
{
	struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd) {
		INIT_LIST_HEAD(&txd->dsg_list);
		txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD;
	}

	return txd;
}

static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd)
{
	struct s3c24xx_sg *dsg, *_dsg;

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}

	kfree(txd);
}

static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan,
				      struct s3c24xx_txd *txd)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_dma_phy *phy = s3cchan->phy;
	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
	struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node);
	u32 dcon = txd->dcon;
	u32 val;

	/* transfer-size and -count from len and width */
	switch (txd->width) {
	case 1:
		dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len;
		break;
	case 2:
		dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2);
		break;
	case 4:
		dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4);
		break;
	}
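	/*
	 * Sketch of the resulting encoding: a 512-byte transfer with
	 * width 4 yields S3C24XX_DCON_DSZ_WORD | 128, i.e. 128 words
	 * in the DCON transfer-count field.
	 */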

	if (s3cchan->slave) {
		struct s3c24xx_dma_channel *cdata =
			&pdata->channels[s3cchan->id];

		if (s3cdma->sdata->has_reqsel) {
			writel_relaxed((cdata->chansel << 1) |
				       S3C24XX_DMAREQSEL_HW,
				       phy->base + S3C24XX_DMAREQSEL);
		} else {
			int csel = cdata->chansel >> (phy->id *
						      S3C24XX_CHANSEL_WIDTH);

			csel &= S3C24XX_CHANSEL_REQ_MASK;
			dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT;
			dcon |= S3C24XX_DCON_HWTRIG;
		}
	} else {
		if (s3cdma->sdata->has_reqsel)
			writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL);
	}

	writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC);
	writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC);
	writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST);
	writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC);
	writel_relaxed(dcon, phy->base + S3C24XX_DCON);

	val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG);
	val &= ~S3C24XX_DMASKTRIG_STOP;
	val |= S3C24XX_DMASKTRIG_ON;

	/* trigger the dma operation for memcpy transfers */
	if (!s3cchan->slave)
		val |= S3C24XX_DMASKTRIG_SWTRIG;

	writel(val, phy->base + S3C24XX_DMASKTRIG);
}

/*
 * Set the initial DMA register values and start first sg.
 */
static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_phy *phy = s3cchan->phy;
	struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc);
	struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);

	list_del(&txd->vd.node);

	s3cchan->at = txd;

	/* Wait for channel inactive */
	while (s3c24xx_dma_phy_busy(phy))
		cpu_relax();

	/* point to the first element of the sg list */
	txd->at = txd->dsg_list.next;
	s3c24xx_dma_start_next_sg(s3cchan, txd);
}

static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
				      struct s3c24xx_dma_chan *s3cchan)
{
	LIST_HEAD(head);

	vchan_get_all_descriptors(&s3cchan->vc, &head);
	vchan_dma_desc_free_list(&s3cchan->vc, &head);
}

/*
 * Try to allocate a physical channel. When successful, assign it to
 * this virtual channel, and initiate the next descriptor. The
 * virtual channel lock must be held at this point.
 */
static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_dma_phy *phy;

	phy = s3c24xx_dma_get_phy(s3cchan);
	if (!phy) {
		dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n",
			s3cchan->name);
		s3cchan->state = S3C24XX_DMA_CHAN_WAITING;
		return;
	}

	dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n",
		phy->id, s3cchan->name);

	s3cchan->phy = phy;
	s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;

	s3c24xx_dma_start_next_txd(s3cchan);
}

static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy,
					   struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;

	dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n",
		phy->id, s3cchan->name);

	/*
	 * We do this without taking the lock; we're really only concerned
	 * about whether this pointer is NULL or not, and we're guaranteed
	 * that this will only be called when it _already_ is non-NULL.
	 */
	phy->serving = s3cchan;
	s3cchan->phy = phy;
	s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
	s3c24xx_dma_start_next_txd(s3cchan);
}

/*
 * Free a physical DMA channel, potentially reallocating it to another
 * virtual channel if we have any pending.
 */
static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_dma_chan *p, *next;

retry:
	next = NULL;

	/* Find a waiting virtual channel for the next transfer. */
	list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node)
		if (p->state == S3C24XX_DMA_CHAN_WAITING) {
			next = p;
			break;
		}

	if (!next) {
		list_for_each_entry(p, &s3cdma->slave.channels,
				    vc.chan.device_node)
			if (p->state == S3C24XX_DMA_CHAN_WAITING &&
			    s3c24xx_dma_phy_valid(p, s3cchan->phy)) {
				next = p;
				break;
			}
	}

	/* Ensure that the physical channel is stopped */
	s3c24xx_dma_terminate_phy(s3cchan->phy);

	if (next) {
		bool success;

		/*
		 * Eww. We know this isn't going to deadlock
		 * but lockdep probably doesn't.
		 */
		spin_lock(&next->vc.lock);
		/* Re-check the state now that we have the lock */
		success = next->state == S3C24XX_DMA_CHAN_WAITING;
		if (success)
			s3c24xx_dma_phy_reassign_start(s3cchan->phy, next);
		spin_unlock(&next->vc.lock);

		/* If the state changed, try to find another channel */
		if (!success)
			goto retry;
	} else {
		/* No more jobs, so free up the physical channel */
		s3c24xx_dma_put_phy(s3cchan->phy);
	}

	s3cchan->phy = NULL;
	s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
}

static void s3c24xx_dma_unmap_buffers(struct s3c24xx_txd *txd)
{
	struct device *dev = txd->vd.tx.chan->device->dev;
	struct s3c24xx_sg *dsg;

	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->src_addr, dsg->len,
						 DMA_TO_DEVICE);
		else {
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->src_addr, dsg->len,
					       DMA_TO_DEVICE);
		}
	}

	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
						 DMA_FROM_DEVICE);
		else
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
					       DMA_FROM_DEVICE);
	}
}

static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
{
	struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);

	if (!s3cchan->slave)
		s3c24xx_dma_unmap_buffers(txd);

	s3c24xx_dma_free_txd(txd);
}

static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
{
	struct s3c24xx_dma_phy *phy = data;
	struct s3c24xx_dma_chan *s3cchan = phy->serving;
	struct s3c24xx_txd *txd;

	dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id);

	/*
	 * Interrupts happen to notify the completion of a transfer and the
	 * channel should have moved into its stop state already on its own.
	 * Therefore interrupts on channels not bound to a virtual channel
	 * should never happen. Nevertheless send a terminate command to the
	 * channel if the unlikely case happens.
	 */
	if (unlikely(!s3cchan)) {
		dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n",
			phy->id);

		s3c24xx_dma_terminate_phy(phy);

		return IRQ_HANDLED;
	}

	spin_lock(&s3cchan->vc.lock);
	txd = s3cchan->at;
	if (txd) {
		/* when more sg's are in this txd, start the next one */
		if (!list_is_last(txd->at, &txd->dsg_list)) {
			txd->at = txd->at->next;
			s3c24xx_dma_start_next_sg(s3cchan, txd);
		} else {
			s3cchan->at = NULL;
			vchan_cookie_complete(&txd->vd);

			/*
			 * And start the next descriptor (if any),
			 * otherwise free this channel.
			 */
			if (vchan_next_desc(&s3cchan->vc))
				s3c24xx_dma_start_next_txd(s3cchan);
			else
				s3c24xx_dma_phy_free(s3cchan);
		}
	}
	spin_unlock(&s3cchan->vc.lock);

	return IRQ_HANDLED;
}

/*
 * The DMA ENGINE API
 */

static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			       unsigned long arg)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&s3cchan->vc.lock, flags);

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = s3c24xx_dma_set_runtime_config(s3cchan,
					(struct dma_slave_config *)arg);
		break;
	case DMA_TERMINATE_ALL:
		if (!s3cchan->phy && !s3cchan->at) {
			dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
				s3cchan->id);
			ret = -EINVAL;
			break;
		}

		s3cchan->state = S3C24XX_DMA_CHAN_IDLE;

		/* Mark physical channel as free */
		if (s3cchan->phy)
			s3c24xx_dma_phy_free(s3cchan);

		/* Dequeue current job */
		if (s3cchan->at) {
			s3c24xx_dma_desc_free(&s3cchan->at->vd);
			s3cchan->at = NULL;
		}

		/* Dequeue jobs not yet fired as well */
		s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);

	return ret;
}

static int s3c24xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
{
	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(to_virt_chan(chan));
}

static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	struct s3c24xx_txd *txd;
	struct s3c24xx_sg *dsg;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	spin_lock_irqsave(&s3cchan->vc.lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
		return ret;
	}

	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate) {
		spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
		return ret;
	}

	vd = vchan_find_desc(&s3cchan->vc, cookie);
	if (vd) {
		/* On the issued list, so hasn't been processed yet */
		txd = to_s3c24xx_txd(&vd->tx);

		list_for_each_entry(dsg, &txd->dsg_list, node)
			bytes += dsg->len;
	} else {
		/*
		 * Currently running, so sum over the pending sg's and
		 * the currently active one.
		 */
		txd = s3cchan->at;

		dsg = list_entry(txd->at, struct s3c24xx_sg, node);
		list_for_each_entry_from(dsg, &txd->dsg_list, node)
			bytes += dsg->len;

		bytes += s3c24xx_dma_getbytes_chan(s3cchan);
	}
	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);

	/*
	 * This cookie is not complete yet.
	 * Get the number of bytes left in the active transactions and queue.
	 */
	dma_set_residue(txstate, bytes);

	/* Whether waiting or running, we're in progress */
	return ret;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_txd *txd;
	struct s3c24xx_sg *dsg;
	int src_mod, dest_mod;

	dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n",
		len, s3cchan->name);

	if ((len & S3C24XX_DCON_TC_MASK) != len) {
		dev_err(&s3cdma->pdev->dev, "memcpy size %zu too large\n", len);
		return NULL;
	}

	txd = s3c24xx_dma_get_txd();
	if (!txd)
		return NULL;

	dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
	if (!dsg) {
		s3c24xx_dma_free_txd(txd);
		return NULL;
	}
	list_add_tail(&dsg->node, &txd->dsg_list);

	dsg->src_addr = src;
	dsg->dst_addr = dest;
	dsg->len = len;

	/*
	 * Determine a suitable transfer width.
	 * The DMA controller cannot fetch/store information which is not
	 * naturally aligned on the bus, i.e., a 4 byte fetch must start at
	 * an address divisible by 4 - more generally addr % width must be 0.
	 */
	src_mod = src % 4;
	dest_mod = dest % 4;
	switch (len % 4) {
	case 0:
		txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1;
		break;
	case 2:
		txd->width = ((src_mod == 2 || src_mod == 0) &&
			      (dest_mod == 2 || dest_mod == 0)) ? 2 : 1;
		break;
	default:
		txd->width = 1;
		break;
	}
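	/*
	 * Example (sketch): src = 0x1004, dest = 0x2008, len = 64 gives
	 * width 4; src = 0x1002, dest = 0x2000, len = 6 gives width 2;
	 * any odd address or length falls back to byte-wide transfers.
	 */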

	txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT;
	txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT;
	txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK |
		     S3C24XX_DCON_SERV_WHOLE;

	return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
}

static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
	struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
	struct s3c24xx_txd *txd;
	struct s3c24xx_sg *dsg;
	struct scatterlist *sg;
	dma_addr_t slave_addr;
	u32 hwcfg = 0;
	int tmp;

	dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %d bytes from %s\n",
		sg_dma_len(sgl), s3cchan->name);

	txd = s3c24xx_dma_get_txd();
	if (!txd)
		return NULL;

	if (cdata->handshake)
		txd->dcon |= S3C24XX_DCON_HANDSHAKE;

	switch (cdata->bus) {
	case S3C24XX_DMA_APB:
		txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
		hwcfg |= S3C24XX_DISRCC_LOC_APB;
		break;
	case S3C24XX_DMA_AHB:
		txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
		hwcfg |= S3C24XX_DISRCC_LOC_AHB;
		break;
	}

	/*
	 * Always assume our peripheral destination is a fixed
	 * address in memory.
	 */
	hwcfg |= S3C24XX_DISRCC_INC_FIXED;

	/*
	 * Individual dma operations are requested by the slave,
	 * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
	 */
	txd->dcon |= S3C24XX_DCON_SERV_SINGLE;

	if (direction == DMA_MEM_TO_DEV) {
		txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
			      S3C24XX_DISRCC_INC_INCREMENT;
		txd->didstc = hwcfg;
		slave_addr = s3cchan->cfg.dst_addr;
		txd->width = s3cchan->cfg.dst_addr_width;
	} else if (direction == DMA_DEV_TO_MEM) {
		txd->disrcc = hwcfg;
		txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
			      S3C24XX_DIDSTC_INC_INCREMENT;
		slave_addr = s3cchan->cfg.src_addr;
		txd->width = s3cchan->cfg.src_addr_width;
	} else {
		s3c24xx_dma_free_txd(txd);
		dev_err(&s3cdma->pdev->dev,
			"direction %d unsupported\n", direction);
		return NULL;
	}

	for_each_sg(sgl, sg, sg_len, tmp) {
		dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
		if (!dsg) {
			s3c24xx_dma_free_txd(txd);
			return NULL;
		}
		list_add_tail(&dsg->node, &txd->dsg_list);

		dsg->len = sg_dma_len(sg);
		if (direction == DMA_MEM_TO_DEV) {
			dsg->src_addr = sg_dma_address(sg);
			dsg->dst_addr = slave_addr;
		} else { /* DMA_DEV_TO_MEM */
			dsg->src_addr = slave_addr;
			dsg->dst_addr = sg_dma_address(sg);
		}
	}

	return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
}

/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void s3c24xx_dma_issue_pending(struct dma_chan *chan)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&s3cchan->vc.lock, flags);
	if (vchan_issue_pending(&s3cchan->vc)) {
		if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING)
			s3c24xx_dma_phy_alloc_and_start(s3cchan);
	}
	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
}

/*
 * Bringup and teardown
 */

/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
		struct dma_device *dmadev, unsigned int channels, bool slave)
{
	struct s3c24xx_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy as we have physical channels,
	 * we won't always be able to use all but the code will have
	 * to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
		if (!chan) {
			dev_err(dmadev->dev,
				"%s no memory for channel\n", __func__);
			return -ENOMEM;
		}

		chan->id = i;
		chan->host = s3cdma;
		chan->state = S3C24XX_DMA_CHAN_IDLE;

		if (slave) {
			chan->slave = true;
			chan->name = kasprintf(GFP_KERNEL, "slave%d", i);
			if (!chan->name)
				return -ENOMEM;
		} else {
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name)
				return -ENOMEM;
		}
		dev_dbg(dmadev->dev,
			"initialize virtual channel \"%s\"\n",
			chan->name);

		chan->vc.desc_free = s3c24xx_dma_desc_free;
		vchan_init(&chan->vc, dmadev);
	}
	dev_info(dmadev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}

static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
{
	struct s3c24xx_dma_chan *chan = NULL;
	struct s3c24xx_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, vc.chan.device_node)
		list_del(&chan->vc.chan.device_node);
}

/* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */
static struct soc_data soc_s3c2412 = {
	.stride = 0x40,
	.has_reqsel = true,
	.has_clocks = true,
};

/* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */
static struct soc_data soc_s3c2443 = {
	.stride = 0x100,
	.has_reqsel = true,
	.has_clocks = true,
};

static struct platform_device_id s3c24xx_dma_driver_ids[] = {
	{
		.name		= "s3c2412-dma",
		.driver_data	= (kernel_ulong_t)&soc_s3c2412,
	}, {
		.name		= "s3c2443-dma",
		.driver_data	= (kernel_ulong_t)&soc_s3c2443,
	},
	{ },
};
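
/*
 * A machine would register a matching platform device and hand in platform
 * data describing its channels. A minimal sketch (device name taken from
 * the id table above, channel values purely hypothetical):
 *
 *	static struct s3c24xx_dma_channel example_channels[] = {
 *		[0] = { .bus = S3C24XX_DMA_APB, .handshake = true,
 *			.chansel = 17 },
 *	};
 *	static struct s3c24xx_dma_platdata example_platdata = {
 *		.num_phy_channels = 6,
 *		.channels = example_channels,
 *		.num_channels = ARRAY_SIZE(example_channels),
 *	};
 */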

static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev)
{
	return (struct soc_data *)
			platform_get_device_id(pdev)->driver_data;
}

static int s3c24xx_dma_probe(struct platform_device *pdev)
{
	const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
	struct s3c24xx_dma_engine *s3cdma;
	struct soc_data *sdata;
	struct resource *res;
	int ret;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	/* Basic sanity check */
	if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
		dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
			pdata->num_phy_channels, MAX_DMA_CHANNELS);
		return -EINVAL;
	}

	sdata = s3c24xx_dma_get_soc_data(pdev);
	if (!sdata)
		return -EINVAL;

	s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL);
	if (!s3cdma)
		return -ENOMEM;

	s3cdma->pdev = pdev;
	s3cdma->pdata = pdata;
	s3cdma->sdata = sdata;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	s3cdma->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(s3cdma->base))
		return PTR_ERR(s3cdma->base);

	s3cdma->phy_chans = devm_kzalloc(&pdev->dev,
					 sizeof(struct s3c24xx_dma_phy) *
					 pdata->num_phy_channels,
					 GFP_KERNEL);
	if (!s3cdma->phy_chans)
		return -ENOMEM;

	/* acquire irqs and clocks for all physical channels */
	for (i = 0; i < pdata->num_phy_channels; i++) {
		struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
		char clk_name[6];

		phy->id = i;
		phy->base = s3cdma->base + (i * sdata->stride);
		phy->host = s3cdma;

		phy->irq = platform_get_irq(pdev, i);
		if (phy->irq < 0) {
			dev_err(&pdev->dev, "failed to get irq %d, err %d\n",
				i, phy->irq);
			continue;
		}

		ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq,
				       0, pdev->name, phy);
		if (ret) {
			dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n",
				i, ret);
			continue;
		}

		if (sdata->has_clocks) {
			sprintf(clk_name, "dma.%d", i);
			phy->clk = devm_clk_get(&pdev->dev, clk_name);
			if (IS_ERR(phy->clk)) {
				dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %lu\n",
					i, PTR_ERR(phy->clk));
				continue;
			}

			ret = clk_prepare(phy->clk);
			if (ret) {
				dev_err(&pdev->dev, "clock for phy %d failed, error %d\n",
					i, ret);
				continue;
			}
		}

		spin_lock_init(&phy->lock);
		phy->valid = true;

		dev_dbg(&pdev->dev, "physical channel %d is %s\n",
			i, s3c24xx_dma_phy_busy(phy) ? "BUSY" : "FREE");
	}

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask);
	dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask);
	s3cdma->memcpy.dev = &pdev->dev;
	s3cdma->memcpy.device_alloc_chan_resources =
					s3c24xx_dma_alloc_chan_resources;
	s3cdma->memcpy.device_free_chan_resources =
					s3c24xx_dma_free_chan_resources;
	s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
	s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
	s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
	s3cdma->memcpy.device_control = s3c24xx_dma_control;

	/* Initialize slave engine for SoC internal dedicated peripherals */
	dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
	s3cdma->slave.dev = &pdev->dev;
	s3cdma->slave.device_alloc_chan_resources =
					s3c24xx_dma_alloc_chan_resources;
	s3cdma->slave.device_free_chan_resources =
					s3c24xx_dma_free_chan_resources;
	s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
	s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
	s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
	s3cdma->slave.device_control = s3c24xx_dma_control;

	/* Register as many memcpy channels as there are physical channels */
	ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
						pdata->num_phy_channels, false);
	if (ret <= 0) {
		dev_warn(&pdev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto err_memcpy;
	}

	/* Register slave channels */
	ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave,
						pdata->num_channels, true);
	if (ret <= 0) {
		dev_warn(&pdev->dev,
			 "%s failed to enumerate slave channels - %d\n",
			 __func__, ret);
		goto err_slave;
	}

	ret = dma_async_device_register(&s3cdma->memcpy);
	if (ret) {
		dev_warn(&pdev->dev,
			 "%s failed to register memcpy as an async device - %d\n",
			 __func__, ret);
		goto err_memcpy_reg;
	}

	ret = dma_async_device_register(&s3cdma->slave);
	if (ret) {
		dev_warn(&pdev->dev,
			 "%s failed to register slave as an async device - %d\n",
			 __func__, ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, s3cdma);
	dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n",
		 pdata->num_phy_channels);

	return 0;

err_slave_reg:
	dma_async_device_unregister(&s3cdma->memcpy);
err_memcpy_reg:
	s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
err_slave:
	s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
err_memcpy:
	if (sdata->has_clocks)
		for (i = 0; i < pdata->num_phy_channels; i++) {
			struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
			if (phy->valid)
				clk_unprepare(phy->clk);
		}

	return ret;
}

static int s3c24xx_dma_remove(struct platform_device *pdev)
{
	const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
	struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev);
	struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev);
	int i;

	dma_async_device_unregister(&s3cdma->slave);
	dma_async_device_unregister(&s3cdma->memcpy);

	s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
	s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);

	if (sdata->has_clocks)
		for (i = 0; i < pdata->num_phy_channels; i++) {
			struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
			if (phy->valid)
				clk_unprepare(phy->clk);
		}

	return 0;
}

static struct platform_driver s3c24xx_dma_driver = {
	.driver		= {
		.name	= "s3c24xx-dma",
		.owner	= THIS_MODULE,
	},
	.id_table	= s3c24xx_dma_driver_ids,
	.probe		= s3c24xx_dma_probe,
	.remove		= s3c24xx_dma_remove,
};

module_platform_driver(s3c24xx_dma_driver);

bool s3c24xx_dma_filter(struct dma_chan *chan, void *param)
{
	struct s3c24xx_dma_chan *s3cchan;

	if (chan->device->dev->driver != &s3c24xx_dma_driver.driver)
		return false;

	s3cchan = to_s3c24xx_dma_chan(chan);

	return s3cchan->id == (int)param;
}
EXPORT_SYMBOL(s3c24xx_dma_filter);
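
/*
 * Usage sketch for client drivers (the channel id is purely an example):
 * request virtual slave channel 0 through the generic dmaengine API:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, s3c24xx_dma_filter, (void *)0);
 */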

MODULE_DESCRIPTION("S3C24XX DMA Driver");
MODULE_AUTHOR("Heiko Stuebner");
MODULE_LICENSE("GPL v2");