author     Javier Martin <javier.martin@vista-silicon.com>   2012-03-02 03:28:47 -0500
committer  Vinod Koul <vinod.koul@linux.intel.com>           2012-03-06 06:49:21 -0500
commit     9e15db7ce949e9f2d8bb6ce32a74212a4f662370 (patch)
tree       be3b1acb24a55f9b3b88381ef1078fae0b00b3ba /drivers/dma/imx-dma.c
parent     6c05f09155f40368c51ce00b8291401858e49bcb (diff)
dmaengine: Add support for multiple descriptors for imx-dma.
The dmaengine framework allows several descriptors to be queued
for transfer on a channel. It also specifies that tasklets must
be used for DMA completion callbacks.
Acked-by: Sascha Hauer <s.hauer@pengutronix.de>
Signed-off-by: Javier Martin <javier.martin@vista-silicon.com>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
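This change gives each DMA channel a small pool of descriptors (up to IMXDMA_MAX_CHAN_DESCRIPTORS on the ld_free list); a descriptor moves to ld_queue when it is submitted and to ld_active once the transfer is issued, and completion is reported from a tasklet. As a rough, hypothetical illustration of what this enables on the consumer side, the sketch below queues two slave transfers on one channel using the generic dmaengine client helpers. It is not part of the patch: the channel, scatterlists and completion are assumed to be set up elsewhere, and the completion callback runs in tasklet (softirq) context, so it must not sleep.

/* Hypothetical dmaengine client: queue two DEV_TO_MEM transfers on one channel. */
#include <linux/dmaengine.h>
#include <linux/completion.h>

static void xfer_done(void *param)
{
	/* Called from the DMA driver's tasklet (softirq context): no sleeping. */
	complete(param);
}

static int queue_two_transfers(struct dma_chan *chan,
			       struct scatterlist *sg_a, unsigned int nents_a,
			       struct scatterlist *sg_b, unsigned int nents_b,
			       struct completion *done)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	/* First descriptor: ask to be notified when it completes. */
	txd = dmaengine_prep_slave_sg(chan, sg_a, nents_a, DMA_DEV_TO_MEM,
				      DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;
	txd->callback = xfer_done;
	txd->callback_param = done;
	dmaengine_submit(txd);

	/* A second descriptor can be queued before the first has completed. */
	txd = dmaengine_prep_slave_sg(chan, sg_b, nents_b, DMA_DEV_TO_MEM,
				      DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;
	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Kick the channel; queued descriptors are started in submission order. */
	dma_async_issue_pending(chan);
	return 0;
}

dma_async_issue_pending() starts the first queued descriptor; with this patch, each following descriptor is started from the driver's tasklet as the previous transfer completes.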
Diffstat (limited to 'drivers/dma/imx-dma.c')
-rw-r--r--  drivers/dma/imx-dma.c | 332
1 file changed, 258 insertions(+), 74 deletions(-)
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 9a1797873169..85b3d3c21d91 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -5,6 +5,7 @@
  * found on i.MX1/21/27
  *
  * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
@@ -13,6 +14,7 @@
  * http://www.opensource.org/licenses/gpl-license.html
  * http://www.gnu.org/copyleft/gpl.html
  */
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -29,19 +31,52 @@
 #include <mach/dma-v1.h>
 #include <mach/hardware.h>
 
+#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
+
+enum imxdma_prep_type {
+	IMXDMA_DESC_MEMCPY,
+	IMXDMA_DESC_INTERLEAVED,
+	IMXDMA_DESC_SLAVE_SG,
+	IMXDMA_DESC_CYCLIC,
+};
+
+struct imxdma_desc {
+	struct list_head node;
+	struct dma_async_tx_descriptor desc;
+	enum dma_status status;
+	dma_addr_t src;
+	dma_addr_t dest;
+	size_t len;
+	unsigned int dmamode;
+	enum imxdma_prep_type type;
+	/* For memcpy and interleaved */
+	unsigned int config_port;
+	unsigned int config_mem;
+	/* For interleaved transfers */
+	unsigned int x;
+	unsigned int y;
+	unsigned int w;
+	/* For slave sg and cyclic */
+	struct scatterlist *sg;
+	unsigned int sgcount;
+};
+
 struct imxdma_channel {
 	struct imxdma_engine *imxdma;
 	unsigned int channel;
 	unsigned int imxdma_channel;
 
+	struct tasklet_struct dma_tasklet;
+	struct list_head ld_free;
+	struct list_head ld_queue;
+	struct list_head ld_active;
+	int descs_allocated;
 	enum dma_slave_buswidth word_size;
 	dma_addr_t per_address;
 	u32 watermark_level;
 	struct dma_chan chan;
 	spinlock_t lock;
-	struct dma_async_tx_descriptor desc;
 	dma_cookie_t last_completed;
-	enum dma_status status;
 	int dma_request;
 	struct scatterlist *sg_list;
 };
@@ -60,27 +95,31 @@ static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
 	return container_of(chan, struct imxdma_channel, chan);
 }
 
-static void imxdma_handle(struct imxdma_channel *imxdmac)
+static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
 {
-	if (imxdmac->desc.callback)
-		imxdmac->desc.callback(imxdmac->desc.callback_param);
-	imxdmac->last_completed = imxdmac->desc.cookie;
+	struct imxdma_desc *desc;
+
+	if (!list_empty(&imxdmac->ld_active)) {
+		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
+					node);
+		if (desc->type == IMXDMA_DESC_CYCLIC)
+			return true;
+	}
+	return false;
 }
 
 static void imxdma_irq_handler(int channel, void *data)
 {
 	struct imxdma_channel *imxdmac = data;
 
-	imxdmac->status = DMA_SUCCESS;
-	imxdma_handle(imxdmac);
+	tasklet_schedule(&imxdmac->dma_tasklet);
 }
 
 static void imxdma_err_handler(int channel, void *data, int error)
 {
 	struct imxdma_channel *imxdmac = data;
 
-	imxdmac->status = DMA_ERROR;
-	imxdma_handle(imxdmac);
+	tasklet_schedule(&imxdmac->dma_tasklet);
 }
 
 static void imxdma_progression(int channel, void *data,
@@ -88,8 +127,88 @@ static void imxdma_progression(int channel, void *data,
 {
 	struct imxdma_channel *imxdmac = data;
 
-	imxdmac->status = DMA_SUCCESS;
-	imxdma_handle(imxdmac);
+	tasklet_schedule(&imxdmac->dma_tasklet);
+}
+
+static int imxdma_xfer_desc(struct imxdma_desc *d)
+{
+	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
+	int ret;
+
+	/* Configure and enable */
+	switch (d->type) {
+	case IMXDMA_DESC_MEMCPY:
+		ret = imx_dma_config_channel(imxdmac->imxdma_channel,
+					d->config_port, d->config_mem, 0, 0);
+		if (ret < 0)
+			return ret;
+		ret = imx_dma_setup_single(imxdmac->imxdma_channel, d->src,
+					d->len, d->dest, d->dmamode);
+		if (ret < 0)
+			return ret;
+		break;
+	case IMXDMA_DESC_CYCLIC:
+		ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
+					imxdma_progression);
+		if (ret < 0)
+			return ret;
+		/*
+		 * We fall through here since cyclic transfer is the same as
+		 * slave_sg adding a progression handler and a specific sg
+		 * configuration which is done in 'imxdma_prep_dma_cyclic'.
+		 */
+	case IMXDMA_DESC_SLAVE_SG:
+		if (d->dmamode == DMA_MODE_READ)
+			ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg,
+					d->sgcount, d->len, d->src, d->dmamode);
+		else
+			ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg,
+					d->sgcount, d->len, d->dest, d->dmamode);
+		if (ret < 0)
+			return ret;
+		break;
+	default:
+		return -EINVAL;
+	}
+	imx_dma_enable(imxdmac->imxdma_channel);
+	return 0;
+}
+
+static void imxdma_tasklet(unsigned long data)
+{
+	struct imxdma_channel *imxdmac = (void *)data;
+	struct imxdma_engine *imxdma = imxdmac->imxdma;
+	struct imxdma_desc *desc;
+
+	spin_lock(&imxdmac->lock);
+
+	if (list_empty(&imxdmac->ld_active)) {
+		/* Someone might have called terminate all */
+		goto out;
+	}
+	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
+
+	if (desc->desc.callback)
+		desc->desc.callback(desc->desc.callback_param);
+
+	imxdmac->last_completed = desc->desc.cookie;
+
+	/* If we are dealing with a cyclic descriptor keep it on ld_active */
+	if (imxdma_chan_is_doing_cyclic(imxdmac))
+		goto out;
+
+	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
+
+	if (!list_empty(&imxdmac->ld_queue)) {
+		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
+					node);
+		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
+		if (imxdma_xfer_desc(desc) < 0)
+			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
+				 __func__, imxdmac->channel);
+	}
+out:
+	spin_unlock(&imxdmac->lock);
 }
 
 static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -98,12 +217,17 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
 	struct dma_slave_config *dmaengine_cfg = (void *)arg;
 	int ret;
+	unsigned long flags;
 	unsigned int mode = 0;
 
 	switch (cmd) {
 	case DMA_TERMINATE_ALL:
-		imxdmac->status = DMA_ERROR;
 		imx_dma_disable(imxdmac->imxdma_channel);
+
+		spin_lock_irqsave(&imxdmac->lock, flags);
+		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
+		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
+		spin_unlock_irqrestore(&imxdmac->lock, flags);
 		return 0;
 	case DMA_SLAVE_CONFIG:
 		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
@@ -154,11 +278,14 @@ static enum dma_status imxdma_tx_status(struct dma_chan *chan,
 	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
 	dma_cookie_t last_used;
 	enum dma_status ret;
+	unsigned long flags;
 
+	spin_lock_irqsave(&imxdmac->lock, flags);
 	last_used = chan->cookie;
 
 	ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used);
 	dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0);
+	spin_unlock_irqrestore(&imxdmac->lock, flags);
 
 	return ret;
 }
@@ -171,7 +298,6 @@ static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma)
 		cookie = 1;
 
 	imxdma->chan.cookie = cookie;
-	imxdma->desc.cookie = cookie;
 
 	return cookie;
 }
@@ -180,12 +306,15 @@ static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
 	dma_cookie_t cookie;
+	unsigned long flags;
 
-	spin_lock_irq(&imxdmac->lock);
+	spin_lock_irqsave(&imxdmac->lock, flags);
 
+	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
 	cookie = imxdma_assign_cookie(imxdmac);
+	tx->cookie = cookie;
 
-	spin_unlock_irq(&imxdmac->lock);
+	spin_unlock_irqrestore(&imxdmac->lock, flags);
 
 	return cookie;
 }
@@ -198,21 +327,48 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan)
 	if (data != NULL)
 		imxdmac->dma_request = data->dma_request;
 
-	dma_async_tx_descriptor_init(&imxdmac->desc, chan);
-	imxdmac->desc.tx_submit = imxdma_tx_submit;
-	/* txd.flags will be overwritten in prep funcs */
-	imxdmac->desc.flags = DMA_CTRL_ACK;
+	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
+		struct imxdma_desc *desc;
 
-	imxdmac->status = DMA_SUCCESS;
+		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+		if (!desc)
+			break;
+		__memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor));
+		dma_async_tx_descriptor_init(&desc->desc, chan);
+		desc->desc.tx_submit = imxdma_tx_submit;
+		/* txd.flags will be overwritten in prep funcs */
+		desc->desc.flags = DMA_CTRL_ACK;
+		desc->status = DMA_SUCCESS;
+
+		list_add_tail(&desc->node, &imxdmac->ld_free);
+		imxdmac->descs_allocated++;
+	}
 
-	return 0;
+	if (!imxdmac->descs_allocated)
+		return -ENOMEM;
+
+	return imxdmac->descs_allocated;
 }
 
 static void imxdma_free_chan_resources(struct dma_chan *chan)
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+	struct imxdma_desc *desc, *_desc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&imxdmac->lock, flags);
 
 	imx_dma_disable(imxdmac->imxdma_channel);
+	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
+	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
+
+	spin_unlock_irqrestore(&imxdmac->lock, flags);
+
+	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
+		kfree(desc);
+		imxdmac->descs_allocated--;
+	}
+	INIT_LIST_HEAD(&imxdmac->ld_free);
 
 	if (imxdmac->sg_list) {
 		kfree(imxdmac->sg_list);
@@ -227,23 +383,19 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
 	struct scatterlist *sg;
-	int i, ret, dma_length = 0;
-	unsigned int dmamode;
+	int i, dma_length = 0;
+	struct imxdma_desc *desc;
 
-	if (imxdmac->status == DMA_IN_PROGRESS)
+	if (list_empty(&imxdmac->ld_free) ||
+	    imxdma_chan_is_doing_cyclic(imxdmac))
 		return NULL;
 
-	imxdmac->status = DMA_IN_PROGRESS;
+	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
 
 	for_each_sg(sgl, sg, sg_len, i) {
 		dma_length += sg->length;
 	}
 
-	if (direction == DMA_DEV_TO_MEM)
-		dmamode = DMA_MODE_READ;
-	else
-		dmamode = DMA_MODE_WRITE;
-
 	switch (imxdmac->word_size) {
 	case DMA_SLAVE_BUSWIDTH_4_BYTES:
 		if (sgl->length & 3 || sgl->dma_address & 3)
@@ -259,12 +411,21 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
 		return NULL;
 	}
 
-	ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
-			dma_length, imxdmac->per_address, dmamode);
-	if (ret)
-		return NULL;
+	desc->type = IMXDMA_DESC_SLAVE_SG;
+	desc->sg = sgl;
+	desc->sgcount = sg_len;
+	desc->len = dma_length;
+	if (direction == DMA_DEV_TO_MEM) {
+		desc->dmamode = DMA_MODE_READ;
+		desc->src = imxdmac->per_address;
+	} else {
+		desc->dmamode = DMA_MODE_WRITE;
+		desc->dest = imxdmac->per_address;
+	}
+	desc->desc.callback = NULL;
+	desc->desc.callback_param = NULL;
 
-	return &imxdmac->desc;
+	return &desc->desc;
 }
 
 static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
@@ -273,23 +434,18 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
-	int i, ret;
+	struct imxdma_desc *desc;
+	int i;
 	unsigned int periods = buf_len / period_len;
-	unsigned int dmamode;
 
 	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
 			__func__, imxdmac->channel, buf_len, period_len);
 
-	if (imxdmac->status == DMA_IN_PROGRESS)
+	if (list_empty(&imxdmac->ld_free) ||
+	    imxdma_chan_is_doing_cyclic(imxdmac))
 		return NULL;
-	imxdmac->status = DMA_IN_PROGRESS;
 
-	ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
-			imxdma_progression);
-	if (ret) {
-		dev_err(imxdma->dev, "Failed to setup the DMA handler\n");
-		return NULL;
-	}
+	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
 
 	if (imxdmac->sg_list)
 		kfree(imxdmac->sg_list);
@@ -315,17 +471,21 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
 	imxdmac->sg_list[periods].page_link =
 		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
 
-	if (direction == DMA_DEV_TO_MEM)
-		dmamode = DMA_MODE_READ;
-	else
-		dmamode = DMA_MODE_WRITE;
-
-	ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list, periods,
-			IMX_DMA_LENGTH_LOOP, imxdmac->per_address, dmamode);
-	if (ret)
-		return NULL;
+	desc->type = IMXDMA_DESC_CYCLIC;
+	desc->sg = imxdmac->sg_list;
+	desc->sgcount = periods;
+	desc->len = IMX_DMA_LENGTH_LOOP;
+	if (direction == DMA_DEV_TO_MEM) {
+		desc->dmamode = DMA_MODE_READ;
+		desc->src = imxdmac->per_address;
+	} else {
+		desc->dmamode = DMA_MODE_WRITE;
+		desc->dest = imxdmac->per_address;
+	}
+	desc->desc.callback = NULL;
+	desc->desc.callback_param = NULL;
 
-	return &imxdmac->desc;
+	return &desc->desc;
 }
 
 static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
@@ -334,36 +494,53 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
-	int ret;
+	struct imxdma_desc *desc;
 
 	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
 			__func__, imxdmac->channel, src, dest, len);
 
-	if (imxdmac->status == DMA_IN_PROGRESS)
+	if (list_empty(&imxdmac->ld_free) ||
+	    imxdma_chan_is_doing_cyclic(imxdmac))
 		return NULL;
-	imxdmac->status = DMA_IN_PROGRESS;
 
-	ret = imx_dma_config_channel(imxdmac->imxdma_channel,
-			IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
-			IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
-			0, 0);
-	if (ret)
-		return NULL;
+	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
 
-	ret = imx_dma_setup_single(imxdmac->imxdma_channel, src, len,
-			dest, DMA_MODE_WRITE);
-	if (ret)
-		return NULL;
+	desc->type = IMXDMA_DESC_MEMCPY;
+	desc->src = src;
+	desc->dest = dest;
+	desc->len = len;
+	desc->dmamode = DMA_MODE_WRITE;
+	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
+	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
+	desc->desc.callback = NULL;
+	desc->desc.callback_param = NULL;
 
-	return &imxdmac->desc;
+	return &desc->desc;
 }
 
 static void imxdma_issue_pending(struct dma_chan *chan)
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
-
-	if (imxdmac->status == DMA_IN_PROGRESS)
-		imx_dma_enable(imxdmac->imxdma_channel);
+	struct imxdma_engine *imxdma = imxdmac->imxdma;
+	struct imxdma_desc *desc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&imxdmac->lock, flags);
+	if (list_empty(&imxdmac->ld_active) &&
+	    !list_empty(&imxdmac->ld_queue)) {
+		desc = list_first_entry(&imxdmac->ld_queue,
+					struct imxdma_desc, node);
+
+		if (imxdma_xfer_desc(desc) < 0) {
+			dev_warn(imxdma->dev,
+				 "%s: channel: %d couldn't issue DMA xfer\n",
+				 __func__, imxdmac->channel);
+		} else {
+			list_move_tail(imxdmac->ld_queue.next,
+				       &imxdmac->ld_active);
+		}
+	}
+	spin_unlock_irqrestore(&imxdmac->lock, flags);
 }
 
 static int __init imxdma_probe(struct platform_device *pdev)
@@ -398,11 +575,18 @@ static int __init imxdma_probe(struct platform_device *pdev)
 		imxdmac->imxdma = imxdma;
 		spin_lock_init(&imxdmac->lock);
 
+		INIT_LIST_HEAD(&imxdmac->ld_queue);
+		INIT_LIST_HEAD(&imxdmac->ld_free);
+		INIT_LIST_HEAD(&imxdmac->ld_active);
+
+		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
+			     (unsigned long)imxdmac);
 		imxdmac->chan.device = &imxdma->dma_device;
 		imxdmac->channel = i;
 
 		/* Add the channel to the DMAC list */
-		list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels);
+		list_add_tail(&imxdmac->chan.device_node,
+			      &imxdma->dma_device.channels);
 	}
 
 	imxdma->dev = &pdev->dev;