author     Javier Martin <javier.martin@vista-silicon.com>   2012-03-22 09:54:03 -0400
committer  Vinod Koul <vinod.koul@linux.intel.com>           2012-03-26 02:01:30 -0400
commit     2efc3449d7b11f36f532180cb738364fd2c28e03 (patch)
tree       db7c65ad6146db4d08e850a3939c99bba722de4d /drivers/dma
parent     232e3c2c7961fb3312a80df3747f1c29f0ed512e (diff)
dmaengine: imx-dma: remove dma_mode member of internal structure.
dmaengine now provides 'enum dma_transfer_direction' to properly
specify the DMA transfer direction. For this reason, the DMA_MODE_*
defines are replaced by this new type, and the dma_mode member
therefore becomes redundant.
Signed-off-by: Javier Martin <javier.martin@vista-silicon.com>
Acked-by: Sascha Hauer <s.hauer@pengutronix.de>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
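
The generic type adopted here lives in include/linux/dmaengine.h. For reference, a sketch of the enum as the dmaengine core defined it in this era (a reference copy, not part of the patch; the inline comments are editorial):

enum dma_transfer_direction {
	DMA_MEM_TO_MEM,		/* memcpy-style, no peripheral involved */
	DMA_MEM_TO_DEV,		/* slave write: memory to peripheral */
	DMA_DEV_TO_MEM,		/* slave read: peripheral to memory */
	DMA_DEV_TO_DEV,		/* peripheral to peripheral */
	DMA_TRANS_NONE,
};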
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/imx-dma.c | 103
1 file changed, 45 insertions(+), 58 deletions(-)
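
Where desc->direction originates: a dmaengine client passes the direction into the slave prep callback, and after this patch imx-dma records that value verbatim instead of translating it to DMA_MODE_*. A minimal, hypothetical client sketch (chan, sgl and sg_len are assumed to be set up elsewhere; dmaengine_prep_slave_sg() is the generic wrapper around the driver's prep callback):

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Hypothetical completion hook, invoked from the driver's tasklet. */
static void rx_done(void *param)
{
}

static int start_rx(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;

	/* DMA_DEV_TO_MEM is what the driver-private DMA_MODE_READ used
	 * to encode; imx-dma now stores it in desc->direction as-is. */
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = rx_done;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}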
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 8603c75b0e1b..04a2c1446dc2 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -36,10 +36,6 @@
 #define IMXDMA_MAX_CHAN_DESCRIPTORS 16
 #define IMX_DMA_CHANNELS 16
 
-#define DMA_MODE_READ 0
-#define DMA_MODE_WRITE 1
-#define DMA_MODE_MASK 1
-
 #define IMX_DMA_LENGTH_LOOP ((unsigned int)-1)
 #define IMX_DMA_MEMSIZE_32 (0 << 4)
 #define IMX_DMA_MEMSIZE_8 (1 << 4)
@@ -133,7 +129,6 @@ enum imxdma_prep_type {
  */
 
 struct imxdma_channel_internal {
-	unsigned int dma_mode;
 	struct scatterlist *sg;
 	unsigned int resbytes;
 
@@ -154,7 +149,7 @@ struct imxdma_desc {
 	dma_addr_t src;
 	dma_addr_t dest;
 	size_t len;
-	unsigned int dmamode;
+	enum dma_transfer_direction direction;
 	enum imxdma_prep_type type;
 	/* For memcpy and interleaved */
 	unsigned int config_port;
@@ -239,8 +234,9 @@ static int imxdma_hw_chain(struct imxdma_channel_internal *imxdma)
 /*
  * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
  */
-static inline int imxdma_sg_next(struct imxdma_channel *imxdmac, struct scatterlist *sg)
+static inline int imxdma_sg_next(struct imxdma_desc *d, struct scatterlist *sg)
 {
+	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
 	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
 	unsigned long now;
 
@@ -248,7 +244,7 @@ static inline int imxdma_sg_next(struct imxdma_channel *imxdmac, struct scatterl
 	if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP)
 		imxdma->resbytes -= now;
 
-	if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ)
+	if (d->direction == DMA_DEV_TO_MEM)
 		imx_dmav1_writel(sg->dma_address, DMA_DAR(imxdmac->channel));
 	else
 		imx_dmav1_writel(sg->dma_address, DMA_SAR(imxdmac->channel));
@@ -265,14 +261,12 @@
 }
 
 static int
-imxdma_setup_single_hw(struct imxdma_channel *imxdmac, dma_addr_t dma_address,
-		       unsigned int dma_length, unsigned int dev_addr,
-		       unsigned int dmamode)
+imxdma_setup_mem2mem_hw(struct imxdma_channel *imxdmac, dma_addr_t dma_address,
+			unsigned int dma_length, unsigned int dev_addr)
 {
 	int channel = imxdmac->channel;
 
 	imxdmac->internal.sg = NULL;
-	imxdmac->internal.dma_mode = dmamode;
 
 	if (!dma_address) {
 		printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
@@ -286,38 +280,24 @@ imxdma_setup_single_hw(struct imxdma_channel *imxdmac, dma_addr_t dma_address,
 		return -EINVAL;
 	}
 
-	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
-		pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d "
-			"dev_addr=0x%08x for read\n",
-			channel, __func__, (unsigned int)dma_address,
-			dma_length, dev_addr);
-
-		imx_dmav1_writel(dev_addr, DMA_SAR(channel));
-		imx_dmav1_writel(dma_address, DMA_DAR(channel));
-		imx_dmav1_writel(imxdmac->internal.ccr_from_device, DMA_CCR(channel));
-	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
-		pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d "
-			"dev_addr=0x%08x for write\n",
-			channel, __func__, (unsigned int)dma_address,
-			dma_length, dev_addr);
-
-		imx_dmav1_writel(dma_address, DMA_SAR(channel));
-		imx_dmav1_writel(dev_addr, DMA_DAR(channel));
-		imx_dmav1_writel(imxdmac->internal.ccr_to_device,
-				DMA_CCR(channel));
-	} else {
-		printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n",
-			channel);
-		return -EINVAL;
-	}
+	pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d "
+		"dev_addr=0x%08x for write\n",
+		channel, __func__, (unsigned int)dma_address,
+		dma_length, dev_addr);
+
+	imx_dmav1_writel(dma_address, DMA_SAR(channel));
+	imx_dmav1_writel(dev_addr, DMA_DAR(channel));
+	imx_dmav1_writel(imxdmac->internal.ccr_to_device,
+			DMA_CCR(channel));
 
 	imx_dmav1_writel(dma_length, DMA_CNTR(channel));
 
 	return 0;
 }
 
-static void imxdma_enable_hw(struct imxdma_channel *imxdmac)
+static void imxdma_enable_hw(struct imxdma_desc *d)
 {
+	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
 	int channel = imxdmac->channel;
 	unsigned long flags;
 
@@ -338,7 +318,7 @@ static void imxdma_enable_hw(struct imxdma_channel *imxdmac)
 		imxdmac->internal.sg = sg_next(imxdmac->internal.sg);
 		if (imxdmac->internal.sg) {
 			u32 tmp;
-			imxdma_sg_next(imxdmac, imxdmac->internal.sg);
+			imxdma_sg_next(d, imxdmac->internal.sg);
 			tmp = imx_dmav1_readl(DMA_CCR(channel));
 			imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
 					 DMA_CCR(channel));
@@ -395,18 +375,18 @@ imxdma_config_channel_hw(struct imxdma_channel *imxdmac, unsigned int config_por
 }
 
 static int
-imxdma_setup_sg_hw(struct imxdma_channel *imxdmac,
+imxdma_setup_sg_hw(struct imxdma_desc *d,
 		   struct scatterlist *sg, unsigned int sgcount,
 		   unsigned int dma_length, unsigned int dev_addr,
-		   unsigned int dmamode)
+		   enum dma_transfer_direction direction)
 {
+	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
 	int channel = imxdmac->channel;
 
 	if (imxdmac->internal.in_use)
 		return -EBUSY;
 
 	imxdmac->internal.sg = sg;
-	imxdmac->internal.dma_mode = dmamode;
 	imxdmac->internal.resbytes = dma_length;
 
 	if (!sg || !sgcount) {
@@ -421,14 +401,14 @@ imxdma_setup_sg_hw(struct imxdma_channel *imxdmac,
 		return -EINVAL;
 	}
 
-	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
+	if (direction == DMA_DEV_TO_MEM) {
 		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
 			"dev_addr=0x%08x for read\n",
 			channel, __func__, sg, sgcount, dma_length, dev_addr);
 
 		imx_dmav1_writel(dev_addr, DMA_SAR(channel));
 		imx_dmav1_writel(imxdmac->internal.ccr_from_device, DMA_CCR(channel));
-	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
+	} else if (direction == DMA_MEM_TO_DEV) {
 		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
 			"dev_addr=0x%08x for write\n",
 			channel, __func__, sg, sgcount, dma_length, dev_addr);
@@ -441,7 +421,7 @@ imxdma_setup_sg_hw(struct imxdma_channel *imxdmac,
 		return -EINVAL;
 	}
 
-	imxdma_sg_next(imxdmac, sg);
+	imxdma_sg_next(d, sg);
 
 	return 0;
 }
@@ -519,13 +499,26 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
 {
 	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
 	int chno = imxdmac->channel;
+	struct imxdma_desc *desc;
 
 	if (imxdma->sg) {
 		u32 tmp;
 		imxdma->sg = sg_next(imxdma->sg);
 
 		if (imxdma->sg) {
-			imxdma_sg_next(imxdmac, imxdma->sg);
+
+			spin_lock(&imxdmac->lock);
+			if (list_empty(&imxdmac->ld_active)) {
+				spin_unlock(&imxdmac->lock);
+				goto out;
+			}
+
+			desc = list_first_entry(&imxdmac->ld_active,
+						struct imxdma_desc,
+						node);
+			spin_unlock(&imxdmac->lock);
+
+			imxdma_sg_next(desc, imxdma->sg);
 
 			tmp = imx_dmav1_readl(DMA_CCR(chno));
 
@@ -558,6 +551,7 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
 		}
 	}
 
+out:
 	imx_dmav1_writel(0, DMA_CCR(chno));
 	imxdma->in_use = 0;
 	/* Tasklet irq */
@@ -601,8 +595,7 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
 					       d->config_port, d->config_mem, 0, 0);
 		if (ret < 0)
 			return ret;
-		ret = imxdma_setup_single_hw(imxdmac, d->src,
-					     d->len, d->dest, d->dmamode);
+		ret = imxdma_setup_mem2mem_hw(imxdmac, d->src, d->len, d->dest);
 		if (ret < 0)
 			return ret;
 		break;
@@ -610,19 +603,15 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
 	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
 	case IMXDMA_DESC_CYCLIC:
 	case IMXDMA_DESC_SLAVE_SG:
-		if (d->dmamode == DMA_MODE_READ)
-			ret = imxdma_setup_sg_hw(imxdmac, d->sg,
-				d->sgcount, d->len, d->src, d->dmamode);
-		else
-			ret = imxdma_setup_sg_hw(imxdmac, d->sg,
-				d->sgcount, d->len, d->dest, d->dmamode);
+		ret = imxdma_setup_sg_hw(d, d->sg, d->sgcount, d->len,
+					 imxdmac->per_address, d->direction);
 		if (ret < 0)
 			return ret;
 		break;
 	default:
 		return -EINVAL;
 	}
-	imxdma_enable_hw(imxdmac);
+	imxdma_enable_hw(d);
 	return 0;
 }
 
@@ -839,11 +828,10 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
 	desc->sg = sgl;
 	desc->sgcount = sg_len;
 	desc->len = dma_length;
+	desc->direction = direction;
 	if (direction == DMA_DEV_TO_MEM) {
-		desc->dmamode = DMA_MODE_READ;
 		desc->src = imxdmac->per_address;
 	} else {
-		desc->dmamode = DMA_MODE_WRITE;
 		desc->dest = imxdmac->per_address;
 	}
 	desc->desc.callback = NULL;
@@ -900,11 +888,10 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
 	desc->sg = imxdmac->sg_list;
 	desc->sgcount = periods;
 	desc->len = IMX_DMA_LENGTH_LOOP;
+	desc->direction = direction;
 	if (direction == DMA_DEV_TO_MEM) {
-		desc->dmamode = DMA_MODE_READ;
 		desc->src = imxdmac->per_address;
 	} else {
-		desc->dmamode = DMA_MODE_WRITE;
 		desc->dest = imxdmac->per_address;
 	}
 	desc->desc.callback = NULL;
@@ -934,7 +921,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
 	desc->src = src;
 	desc->dest = dest;
 	desc->len = len;
-	desc->dmamode = DMA_MODE_WRITE;
+	desc->direction = DMA_MEM_TO_MEM;
 	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
 	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
 	desc->desc.callback = NULL;
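
Read against the pre-patch code, the net translation the patch performs is small enough to tabulate (a comparison sketch, not code from the tree):

/*
 * Old driver-private mode          New generic direction
 * -----------------------          ---------------------
 * DMA_MODE_READ                    DMA_DEV_TO_MEM   (slave_sg, cyclic)
 * DMA_MODE_WRITE                   DMA_MEM_TO_DEV   (slave_sg, cyclic)
 * DMA_MODE_WRITE  (memcpy case)    DMA_MEM_TO_MEM
 */

The memcpy case is the one semantic cleanup: it was previously tagged DMA_MODE_WRITE even though no peripheral is involved, and DMA_MEM_TO_MEM states that directly.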