diff options
author | Javier Martin <javier.martin@vista-silicon.com> | 2012-03-22 09:54:06 -0400 |
---|---|---|
committer | Vinod Koul <vinod.koul@linux.intel.com> | 2012-03-26 02:01:31 -0400 |
commit | 359291a1a095a8a402405cd9c4bab46684e7bcfe (patch) | |
tree | f938d19d4228f68575b56c762deb44f328c0fe91 /drivers/dma/imx-dma.c | |
parent | bdc0c7534c80c479b2336aed3e4016f4743f4853 (diff) |
dmaengine: imx-dma: remove 'imxdma_setup_sg_hw' function.
Removing this function allows moving 'ccr_to_device' and
'ccr_from_device' from internal struct to channel struct.
This represents a step forward towards removing auxiliary
'internal' structure.
Signed-off-by: Javier Martin <javier.martin@vista-silicon.com>
Acked-by: Sascha Hauer <s.hauer@pengutronix.de>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
Diffstat (limited to 'drivers/dma/imx-dma.c')
-rw-r--r-- | drivers/dma/imx-dma.c | 98 |
1 files changed, 35 insertions, 63 deletions
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 25b4108f6224..484f35365902 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -134,9 +134,6 @@ struct imxdma_channel_internal { | |||
134 | 134 | ||
135 | int in_use; | 135 | int in_use; |
136 | 136 | ||
137 | u32 ccr_from_device; | ||
138 | u32 ccr_to_device; | ||
139 | |||
140 | struct timer_list watchdog; | 137 | struct timer_list watchdog; |
141 | 138 | ||
142 | int hw_chaining; | 139 | int hw_chaining; |
@@ -182,6 +179,8 @@ struct imxdma_channel { | |||
182 | enum dma_status status; | 179 | enum dma_status status; |
183 | int dma_request; | 180 | int dma_request; |
184 | struct scatterlist *sg_list; | 181 | struct scatterlist *sg_list; |
182 | u32 ccr_from_device; | ||
183 | u32 ccr_to_device; | ||
185 | }; | 184 | }; |
186 | 185 | ||
187 | struct imxdma_engine { | 186 | struct imxdma_engine { |
@@ -313,58 +312,6 @@ static void imxdma_disable_hw(struct imxdma_channel *imxdmac) | |||
313 | local_irq_restore(flags); | 312 | local_irq_restore(flags); |
314 | } | 313 | } |
315 | 314 | ||
316 | static int | ||
317 | imxdma_setup_sg_hw(struct imxdma_desc *d, | ||
318 | struct scatterlist *sg, unsigned int sgcount, | ||
319 | unsigned int dma_length, unsigned int dev_addr, | ||
320 | enum dma_transfer_direction direction) | ||
321 | { | ||
322 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); | ||
323 | int channel = imxdmac->channel; | ||
324 | |||
325 | if (imxdmac->internal.in_use) | ||
326 | return -EBUSY; | ||
327 | |||
328 | imxdmac->internal.sg = sg; | ||
329 | imxdmac->internal.resbytes = dma_length; | ||
330 | |||
331 | if (!sg || !sgcount) { | ||
332 | printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n", | ||
333 | channel); | ||
334 | return -EINVAL; | ||
335 | } | ||
336 | |||
337 | if (!sg->length) { | ||
338 | printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n", | ||
339 | channel); | ||
340 | return -EINVAL; | ||
341 | } | ||
342 | |||
343 | if (direction == DMA_DEV_TO_MEM) { | ||
344 | pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d " | ||
345 | "dev_addr=0x%08x for read\n", | ||
346 | channel, __func__, sg, sgcount, dma_length, dev_addr); | ||
347 | |||
348 | imx_dmav1_writel(dev_addr, DMA_SAR(channel)); | ||
349 | imx_dmav1_writel(imxdmac->internal.ccr_from_device, DMA_CCR(channel)); | ||
350 | } else if (direction == DMA_MEM_TO_DEV) { | ||
351 | pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d " | ||
352 | "dev_addr=0x%08x for write\n", | ||
353 | channel, __func__, sg, sgcount, dma_length, dev_addr); | ||
354 | |||
355 | imx_dmav1_writel(dev_addr, DMA_DAR(channel)); | ||
356 | imx_dmav1_writel(imxdmac->internal.ccr_to_device, DMA_CCR(channel)); | ||
357 | } else { | ||
358 | printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n", | ||
359 | channel); | ||
360 | return -EINVAL; | ||
361 | } | ||
362 | |||
363 | imxdma_sg_next(d, sg); | ||
364 | |||
365 | return 0; | ||
366 | } | ||
367 | |||
368 | static void imxdma_watchdog(unsigned long data) | 315 | static void imxdma_watchdog(unsigned long data) |
369 | { | 316 | { |
370 | struct imxdma_channel *imxdmac = (struct imxdma_channel *)data; | 317 | struct imxdma_channel *imxdmac = (struct imxdma_channel *)data; |
@@ -526,7 +473,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) | |||
526 | { | 473 | { |
527 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); | 474 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); |
528 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 475 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
529 | int ret; | ||
530 | 476 | ||
531 | /* Configure and enable */ | 477 | /* Configure and enable */ |
532 | switch (d->type) { | 478 | switch (d->type) { |
@@ -548,10 +494,37 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) | |||
548 | /* Cyclic transfer is the same as slave_sg with special sg configuration. */ | 494 | /* Cyclic transfer is the same as slave_sg with special sg configuration. */ |
549 | case IMXDMA_DESC_CYCLIC: | 495 | case IMXDMA_DESC_CYCLIC: |
550 | case IMXDMA_DESC_SLAVE_SG: | 496 | case IMXDMA_DESC_SLAVE_SG: |
551 | ret = imxdma_setup_sg_hw(d, d->sg, d->sgcount, d->len, | 497 | imxdmac->internal.sg = d->sg; |
552 | imxdmac->per_address, d->direction); | 498 | imxdmac->internal.resbytes = d->len; |
553 | if (ret < 0) | 499 | |
554 | return ret; | 500 | if (d->direction == DMA_DEV_TO_MEM) { |
501 | imx_dmav1_writel(imxdmac->per_address, | ||
502 | DMA_SAR(imxdmac->channel)); | ||
503 | imx_dmav1_writel(imxdmac->ccr_from_device, | ||
504 | DMA_CCR(imxdmac->channel)); | ||
505 | |||
506 | dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " | ||
507 | "total length=%d dev_addr=0x%08x (dev2mem)\n", | ||
508 | __func__, imxdmac->channel, d->sg, d->sgcount, | ||
509 | d->len, imxdmac->per_address); | ||
510 | } else if (d->direction == DMA_MEM_TO_DEV) { | ||
511 | imx_dmav1_writel(imxdmac->per_address, | ||
512 | DMA_DAR(imxdmac->channel)); | ||
513 | imx_dmav1_writel(imxdmac->ccr_to_device, | ||
514 | DMA_CCR(imxdmac->channel)); | ||
515 | |||
516 | dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " | ||
517 | "total length=%d dev_addr=0x%08x (mem2dev)\n", | ||
518 | __func__, imxdmac->channel, d->sg, d->sgcount, | ||
519 | d->len, imxdmac->per_address); | ||
520 | } else { | ||
521 | dev_err(imxdma->dev, "%s channel: %d bad dma mode\n", | ||
522 | __func__, imxdmac->channel); | ||
523 | return -EINVAL; | ||
524 | } | ||
525 | |||
526 | imxdma_sg_next(d, d->sg); | ||
527 | |||
555 | break; | 528 | break; |
556 | default: | 529 | default: |
557 | return -EINVAL; | 530 | return -EINVAL; |
@@ -641,11 +614,10 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
641 | imxdmac->internal.hw_chaining = 1; | 614 | imxdmac->internal.hw_chaining = 1; |
642 | if (!imxdma_hw_chain(&imxdmac->internal)) | 615 | if (!imxdma_hw_chain(&imxdmac->internal)) |
643 | return -EINVAL; | 616 | return -EINVAL; |
644 | imxdmac->internal.ccr_from_device = | 617 | imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) | |
645 | (mode | IMX_DMA_TYPE_FIFO) | | ||
646 | ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | | 618 | ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | |
647 | CCR_REN; | 619 | CCR_REN; |
648 | imxdmac->internal.ccr_to_device = | 620 | imxdmac->ccr_to_device = |
649 | (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) | | 621 | (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) | |
650 | ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN; | 622 | ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN; |
651 | imx_dmav1_writel(imxdmac->dma_request, | 623 | imx_dmav1_writel(imxdmac->dma_request, |