aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma
diff options
context:
space:
mode:
authorLars-Peter Clausen <lars@metafoo.de>2012-04-25 14:50:52 -0400
committerVinod Koul <vinod.koul@linux.intel.com>2012-05-11 02:34:38 -0400
commitfdaf9c4b22247a6cc6cda9459be3e52764c14d95 (patch)
treec567736f1f47f91003a658b11075e974cee4321b /drivers/dma
parentcbb796ccd8c33c50249b876d9773dfa8e67d39cb (diff)
dmaengine: Use sg_dma_len(sg) instead of sg->length
sg->length may or may not contain the length of the dma region to transfer, depending on the architecture - sg_dma_len(sg) always will though. For the architectures which use the drivers modified by this patch it probably is the case that sg->length contains the dma transfer length. But to be consistent and future proof change them to use sg_dma_len. To quote Russell King: sg->length is meaningless to something performing DMA. In cases where sg_dma_len(sg) and sg->length are the same storage, then there's no problem. But scatterlists _can_ (and on some architectures do) split them - especially when you have an IOMMU which can allow you to combine a scatterlist into fewer entries. So, anything using sg->length for the size of a scatterlist's DMA transfer _after_ a call to dma_map_sg() is almost certainly buggy. The patch has been generated using the following coccinelle patch: <smpl> @@ struct scatterlist *sg; expression X; @@ -sg[X].length +sg_dma_len(&sg[X]) @@ struct scatterlist *sg; @@ -sg->length +sg_dma_len(sg) </smpl> Signed-off-by: Lars-Peter Clausen <lars@metafoo.de> Acked-by: Linus Walleij <linus.walleij@linaro.org> Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/amba-pl08x.c2
-rw-r--r--drivers/dma/coh901318.c2
-rw-r--r--drivers/dma/imx-dma.c12
-rw-r--r--drivers/dma/imx-sdma.c2
-rw-r--r--drivers/dma/intel_mid_dma.c6
-rw-r--r--drivers/dma/mxs-dma.c6
-rw-r--r--drivers/dma/ste_dma40.c2
7 files changed, 16 insertions, 16 deletions
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 003220a60bcb..49ecbbb8932d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1328,7 +1328,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1328 int ret, tmp; 1328 int ret, tmp;
1329 1329
1330 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", 1330 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1331 __func__, sgl->length, plchan->name); 1331 __func__, sg_dma_len(sgl), plchan->name);
1332 1332
1333 txd = pl08x_get_txd(plchan, flags); 1333 txd = pl08x_get_txd(plchan, flags);
1334 if (!txd) { 1334 if (!txd) {
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index dc89455f5550..c0b650c70bbd 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1040,7 +1040,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1040 1040
1041 if (!sgl) 1041 if (!sgl)
1042 goto out; 1042 goto out;
1043 if (sgl->length == 0) 1043 if (sg_dma_len(sgl) == 0)
1044 goto out; 1044 goto out;
1045 1045
1046 spin_lock_irqsave(&cohc->lock, flg); 1046 spin_lock_irqsave(&cohc->lock, flg);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index bb787d8e1529..fcfeb3cd8d31 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -227,7 +227,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d)
227 struct scatterlist *sg = d->sg; 227 struct scatterlist *sg = d->sg;
228 unsigned long now; 228 unsigned long now;
229 229
230 now = min(d->len, sg->length); 230 now = min(d->len, sg_dma_len(sg));
231 if (d->len != IMX_DMA_LENGTH_LOOP) 231 if (d->len != IMX_DMA_LENGTH_LOOP)
232 d->len -= now; 232 d->len -= now;
233 233
@@ -763,16 +763,16 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
763 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); 763 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
764 764
765 for_each_sg(sgl, sg, sg_len, i) { 765 for_each_sg(sgl, sg, sg_len, i) {
766 dma_length += sg->length; 766 dma_length += sg_dma_len(sg);
767 } 767 }
768 768
769 switch (imxdmac->word_size) { 769 switch (imxdmac->word_size) {
770 case DMA_SLAVE_BUSWIDTH_4_BYTES: 770 case DMA_SLAVE_BUSWIDTH_4_BYTES:
771 if (sgl->length & 3 || sgl->dma_address & 3) 771 if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
772 return NULL; 772 return NULL;
773 break; 773 break;
774 case DMA_SLAVE_BUSWIDTH_2_BYTES: 774 case DMA_SLAVE_BUSWIDTH_2_BYTES:
775 if (sgl->length & 1 || sgl->dma_address & 1) 775 if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
776 return NULL; 776 return NULL;
777 break; 777 break;
778 case DMA_SLAVE_BUSWIDTH_1_BYTE: 778 case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -831,13 +831,13 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
831 imxdmac->sg_list[i].page_link = 0; 831 imxdmac->sg_list[i].page_link = 0;
832 imxdmac->sg_list[i].offset = 0; 832 imxdmac->sg_list[i].offset = 0;
833 imxdmac->sg_list[i].dma_address = dma_addr; 833 imxdmac->sg_list[i].dma_address = dma_addr;
834 imxdmac->sg_list[i].length = period_len; 834 sg_dma_len(&imxdmac->sg_list[i]) = period_len;
835 dma_addr += period_len; 835 dma_addr += period_len;
836 } 836 }
837 837
838 /* close the loop */ 838 /* close the loop */
839 imxdmac->sg_list[periods].offset = 0; 839 imxdmac->sg_list[periods].offset = 0;
840 imxdmac->sg_list[periods].length = 0; 840 sg_dma_len(&imxdmac->sg_list[periods]) = 0;
841 imxdmac->sg_list[periods].page_link = 841 imxdmac->sg_list[periods].page_link =
842 ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; 842 ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
843 843
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 5a457777f5c0..cd0619a897ff 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -941,7 +941,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
941 941
942 bd->buffer_addr = sg->dma_address; 942 bd->buffer_addr = sg->dma_address;
943 943
944 count = sg->length; 944 count = sg_dma_len(sg);
945 945
946 if (count > 0xffff) { 946 if (count > 0xffff) {
947 dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n", 947 dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index d0ef5937fbf6..222e907bfaaa 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -394,7 +394,7 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
394 } 394 }
395 } 395 }
396 /*Populate CTL_HI values*/ 396 /*Populate CTL_HI values*/
397 ctl_hi.ctlx.block_ts = get_block_ts(sg->length, 397 ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
398 desc->width, 398 desc->width,
399 midc->dma->block_size); 399 midc->dma->block_size);
400 /*Populate SAR and DAR values*/ 400 /*Populate SAR and DAR values*/
@@ -747,7 +747,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
747 txd = intel_mid_dma_prep_memcpy(chan, 747 txd = intel_mid_dma_prep_memcpy(chan,
748 mids->dma_slave.dst_addr, 748 mids->dma_slave.dst_addr,
749 mids->dma_slave.src_addr, 749 mids->dma_slave.src_addr,
750 sgl->length, 750 sg_dma_len(sgl),
751 flags); 751 flags);
752 return txd; 752 return txd;
753 } else { 753 } else {
@@ -759,7 +759,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
759 pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", 759 pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
760 sg_len, direction, flags); 760 sg_len, direction, flags);
761 761
762 txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags); 762 txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
763 if (NULL == txd) { 763 if (NULL == txd) {
764 pr_err("MDMA: Prep memcpy failed\n"); 764 pr_err("MDMA: Prep memcpy failed\n");
765 return NULL; 765 return NULL;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 655d4ce6ed0d..3db3a48d3f01 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -415,9 +415,9 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
415 ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND); 415 ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
416 } else { 416 } else {
417 for_each_sg(sgl, sg, sg_len, i) { 417 for_each_sg(sgl, sg, sg_len, i) {
418 if (sg->length > MAX_XFER_BYTES) { 418 if (sg_dma_len(sg) > MAX_XFER_BYTES) {
419 dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n", 419 dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
420 sg->length, MAX_XFER_BYTES); 420 sg_dma_len(sg), MAX_XFER_BYTES);
421 goto err_out; 421 goto err_out;
422 } 422 }
423 423
@@ -425,7 +425,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
425 425
426 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; 426 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
427 ccw->bufaddr = sg->dma_address; 427 ccw->bufaddr = sg->dma_address;
428 ccw->xfer_bytes = sg->length; 428 ccw->xfer_bytes = sg_dma_len(sg);
429 429
430 ccw->bits = 0; 430 ccw->bits = 0;
431 ccw->bits |= CCW_CHAIN; 431 ccw->bits |= CCW_CHAIN;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 2ed1ac3513f3..000d309602b2 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2362,7 +2362,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2362 } 2362 }
2363 2363
2364 sg[periods].offset = 0; 2364 sg[periods].offset = 0;
2365 sg[periods].length = 0; 2365 sg_dma_len(&sg[periods]) = 0;
2366 sg[periods].page_link = 2366 sg[periods].page_link =
2367 ((unsigned long)sg | 0x01) & ~0x02; 2367 ((unsigned long)sg | 0x01) & ~0x02;
2368 2368