author		Lars-Peter Clausen <lars@metafoo.de>	2012-04-25 14:50:52 -0400
committer	Vinod Koul <vinod.koul@linux.intel.com>	2012-05-11 02:34:38 -0400
commit		fdaf9c4b22247a6cc6cda9459be3e52764c14d95
tree		c567736f1f47f91003a658b11075e974cee4321b /drivers/dma/mxs-dma.c
parent		cbb796ccd8c33c50249b876d9773dfa8e67d39cb
dmaengine: Use sg_dma_len(sg) instead of sg->length
sg->length may or may not contain the length of the DMA region to transfer,
depending on the architecture - sg_dma_len(sg) always does. For the
architectures whose drivers are modified by this patch, sg->length probably
does contain the DMA transfer length, but to be consistent and future-proof,
change them to use sg_dma_len.
To quote Russell King:
sg->length is meaningless to something performing DMA.
Where sg_dma_len(sg) and sg->length are the same storage, there's no
problem. But scatterlists _can_ (and on some architectures do) split
them - especially when you have an IOMMU which can allow you to
combine a scatterlist into fewer entries.
So, anything using sg->length for the size of a scatterlist's DMA transfer
_after_ a call to dma_map_sg() is almost certainly buggy.
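To make the distinction concrete, here is a minimal sketch of the correct
pattern for walking a mapped scatterlist. It is not taken from the patch; the
function name, device pointer, and DMA_TO_DEVICE direction are illustrative
assumptions:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Sketch only: dev and the initialized sgl/nents are assumed to come
 * from the caller; the direction is arbitrarily DMA_TO_DEVICE here. */
static int issue_sg_transfers(struct device *dev, struct scatterlist *sgl,
			      int nents)
{
	struct scatterlist *sg;
	int mapped, i;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* Loop over the mapped count, not nents: an IOMMU may have
	 * combined entries during mapping. */
	for_each_sg(sgl, sg, mapped, i) {
		dma_addr_t addr = sg_dma_address(sg);	/* not sg_phys(sg) */
		unsigned int len = sg_dma_len(sg);	/* not sg->length */

		/* ... program addr/len into the hardware here ... */
	}

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}

The key points are that the loop bound is the count returned by dma_map_sg(),
which may be smaller than nents after IOMMU merging, and that both the address
and the length come from the DMA accessors rather than the CPU-side fields.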
The patch has been generated using the following Coccinelle semantic patch:
<smpl>
@@
struct scatterlist *sg;
expression X;
@@
-sg[X].length
+sg_dma_len(&sg[X])
@@
struct scatterlist *sg;
@@
-sg->length
+sg_dma_len(sg)
</smpl>
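As a usage note (this invocation is not part of the commit, and the .cocci
file name is illustrative), a semantic patch like the one above can be applied
across a subtree with Coccinelle's spatch tool:

spatch --sp-file sg_dma_len.cocci --in-place --dir drivers/dma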
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
Diffstat (limited to 'drivers/dma/mxs-dma.c')
 drivers/dma/mxs-dma.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 655d4ce6ed0d..3db3a48d3f01 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -415,9 +415,9 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
 	} else {
 		for_each_sg(sgl, sg, sg_len, i) {
-			if (sg->length > MAX_XFER_BYTES) {
+			if (sg_dma_len(sg) > MAX_XFER_BYTES) {
 				dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
-						sg->length, MAX_XFER_BYTES);
+						sg_dma_len(sg), MAX_XFER_BYTES);
 				goto err_out;
 			}
 
@@ -425,7 +425,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 
 			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
 			ccw->bufaddr = sg->dma_address;
-			ccw->xfer_bytes = sg->length;
+			ccw->xfer_bytes = sg_dma_len(sg);
 
 			ccw->bits = 0;
 			ccw->bits |= CCW_CHAIN;