author		Thomas Petazzoni <thomas.petazzoni@free-electrons.com>	2012-11-15 07:01:59 -0500
committer	Thomas Petazzoni <thomas.petazzoni@free-electrons.com>	2012-11-20 09:58:58 -0500
commit		c35064c4b6f4e03a4f40cc88e3257525a7b31a68 (patch)
tree		3121632b277602bcadbd74aee609af4add9e62f7 /drivers
parent		01a9508de746bc2ae37dc63b407f2d7cdcb00386 (diff)
dma: mv_xor: simplify dma_sync_single_for_cpu() calls
In mv_xor_memcpy_self_test() and mv_xor_xor_self_test(), all DMA functions are called with dma_chan->device->dev as the 'struct device *' argument, except the calls to dma_sync_single_for_cpu(), which needlessly go through mv_chan->device->pdev->dev. Simplify this by using dma_chan->device->dev directly in the dma_sync_single_for_cpu() calls.

Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
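For reference, a rough sketch of why the two expressions name the same struct device. The dmaengine core fields below are abbreviated but real; the mv_xor-side wiring is recalled from the 2012-era driver and should be read as an assumption, not a quote of the source:

/*
 * Abbreviated sketch (not full definitions) of the dmaengine core types:
 */
struct dma_device {
	struct device *dev;		/* device used for DMA API calls */
	/* ... */
};

struct dma_chan {
	struct dma_device *device;	/* the dma_device this channel belongs to */
	/* ... */
};

/*
 * Assumption about the mv_xor driver of this era: its probe routine
 * registers a dma_device whose ->dev points at &pdev->dev of the same
 * platform_device reachable as mv_chan->device->pdev.  If so:
 *
 *   dma_chan->device->dev  ==  &mv_chan->device->pdev->dev
 *
 * and the extra to_mv_xor_chan() lookup in the self-tests buys nothing.
 */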
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/dma/mv_xor.c	8
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index a9994713072e..b799d33d46bc 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -905,7 +905,6 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
 	dma_cookie_t cookie;
 	struct dma_async_tx_descriptor *tx;
 	int err = 0;
-	struct mv_xor_chan *mv_chan;
 
 	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
 	if (!src)
@@ -951,8 +950,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
 		goto free_resources;
 	}
 
-	mv_chan = to_mv_xor_chan(dma_chan);
-	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
+	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
 				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
 	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
 		dev_err(dma_chan->device->dev,
@@ -984,7 +982,6 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
 	u8 cmp_byte = 0;
 	u32 cmp_word;
 	int err = 0;
-	struct mv_xor_chan *mv_chan;
 
 	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
@@ -1049,8 +1046,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
 		goto free_resources;
 	}
 
-	mv_chan = to_mv_xor_chan(dma_chan);
-	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
+	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
 				PAGE_SIZE, DMA_FROM_DEVICE);
 	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
 		u32 *ptr = page_address(dest);