about summary refs log tree commit diff stats
path: root/crypto
diff options
context:
space:
mode:
author	Dan Williams <dan.j.williams@intel.com>	2013-10-18 13:35:25 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2013-11-14 14:00:39 -0500
commit	8971646294bda65f8666b60cb2cb3d5e172c99bf (patch)
tree	b3d75fd6c909de400e8028a337d70ca943180c87 /crypto
parent	45c463ae924c62af4aa64ded1ca831f334a1db65 (diff)
async_memcpy: convert to dmaengine_unmap_data
Use the generic unmap object to unmap dma buffers.

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Reported-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
[bzolnier: add missing unmap->len initialization]
[bzolnier: fix whitespace damage]
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
[djbw: add DMA_ENGINE=n support]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/async_tx/async_memcpy.c	40
1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 9e62feffb374..72750214f779 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -50,33 +50,37 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 						      &dest, 1, &src, 1, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
+	struct dmaengine_unmap_data *unmap = NULL;
 
-	if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
-		dma_addr_t dma_dest, dma_src;
-		unsigned long dma_prep_flags = 0;
+	if (device)
+		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
+
+	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
+		unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP |
+					       DMA_COMPL_SKIP_DEST_UNMAP;
 
 		if (submit->cb_fn)
 			dma_prep_flags |= DMA_PREP_INTERRUPT;
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_prep_flags |= DMA_PREP_FENCE;
-		dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
-					DMA_FROM_DEVICE);
 
-		dma_src = dma_map_page(device->dev, src, src_offset, len,
-				       DMA_TO_DEVICE);
+		unmap->to_cnt = 1;
+		unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len,
+					      DMA_TO_DEVICE);
+		unmap->from_cnt = 1;
+		unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len,
+					      DMA_FROM_DEVICE);
+		unmap->len = len;
 
-		tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
-						    len, dma_prep_flags);
-		if (!tx) {
-			dma_unmap_page(device->dev, dma_dest, len,
-				       DMA_FROM_DEVICE);
-			dma_unmap_page(device->dev, dma_src, len,
-				       DMA_TO_DEVICE);
-		}
+		tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+						    unmap->addr[0], len,
+						    dma_prep_flags);
 	}
 
 	if (tx) {
 		pr_debug("%s: (async) len: %zu\n", __func__, len);
+
+		dma_set_unmap(tx, unmap);
 		async_tx_submit(chan, tx, submit);
 	} else {
 		void *dest_buf, *src_buf;
@@ -96,6 +100,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 		async_tx_sync_epilog(submit);
 	}
 
+	dmaengine_unmap_put(unmap);
+
 	return tx;
 }
 EXPORT_SYMBOL_GPL(async_memcpy);