author    | Dan Williams <dan.j.williams@intel.com> | 2013-10-18 13:35:27 -0400
committer | Dan Williams <dan.j.williams@intel.com> | 2013-11-14 14:01:30 -0500
commit    | 173e86b2809234cb5f2a50e9a8c159b70e23da1c (patch)
tree      | f477ed2bea30a16ef39a973cf7ed5945b125f73e /crypto
parent    | fb36ab142b2f1dc8c8ad3750413efa7a5cc1c07b (diff)
async_xor_val: convert to dmaengine_unmap_data
Use the generic unmap object to unmap dma buffers.
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Reported-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
[bzolnier: minor cleanups]
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
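
For context, the lifecycle this conversion moves to can be sketched as below. The wrapper function is hypothetical (it is not part of the patch), but it uses only the dmaengine calls the patch itself makes: dmaengine_get_unmap_data(), dma_map_page(), dma_set_unmap(), and dmaengine_unmap_put(). Error handling is abridged.

```c
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Hypothetical sketch of the unmap-object lifecycle adopted by this
 * patch; names mirror async_xor_val() but this helper is illustrative,
 * not part of the change. Error handling is abridged. */
static struct dma_async_tx_descriptor *
xor_val_prep_sketch(struct dma_device *device, struct dma_chan *chan,
		    struct page **src_list, int src_cnt,
		    unsigned int offset, size_t len,
		    enum sum_check_flags *result,
		    unsigned long dma_prep_flags)
{
	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx;
	int i;

	/* one address slot per source page; GFP_NOIO because this can
	 * run in the memory-reclaim/writeback path */
	unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
	if (!unmap)
		return NULL;

	for (i = 0; i < src_cnt; i++) {
		unmap->addr[i] = dma_map_page(device->dev, src_list[i],
					      offset, len, DMA_TO_DEVICE);
		unmap->to_cnt++;	/* record direction for teardown */
	}
	unmap->len = len;

	tx = device->device_prep_dma_xor_val(chan, unmap->addr, src_cnt,
					     len, result, dma_prep_flags);
	if (tx)
		dma_set_unmap(tx, unmap); /* tx takes its own reference */

	dmaengine_unmap_put(unmap);	/* drop the caller's reference */
	return tx;
}
```

dma_set_unmap() takes an additional reference on the unmap object, so the final dmaengine_unmap_put() only drops the caller's hold; the mappings are torn down when the descriptor's reference is released after completion.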
Diffstat (limited to 'crypto')
-rw-r--r-- | crypto/async_tx/async_xor.c | 30
1 file changed, 17 insertions(+), 13 deletions(-)
```diff
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index f092fa14a745..d2cc77d501c7 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -275,18 +275,17 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
-	dma_addr_t *dma_src = NULL;
+	struct dmaengine_unmap_data *unmap = NULL;
 
 	BUG_ON(src_cnt <= 1);
 
-	if (submit->scribble)
-		dma_src = submit->scribble;
-	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
-		dma_src = (dma_addr_t *) src_list;
+	if (device)
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
 
-	if (dma_src && device && src_cnt <= device->max_xor &&
+	if (unmap && src_cnt <= device->max_xor &&
 	    is_dma_xor_aligned(device, offset, 0, len)) {
-		unsigned long dma_prep_flags = 0;
+		unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP |
+					       DMA_COMPL_SKIP_DEST_UNMAP;
 		int i;
 
 		pr_debug("%s: (async) len: %zu\n", __func__, len);
@@ -295,11 +294,15 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 			dma_prep_flags |= DMA_PREP_INTERRUPT;
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_prep_flags |= DMA_PREP_FENCE;
-		for (i = 0; i < src_cnt; i++)
-			dma_src[i] = dma_map_page(device->dev, src_list[i],
-						  offset, len, DMA_TO_DEVICE);
 
-		tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt,
+		for (i = 0; i < src_cnt; i++) {
+			unmap->addr[i] = dma_map_page(device->dev, src_list[i],
+						      offset, len, DMA_TO_DEVICE);
+			unmap->to_cnt++;
+		}
+		unmap->len = len;
+
+		tx = device->device_prep_dma_xor_val(chan, unmap->addr, src_cnt,
 						     len, result,
 						     dma_prep_flags);
 		if (unlikely(!tx)) {
@@ -308,11 +311,11 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 			while (!tx) {
 				dma_async_issue_pending(chan);
 				tx = device->device_prep_dma_xor_val(chan,
-					dma_src, src_cnt, len, result,
+					unmap->addr, src_cnt, len, result,
 					dma_prep_flags);
 			}
 		}
-
+		dma_set_unmap(tx, unmap);
 		async_tx_submit(chan, tx, submit);
 	} else {
 		enum async_tx_flags flags_orig = submit->flags;
@@ -334,6 +337,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 		async_tx_sync_epilog(submit);
 		submit->flags = flags_orig;
 	}
+	dmaengine_unmap_put(unmap);
 
 	return tx;
 }
```
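
The unmap->to_cnt increments above tell the shared teardown how many entries to unmap as DMA_TO_DEVICE. A simplified sketch of that release path, assuming the kref callback layout introduced in drivers/dma/dmaengine.c by this series (details abridged; __get_unmap_pool() is a file-local helper there):

```c
/* Simplified sketch of the unmap object's kref release callback from
 * drivers/dma/dmaengine.c in this series; details abridged. */
static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap =
		container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	/* entries are laid out to_cnt first, then from_cnt, then bidi_cnt */
	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}
```

Because every source in the xor_val case is mapped DMA_TO_DEVICE, only to_cnt is bumped; from_cnt and bidi_cnt stay zero.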