author		Dan Williams <dan.j.williams@intel.com>	2013-10-18 13:35:30 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2013-11-14 14:01:31 -0500
commit		1786b943dad0b2f655e69b3ad5187f7e39ef32e6 (patch)
tree		07b9c593db0e81278fc8f5684155712fcade712b /crypto/async_tx
parent		7476bd79fc019dd9a8361de6696627a4eae3ef05 (diff)
async_pq_val: convert to dmaengine_unmap_data
Use the generic unmap object to unmap dma buffers.

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Reported-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
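The conversion follows the same pattern as the other async_tx front ends in this series: allocate a dmaengine_unmap_data object sized for the number of pages to be mapped, record every dma_map_page() result in unmap->addr[] while bumping the count for its DMA direction, and attach the object to the descriptor with dma_set_unmap() so all buffers are unmapped in one place on completion. A condensed sketch of that pattern, not a verbatim excerpt (nr_pages and page are illustrative placeholders; the sync fallback and error handling are omitted):

	struct dmaengine_unmap_data *unmap;

	/* one addr[] slot per page to be mapped; GFP_NOIO because this
	 * can run in the block I/O path
	 */
	unmap = dmaengine_get_unmap_data(device->dev, nr_pages, GFP_NOIO);
	if (unmap) {
		unmap->len = len;
		unmap->addr[0] = dma_map_page(device->dev, page, offset,
					      len, DMA_TO_DEVICE);
		unmap->to_cnt++;	/* unmapped as DMA_TO_DEVICE later */

		tx = device->device_prep_dma_pq_val(chan, pq, unmap->addr,
						    src_cnt, coefs, len,
						    pqres, dma_flags);
		dma_set_unmap(tx, unmap);	/* descriptor carries it now */
	}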
Diffstat (limited to 'crypto/async_tx')
-rw-r--r--	crypto/async_tx/async_pq.c	58
1 file changed, 35 insertions(+), 23 deletions(-)
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 8cdbf33bd046..4126b56fbc01 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -290,50 +290,60 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	struct dma_async_tx_descriptor *tx;
 	unsigned char coefs[disks-2];
 	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
-	dma_addr_t *dma_src = NULL;
-	int src_cnt = 0;
+	struct dmaengine_unmap_data *unmap = NULL;
 
 	BUG_ON(disks < 4);
 
-	if (submit->scribble)
-		dma_src = submit->scribble;
-	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
-		dma_src = (dma_addr_t *) blocks;
+	if (device)
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
 
-	if (dma_src && device && disks <= dma_maxpq(device, 0) &&
+	if (unmap && disks <= dma_maxpq(device, 0) &&
 	    is_dma_pq_aligned(device, offset, 0, len)) {
 		struct device *dev = device->dev;
-		dma_addr_t *pq = &dma_src[disks-2];
-		int i;
+		dma_addr_t pq[2];
+		int i, j = 0, src_cnt = 0;
 
 		pr_debug("%s: (async) disks: %d len: %zu\n",
 			 __func__, disks, len);
-		if (!P(blocks, disks))
+
+		unmap->len = len;
+		for (i = 0; i < disks-2; i++)
+			if (likely(blocks[i])) {
+				unmap->addr[j] = dma_map_page(dev, blocks[i],
+							      offset, len,
+							      DMA_TO_DEVICE);
+				coefs[j] = raid6_gfexp[i];
+				unmap->to_cnt++;
+				src_cnt++;
+				j++;
+			}
+
+		if (!P(blocks, disks)) {
+			pq[0] = 0;
 			dma_flags |= DMA_PREP_PQ_DISABLE_P;
-		else
+		} else {
 			pq[0] = dma_map_page(dev, P(blocks, disks),
 					     offset, len,
 					     DMA_TO_DEVICE);
-		if (!Q(blocks, disks))
+			unmap->addr[j++] = pq[0];
+			unmap->to_cnt++;
+		}
+		if (!Q(blocks, disks)) {
+			pq[1] = 0;
 			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
-		else
+		} else {
 			pq[1] = dma_map_page(dev, Q(blocks, disks),
 					     offset, len,
 					     DMA_TO_DEVICE);
+			unmap->addr[j++] = pq[1];
+			unmap->to_cnt++;
+		}
 
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
-		for (i = 0; i < disks-2; i++)
-			if (likely(blocks[i])) {
-				dma_src[src_cnt] = dma_map_page(dev, blocks[i],
-								offset, len,
-								DMA_TO_DEVICE);
-				coefs[src_cnt] = raid6_gfexp[i];
-				src_cnt++;
-			}
-
 		for (;;) {
-			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
+			tx = device->device_prep_dma_pq_val(chan, pq,
+							    unmap->addr,
 							    src_cnt,
 							    coefs,
 							    len, pqres,
@@ -343,6 +353,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 			async_tx_quiesce(&submit->depend_tx);
 			dma_async_issue_pending(chan);
 		}
+
+		dma_set_unmap(tx, unmap);
 		async_tx_submit(chan, tx, submit);
 
 		return tx;
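
For reference, addr[] in struct dmaengine_unmap_data is laid out by direction: to_cnt DMA_TO_DEVICE entries first, then from_cnt DMA_FROM_DEVICE entries, then bidi_cnt DMA_BIDIRECTIONAL entries, all of length unmap->len. A simplified model of the walk the dmaengine core performs once the last reference to the object is dropped (the function name is illustrative; the real code in drivers/dma/dmaengine.c is kref- and mempool-backed):

static void unmap_all_sketch(struct dmaengine_unmap_data *unmap)
{
	struct device *dev = unmap->dev;
	int i = 0, cnt;

	/* entries are grouped by direction in the order they were counted */
	cnt = unmap->to_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
}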