commit    8971646294bda65f8666b60cb2cb3d5e172c99bf (patch)
author    Dan Williams <dan.j.williams@intel.com>   2013-10-18 13:35:25 -0400
committer Dan Williams <dan.j.williams@intel.com>   2013-11-14 14:00:39 -0500
tree      b3d75fd6c909de400e8028a337d70ca943180c87
parent    45c463ae924c62af4aa64ded1ca831f334a1db65 (diff)
async_memcpy: convert to dmaengine_unmap_data
Use the generic unmap object to unmap dma buffers.
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Reported-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
[bzolnier: add missing unmap->len initialization]
[bzolnier: fix whitespace damage]
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
[djbw: add DMA_ENGINE=n support]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
 crypto/async_tx/async_memcpy.c | 40
 drivers/dma/dmaengine.c        |  3
 include/linux/dmaengine.h      | 17
 3 files changed, 42 insertions(+), 18 deletions(-)
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 9e62feffb374..72750214f779 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -50,33 +50,37 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 						      &dest, 1, &src, 1, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
+	struct dmaengine_unmap_data *unmap = NULL;
 
-	if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
-		dma_addr_t dma_dest, dma_src;
-		unsigned long dma_prep_flags = 0;
+	if (device)
+		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
+
+	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
+		unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP |
+					       DMA_COMPL_SKIP_DEST_UNMAP;
 
 		if (submit->cb_fn)
 			dma_prep_flags |= DMA_PREP_INTERRUPT;
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_prep_flags |= DMA_PREP_FENCE;
-		dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
-					DMA_FROM_DEVICE);
 
-		dma_src = dma_map_page(device->dev, src, src_offset, len,
-				       DMA_TO_DEVICE);
-
-		tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
-						    len, dma_prep_flags);
-		if (!tx) {
-			dma_unmap_page(device->dev, dma_dest, len,
-				       DMA_FROM_DEVICE);
-			dma_unmap_page(device->dev, dma_src, len,
-				       DMA_TO_DEVICE);
-		}
+		unmap->to_cnt = 1;
+		unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len,
+					      DMA_TO_DEVICE);
+		unmap->from_cnt = 1;
+		unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len,
+					      DMA_FROM_DEVICE);
+		unmap->len = len;
+
+		tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+						    unmap->addr[0], len,
+						    dma_prep_flags);
 	}
 
 	if (tx) {
 		pr_debug("%s: (async) len: %zu\n", __func__, len);
+
+		dma_set_unmap(tx, unmap);
 		async_tx_submit(chan, tx, submit);
 	} else {
 		void *dest_buf, *src_buf;
@@ -96,6 +100,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	async_tx_sync_epilog(submit);
 	}
 
+	dmaengine_unmap_put(unmap);
+
 	return tx;
 }
 EXPORT_SYMBOL_GPL(async_memcpy);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index e721a1caff7f..54138b57b37c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1009,7 +1009,7 @@ static int __init dmaengine_init_unmap_pool(void)
 	return -ENOMEM;
 }
 
-static struct dmaengine_unmap_data *
+struct dmaengine_unmap_data *
 dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
 {
 	struct dmaengine_unmap_data *unmap;
@@ -1024,6 +1024,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
 
 	return unmap;
 }
+EXPORT_SYMBOL(dmaengine_get_unmap_data);
 
 /**
  * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 2fe855a7cab1..3782cdb782a8 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -457,6 +457,7 @@ struct dma_async_tx_descriptor {
 #endif
 };
 
+#ifdef CONFIG_DMA_ENGINE
 static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
 				 struct dmaengine_unmap_data *unmap)
 {
@@ -464,7 +465,23 @@ static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
 	tx->unmap = unmap;
 }
 
+struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
 void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
+#else
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+				 struct dmaengine_unmap_data *unmap)
+{
+}
+static inline struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
+{
+	return NULL;
+}
+static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
+{
+}
+#endif
 
 static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
 {