diff options
| author | Ingo Molnar <mingo@kernel.org> | 2013-12-17 09:27:08 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2013-12-17 09:27:08 -0500 |
| commit | bb799d3b980eb803ca2da4a4eefbd9308f8d988a (patch) | |
| tree | 69fbe0cd6d47b23a50f5e1d87bf7489532fae149 /crypto/async_tx/async_memcpy.c | |
| parent | 919fc6e34831d1c2b58bfb5ae261dc3facc9b269 (diff) | |
| parent | 319e2e3f63c348a9b66db4667efa73178e18b17d (diff) | |
Merge tag 'v3.13-rc4' into core/locking
Merge Linux 3.13-rc4, to refresh this rather old tree with the latest fixes.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'crypto/async_tx/async_memcpy.c')
| -rw-r--r-- | crypto/async_tx/async_memcpy.c | 37 |
1 file changed, 21 insertions, 16 deletions
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index 9e62feffb374..f8c0b8dbeb75 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c | |||
| @@ -50,33 +50,36 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, | |||
| 50 | &dest, 1, &src, 1, len); | 50 | &dest, 1, &src, 1, len); |
| 51 | struct dma_device *device = chan ? chan->device : NULL; | 51 | struct dma_device *device = chan ? chan->device : NULL; |
| 52 | struct dma_async_tx_descriptor *tx = NULL; | 52 | struct dma_async_tx_descriptor *tx = NULL; |
| 53 | struct dmaengine_unmap_data *unmap = NULL; | ||
| 53 | 54 | ||
| 54 | if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { | 55 | if (device) |
| 55 | dma_addr_t dma_dest, dma_src; | 56 | unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO); |
| 57 | |||
| 58 | if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { | ||
| 56 | unsigned long dma_prep_flags = 0; | 59 | unsigned long dma_prep_flags = 0; |
| 57 | 60 | ||
| 58 | if (submit->cb_fn) | 61 | if (submit->cb_fn) |
| 59 | dma_prep_flags |= DMA_PREP_INTERRUPT; | 62 | dma_prep_flags |= DMA_PREP_INTERRUPT; |
| 60 | if (submit->flags & ASYNC_TX_FENCE) | 63 | if (submit->flags & ASYNC_TX_FENCE) |
| 61 | dma_prep_flags |= DMA_PREP_FENCE; | 64 | dma_prep_flags |= DMA_PREP_FENCE; |
| 62 | dma_dest = dma_map_page(device->dev, dest, dest_offset, len, | 65 | |
| 63 | DMA_FROM_DEVICE); | 66 | unmap->to_cnt = 1; |
| 64 | 67 | unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len, | |
| 65 | dma_src = dma_map_page(device->dev, src, src_offset, len, | 68 | DMA_TO_DEVICE); |
| 66 | DMA_TO_DEVICE); | 69 | unmap->from_cnt = 1; |
| 67 | 70 | unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len, | |
| 68 | tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, | 71 | DMA_FROM_DEVICE); |
| 69 | len, dma_prep_flags); | 72 | unmap->len = len; |
| 70 | if (!tx) { | 73 | |
| 71 | dma_unmap_page(device->dev, dma_dest, len, | 74 | tx = device->device_prep_dma_memcpy(chan, unmap->addr[1], |
| 72 | DMA_FROM_DEVICE); | 75 | unmap->addr[0], len, |
| 73 | dma_unmap_page(device->dev, dma_src, len, | 76 | dma_prep_flags); |
| 74 | DMA_TO_DEVICE); | ||
| 75 | } | ||
| 76 | } | 77 | } |
| 77 | 78 | ||
| 78 | if (tx) { | 79 | if (tx) { |
| 79 | pr_debug("%s: (async) len: %zu\n", __func__, len); | 80 | pr_debug("%s: (async) len: %zu\n", __func__, len); |
| 81 | |||
| 82 | dma_set_unmap(tx, unmap); | ||
| 80 | async_tx_submit(chan, tx, submit); | 83 | async_tx_submit(chan, tx, submit); |
| 81 | } else { | 84 | } else { |
| 82 | void *dest_buf, *src_buf; | 85 | void *dest_buf, *src_buf; |
| @@ -96,6 +99,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, | |||
| 96 | async_tx_sync_epilog(submit); | 99 | async_tx_sync_epilog(submit); |
| 97 | } | 100 | } |
| 98 | 101 | ||
| 102 | dmaengine_unmap_put(unmap); | ||
| 103 | |||
| 99 | return tx; | 104 | return tx; |
| 100 | } | 105 | } |
| 101 | EXPORT_SYMBOL_GPL(async_memcpy); | 106 | EXPORT_SYMBOL_GPL(async_memcpy); |
