author		Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>	2013-10-18 13:35:33 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2013-11-14 14:04:38 -0500
commit		0776ae7b89782124ddd72eafe0b1e0fdcdabe32e (patch)
tree		f16e917b66a8a60a7341937a40021d683f3e27f0 /crypto
parent		54f8d501e842879143e867e70996574a54d1e130 (diff)
dmaengine: remove DMA unmap flags
Remove no longer needed DMA unmap flags:
 - DMA_COMPL_SKIP_SRC_UNMAP
 - DMA_COMPL_SKIP_DEST_UNMAP
 - DMA_COMPL_SRC_UNMAP_SINGLE
 - DMA_COMPL_DEST_UNMAP_SINGLE

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Jon Mason <jon.mason@intel.com>
Acked-by: Mark Brown <broonie@linaro.org>
[djbw: clean up straggling skip unmap flags in ntb]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
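These flags became removable because earlier patches in this series moved unmap responsibility out of the individual clients and into the dmaengine core: submitters now describe their mappings in a reference-counted struct dmaengine_unmap_data attached to the descriptor, and the core unmaps on completion. Below is a minimal sketch of the submission-side pattern, reconstructed from the async_memcpy() hunk further down; variables such as chan, device, src, dest, src_offset, dest_offset and len are assumed from that function's context rather than shown here.

	/* Sketch (assumes async_memcpy()'s local context): describe the
	 * mappings in unmap data instead of passing DMA_COMPL_* skip flags.
	 */
	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx = NULL;
	unsigned long dma_prep_flags = 0;	/* no skip-unmap flags needed */

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
		unmap->to_cnt = 1;	/* one source, mapped DMA_TO_DEVICE */
		unmap->addr[0] = dma_map_page(device->dev, src, src_offset,
					      len, DMA_TO_DEVICE);
		unmap->from_cnt = 1;	/* one dest, mapped DMA_FROM_DEVICE */
		unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset,
					      len, DMA_FROM_DEVICE);
		unmap->len = len;

		tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
						    unmap->addr[0], len,
						    dma_prep_flags);
	}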
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/async_tx/async_memcpy.c      | 3
-rw-r--r--  crypto/async_tx/async_pq.c          | 1
-rw-r--r--  crypto/async_tx/async_raid6_recov.c | 8
-rw-r--r--  crypto/async_tx/async_xor.c         | 6
4 files changed, 5 insertions(+), 13 deletions(-)
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 72750214f779..f8c0b8dbeb75 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -56,8 +56,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
 
 	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
-		unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-					       DMA_COMPL_SKIP_DEST_UNMAP;
+		unsigned long dma_prep_flags = 0;
 
 		if (submit->cb_fn)
 			dma_prep_flags |= DMA_PREP_INTERRUPT;
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 4126b56fbc01..d05327caf69d 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -62,7 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan,
 	dma_addr_t dma_dest[2];
 	int src_off = 0;
 
-	dma_flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
 	if (submit->flags & ASYNC_TX_FENCE)
 		dma_flags |= DMA_PREP_FENCE;
 
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index a3a72a784421..934a84981495 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -47,9 +47,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 	struct device *dev = dma->dev;
 	dma_addr_t pq[2];
 	struct dma_async_tx_descriptor *tx;
-	enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-					DMA_COMPL_SKIP_DEST_UNMAP |
-					DMA_PREP_PQ_DISABLE_P;
+	enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
 
 	if (submit->flags & ASYNC_TX_FENCE)
 		dma_flags |= DMA_PREP_FENCE;
@@ -113,9 +111,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 	dma_addr_t dma_dest[2];
 	struct device *dev = dma->dev;
 	struct dma_async_tx_descriptor *tx;
-	enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-					DMA_COMPL_SKIP_DEST_UNMAP |
-					DMA_PREP_PQ_DISABLE_P;
+	enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
 
 	if (submit->flags & ASYNC_TX_FENCE)
 		dma_flags |= DMA_PREP_FENCE;
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index d2cc77d501c7..3c562f5a60bb 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -41,7 +41,7 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
 	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
 	void *cb_param_orig = submit->cb_param;
 	enum async_tx_flags flags_orig = submit->flags;
-	enum dma_ctrl_flags dma_flags;
+	enum dma_ctrl_flags dma_flags = 0;
 	int src_cnt = unmap->to_cnt;
 	int xor_src_cnt;
 	dma_addr_t dma_dest = unmap->addr[unmap->to_cnt];
@@ -55,7 +55,6 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
 	/* if we are submitting additional xors, leave the chain open
 	 * and clear the callback parameters
 	 */
-	dma_flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
 	if (src_cnt > xor_src_cnt) {
 		submit->flags &= ~ASYNC_TX_ACK;
 		submit->flags |= ASYNC_TX_FENCE;
@@ -284,8 +283,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 
 	if (unmap && src_cnt <= device->max_xor &&
 	    is_dma_xor_aligned(device, offset, 0, len)) {
-		unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-					       DMA_COMPL_SKIP_DEST_UNMAP;
+		unsigned long dma_prep_flags = 0;
 		int i;
 
 		pr_debug("%s: (async) len: %zu\n", __func__, len);
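On the completion side the same series has the descriptor take its own reference on the unmap data, so the submitter simply drops its reference once the descriptor is queued; the core performs the actual unmap when the last reference goes away. A short sketch, again assuming the async_memcpy() context from the sketch above:

	if (tx) {
		dma_set_unmap(tx, unmap);	/* descriptor takes a reference */
		async_tx_submit(chan, tx, submit);
	}
	dmaengine_unmap_put(unmap);		/* drop the submitter's reference;
						 * the core unmaps on completion */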