author     Dan Williams <dan.j.williams@intel.com>    2013-10-18 13:35:26 -0400
committer  Dan Williams <dan.j.williams@intel.com>    2013-11-14 14:01:30 -0500
commit     fb36ab142b2f1dc8c8ad3750413efa7a5cc1c07b (patch)
tree       e0e29838a524d5bfc353d6fb5abd3fe5e40766b5 /crypto
parent     8971646294bda65f8666b60cb2cb3d5e172c99bf (diff)
async_xor: convert to dmaengine_unmap_data
Use the generic unmap object to unmap DMA buffers.
Later we can push this unmap object up to the raid layer and get rid of
the 'scribble' parameter.
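
For reference, the lifecycle of the unmap object as used by this patch looks
roughly like the condensed sketch below. This is only an illustration:
device, chan, src, dest, offset, len, src_cnt and dma_flags stand in for the
caller's real arguments, and error handling (failed allocation or mapping) is
trimmed for brevity.

	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx;
	int i;

	/* one address slot per source plus one for the destination */
	unmap = dmaengine_get_unmap_data(device->dev, src_cnt + 1, GFP_NOIO);
	unmap->len = len;

	/* sources are mapped to-device and accounted in to_cnt */
	for (i = 0; i < src_cnt; i++) {
		unmap->addr[i] = dma_map_page(device->dev, src[i], offset,
					      len, DMA_TO_DEVICE);
		unmap->to_cnt++;
	}

	/* the destination is mapped bidirectionally so it can be re-used
	 * as a source, and accounted in bidi_cnt
	 */
	unmap->addr[i] = dma_map_page(device->dev, dest, offset, len,
				      DMA_BIDIRECTIONAL);
	unmap->bidi_cnt = 1;

	tx = dma->device_prep_dma_xor(chan, unmap->addr[i], unmap->addr,
				      src_cnt, len, dma_flags);
	dma_set_unmap(tx, unmap);	/* descriptor takes a reference */
	dmaengine_unmap_put(unmap);	/* drop ours; the buffers are unmapped
					 * when the last reference goes away
					 */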
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Reported-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
[bzolnier: minor cleanups]
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'crypto')
-rw-r--r--    crypto/async_tx/async_xor.c    95
1 file changed, 51 insertions(+), 44 deletions(-)
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 8ade0a0481c6..f092fa14a745 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -33,48 +33,32 @@
 
 /* do_async_xor - dma map the pages and perform the xor with an engine */
 static __async_inline struct dma_async_tx_descriptor *
-do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
-	     unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src,
+do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
 	     struct async_submit_ctl *submit)
 {
 	struct dma_device *dma = chan->device;
 	struct dma_async_tx_descriptor *tx = NULL;
-	int src_off = 0;
-	int i;
 	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
 	void *cb_param_orig = submit->cb_param;
 	enum async_tx_flags flags_orig = submit->flags;
 	enum dma_ctrl_flags dma_flags;
-	int xor_src_cnt = 0;
-	dma_addr_t dma_dest;
-
-	/* map the dest bidrectional in case it is re-used as a source */
-	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
-	for (i = 0; i < src_cnt; i++) {
-		/* only map the dest once */
-		if (!src_list[i])
-			continue;
-		if (unlikely(src_list[i] == dest)) {
-			dma_src[xor_src_cnt++] = dma_dest;
-			continue;
-		}
-		dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
-						      len, DMA_TO_DEVICE);
-	}
-	src_cnt = xor_src_cnt;
+	int src_cnt = unmap->to_cnt;
+	int xor_src_cnt;
+	dma_addr_t dma_dest = unmap->addr[unmap->to_cnt];
+	dma_addr_t *src_list = unmap->addr;
 
 	while (src_cnt) {
+		dma_addr_t tmp;
+
 		submit->flags = flags_orig;
-		dma_flags = 0;
 		xor_src_cnt = min(src_cnt, (int)dma->max_xor);
-		/* if we are submitting additional xors, leave the chain open,
-		 * clear the callback parameters, and leave the destination
-		 * buffer mapped
+		/* if we are submitting additional xors, leave the chain open
+		 * and clear the callback parameters
 		 */
+		dma_flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
 		if (src_cnt > xor_src_cnt) {
 			submit->flags &= ~ASYNC_TX_ACK;
 			submit->flags |= ASYNC_TX_FENCE;
-			dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
 			submit->cb_fn = NULL;
 			submit->cb_param = NULL;
 		} else {
@@ -85,12 +69,18 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 			dma_flags |= DMA_PREP_INTERRUPT;
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
-		/* Since we have clobbered the src_list we are committed
-		 * to doing this asynchronously. Drivers force forward progress
-		 * in case they can not provide a descriptor
+
+		/* Drivers force forward progress in case they can not provide a
+		 * descriptor
 		 */
-		tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
-					      xor_src_cnt, len, dma_flags);
+		tmp = src_list[0];
+		if (src_list > unmap->addr)
+			src_list[0] = dma_dest;
+		tx = dma->device_prep_dma_xor(chan, dma_dest, src_list,
+					      xor_src_cnt, unmap->len,
+					      dma_flags);
+		src_list[0] = tmp;
+
 
 		if (unlikely(!tx))
 			async_tx_quiesce(&submit->depend_tx);
@@ -99,22 +89,21 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 		while (unlikely(!tx)) {
 			dma_async_issue_pending(chan);
 			tx = dma->device_prep_dma_xor(chan, dma_dest,
-						      &dma_src[src_off],
-						      xor_src_cnt, len,
+						      src_list,
+						      xor_src_cnt, unmap->len,
 						      dma_flags);
 		}
 
+		dma_set_unmap(tx, unmap);
 		async_tx_submit(chan, tx, submit);
 		submit->depend_tx = tx;
 
 		if (src_cnt > xor_src_cnt) {
 			/* drop completed sources */
 			src_cnt -= xor_src_cnt;
-			src_off += xor_src_cnt;
-
 			/* use the intermediate result a source */
-			dma_src[--src_off] = dma_dest;
 			src_cnt++;
+			src_list += xor_src_cnt - 1;
 		} else
 			break;
 	}
@@ -189,22 +178,40 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
 						      &dest, 1, src_list,
 						      src_cnt, len);
-	dma_addr_t *dma_src = NULL;
+	struct dma_device *device = chan ? chan->device : NULL;
+	struct dmaengine_unmap_data *unmap = NULL;
 
 	BUG_ON(src_cnt <= 1);
 
-	if (submit->scribble)
-		dma_src = submit->scribble;
-	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
-		dma_src = (dma_addr_t *) src_list;
+	if (device)
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
+
+	if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
+		struct dma_async_tx_descriptor *tx;
+		int i, j;
 
-	if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
 		/* run the xor asynchronously */
 		pr_debug("%s (async): len: %zu\n", __func__, len);
 
-		return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
-				    dma_src, submit);
+		unmap->len = len;
+		for (i = 0, j = 0; i < src_cnt; i++) {
+			if (!src_list[i])
+				continue;
+			unmap->to_cnt++;
+			unmap->addr[j++] = dma_map_page(device->dev, src_list[i],
+							offset, len, DMA_TO_DEVICE);
+		}
+
+		/* map it bidirectional as it may be re-used as a source */
+		unmap->addr[j] = dma_map_page(device->dev, dest, offset, len,
+					      DMA_BIDIRECTIONAL);
+		unmap->bidi_cnt = 1;
+
+		tx = do_async_xor(chan, unmap, submit);
+		dmaengine_unmap_put(unmap);
+		return tx;
 	} else {
+		dmaengine_unmap_put(unmap);
 		/* run the xor synchronously */
 		pr_debug("%s (sync): len: %zu\n", __func__, len);
 		WARN_ONCE(chan, "%s: no space for dma address conversion\n",