author	Dan Williams <dan.j.williams@intel.com>	2013-10-18 13:35:29 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2013-11-14 14:01:31 -0500
commit	7476bd79fc019dd9a8361de6696627a4eae3ef05 (patch)
tree	b95b39b60a38c2b84e668eb97c87bf2aeb6eedb2 /crypto
parent	3bbdd49872931b8c4282aeb1cab5af7cce2cfb0d (diff)
async_pq: convert to dmaengine_unmap_data
Use the generic unmap object to unmap DMA buffers.
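The lifecycle behind this conversion, reduced to a minimal single-source
sketch (sketch_unmap_cycle() and the memcpy usage are hypothetical
illustrations; dmaengine_get_unmap_data(), dma_set_unmap() and
dmaengine_unmap_put() are the dmaengine-core helpers the patch itself
uses):

#include <linux/dmaengine.h>

static struct dma_async_tx_descriptor *
sketch_unmap_cycle(struct dma_chan *chan, struct page *dst,
		   struct page *src, size_t len)
{
	struct dma_device *dma = chan->device;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;

	/* one refcounted object tracks every mapping tied to the descriptor */
	unmap = dmaengine_get_unmap_data(dma->dev, 2, GFP_NOIO);
	if (!unmap)
		return NULL;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(dma->dev, src, 0, len, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(dma->dev, dst, 0, len, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;

	/* the driver must skip its legacy unmap; the core owns it now */
	tx = dma->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
					 len, DMA_COMPL_SKIP_SRC_UNMAP |
					 DMA_COMPL_SKIP_DEST_UNMAP);
	if (tx)
		dma_set_unmap(tx, unmap);	/* descriptor takes a reference */

	/* drop the caller's reference; the buffers stay mapped until the
	 * descriptor completes (or are unmapped here if prep failed)
	 */
	dmaengine_unmap_put(unmap);
	return tx;
}

Because dma_set_unmap() takes its own reference, the caller can
unconditionally drop its reference with dmaengine_unmap_put(), which is
exactly the pattern async_gen_syndrome() follows below.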
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Reported-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
[bzolnier: keep temporary dma_dest array in do_async_gen_syndrome()]
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/async_tx/async_pq.c	117
1 file changed, 65 insertions(+), 52 deletions(-)
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 91d5d385899e..8cdbf33bd046 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -46,49 +46,25 @@ static struct page *pq_scribble_page;
  * do_async_gen_syndrome - asynchronously calculate P and/or Q
  */
 static __async_inline struct dma_async_tx_descriptor *
-do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
-		      const unsigned char *scfs, unsigned int offset, int disks,
-		      size_t len, dma_addr_t *dma_src,
+do_async_gen_syndrome(struct dma_chan *chan,
+		      const unsigned char *scfs, int disks,
+		      struct dmaengine_unmap_data *unmap,
+		      enum dma_ctrl_flags dma_flags,
 		      struct async_submit_ctl *submit)
 {
 	struct dma_async_tx_descriptor *tx = NULL;
 	struct dma_device *dma = chan->device;
-	enum dma_ctrl_flags dma_flags = 0;
 	enum async_tx_flags flags_orig = submit->flags;
 	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
 	dma_async_tx_callback cb_param_orig = submit->cb_param;
 	int src_cnt = disks - 2;
-	unsigned char coefs[src_cnt];
 	unsigned short pq_src_cnt;
 	dma_addr_t dma_dest[2];
 	int src_off = 0;
-	int idx;
-	int i;
 
-	/* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */
-	if (P(blocks, disks))
-		dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
-					   len, DMA_BIDIRECTIONAL);
-	else
-		dma_flags |= DMA_PREP_PQ_DISABLE_P;
-	if (Q(blocks, disks))
-		dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
-					   len, DMA_BIDIRECTIONAL);
-	else
-		dma_flags |= DMA_PREP_PQ_DISABLE_Q;
-
-	/* convert source addresses being careful to collapse 'empty'
-	 * sources and update the coefficients accordingly
-	 */
-	for (i = 0, idx = 0; i < src_cnt; i++) {
-		if (blocks[i] == NULL)
-			continue;
-		dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
-					    DMA_TO_DEVICE);
-		coefs[idx] = scfs[i];
-		idx++;
-	}
-	src_cnt = idx;
+	dma_flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
+	if (submit->flags & ASYNC_TX_FENCE)
+		dma_flags |= DMA_PREP_FENCE;
 
 	while (src_cnt > 0) {
 		submit->flags = flags_orig;
@@ -100,28 +76,25 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
 		if (src_cnt > pq_src_cnt) {
 			submit->flags &= ~ASYNC_TX_ACK;
 			submit->flags |= ASYNC_TX_FENCE;
-			dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
 			submit->cb_fn = NULL;
 			submit->cb_param = NULL;
 		} else {
-			dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
 			submit->cb_fn = cb_fn_orig;
 			submit->cb_param = cb_param_orig;
 			if (cb_fn_orig)
 				dma_flags |= DMA_PREP_INTERRUPT;
 		}
-		if (submit->flags & ASYNC_TX_FENCE)
-			dma_flags |= DMA_PREP_FENCE;
 
-		/* Since we have clobbered the src_list we are committed
-		 * to doing this asynchronously.  Drivers force forward
-		 * progress in case they can not provide a descriptor
+		/* Drivers force forward progress in case they can not provide
+		 * a descriptor
 		 */
 		for (;;) {
+			dma_dest[0] = unmap->addr[disks - 2];
+			dma_dest[1] = unmap->addr[disks - 1];
 			tx = dma->device_prep_dma_pq(chan, dma_dest,
-						     &dma_src[src_off],
+						     &unmap->addr[src_off],
 						     pq_src_cnt,
-						     &coefs[src_off], len,
+						     &scfs[src_off], unmap->len,
 						     dma_flags);
 			if (likely(tx))
 				break;
@@ -129,6 +102,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
 			dma_async_issue_pending(chan);
 		}
 
+		dma_set_unmap(tx, unmap);
 		async_tx_submit(chan, tx, submit);
 		submit->depend_tx = tx;
 
@@ -188,10 +162,6 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
  * set to NULL those buffers will be replaced with the raid6_zero_page
  * in the synchronous path and omitted in the hardware-asynchronous
  * path.
- *
- * 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overwritten to perform address conversions
- * (dma_map_page() or page_address()).
  */
 struct dma_async_tx_descriptor *
 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -202,26 +172,69 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 				      &P(blocks, disks), 2,
 				      blocks, src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
-	dma_addr_t *dma_src = NULL;
+	struct dmaengine_unmap_data *unmap = NULL;
 
 	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
 
-	if (submit->scribble)
-		dma_src = submit->scribble;
-	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
-		dma_src = (dma_addr_t *) blocks;
+	if (device)
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
 
-	if (dma_src && device &&
+	if (unmap &&
 	    (src_cnt <= dma_maxpq(device, 0) ||
 	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
 	    is_dma_pq_aligned(device, offset, 0, len)) {
+		struct dma_async_tx_descriptor *tx;
+		enum dma_ctrl_flags dma_flags = 0;
+		unsigned char coefs[src_cnt];
+		int i, j;
+
 		/* run the p+q asynchronously */
 		pr_debug("%s: (async) disks: %d len: %zu\n",
 			 __func__, disks, len);
-		return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
-					     disks, len, dma_src, submit);
+
+		/* convert source addresses being careful to collapse 'empty'
+		 * sources and update the coefficients accordingly
+		 */
+		unmap->len = len;
+		for (i = 0, j = 0; i < src_cnt; i++) {
+			if (blocks[i] == NULL)
+				continue;
+			unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
+						      len, DMA_TO_DEVICE);
+			coefs[j] = raid6_gfexp[i];
+			unmap->to_cnt++;
+			j++;
+		}
+
+		/*
+		 * DMAs use destinations as sources,
+		 * so use BIDIRECTIONAL mapping
+		 */
+		unmap->bidi_cnt++;
+		if (P(blocks, disks))
+			unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
+							offset, len, DMA_BIDIRECTIONAL);
+		else {
+			unmap->addr[j++] = 0;
+			dma_flags |= DMA_PREP_PQ_DISABLE_P;
+		}
+
+		unmap->bidi_cnt++;
+		if (Q(blocks, disks))
+			unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
+							offset, len, DMA_BIDIRECTIONAL);
+		else {
+			unmap->addr[j++] = 0;
+			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+		}
+
+		tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
+		dmaengine_unmap_put(unmap);
+		return tx;
 	}
 
+	dmaengine_unmap_put(unmap);
+
 	/* run the pq synchronously */
 	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);
 
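A note on the unmap->addr[j++] = 0 assignments for a disabled P or Q
above: when the last reference is dropped, the unmap core walks to_cnt
source slots, then from_cnt, then bidi_cnt destination slots, and skips
zero addresses in the bidirectional pass. Roughly (a simplified sketch
of the release path; the real dmaengine_unmap() in
drivers/dma/dmaengine.c also returns the object to its mempool):

static void sketch_unmap_release(struct dmaengine_unmap_data *unmap)
{
	struct device *dev = unmap->dev;
	int i = 0, cnt;

	cnt = unmap->to_cnt;			/* collapsed sources */
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;			/* plain destinations */
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;			/* P and Q: read and written */
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)	/* disabled P or Q slot */
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
}

Appending P and Q as the last two slots of unmap->addr[] (and passing
the collapsed count j as the new 'disks') is also what lets
do_async_gen_syndrome() reload dma_dest[] from unmap->addr[disks - 2]
and unmap->addr[disks - 1] on each pass of its submission loop.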