author     NeilBrown <neilb@suse.de>  2009-10-16 01:40:34 -0400
committer  NeilBrown <neilb@suse.de>  2009-10-16 01:40:34 -0400
commit     b2141e6951ad56c8f65e70547baeabd5698e390a
tree       cc0a000cb6bc1eee887a6e3de5d8073f471bf9b5 /crypto/async_tx
parent     5dd33c9a4c29015f6d87568d33521c98931a387e
raid6/async_tx: handle holes in block list in async_syndrome_val
async_syndrome_val checks the P and Q blocks used for RAID6 calculations. With DDF RAID6, some of the data blocks might be NULL, so this needs to be handled in the same way that async_gen_syndrome handles it.

As async_syndrome_val calls async_xor, also enhance async_xor to detect and skip NULL blocks in the list.

Signed-off-by: NeilBrown <neilb@suse.de>
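Why skipping holes is safe follows from the syndrome math: a NULL block is treated as all zeroes, and a zero block contributes nothing to P (plain XOR) or to Q (XOR of g^i * D_i), as long as each surviving block keeps its own coefficient g^i. The following is a self-contained userspace sketch of that identity; gf_mul() and the example values are illustrative, not the kernel's raid6 library.

#include <stdio.h>
#include <assert.h>
#include <stddef.h>

/* GF(2^8) multiply with the RAID6 polynomial x^8+x^4+x^3+x^2+1 (0x11d). */
static unsigned char gf_mul(unsigned char a, unsigned char b)
{
	unsigned char p = 0;

	while (b) {
		if (b & 1)
			p ^= a;
		b >>= 1;
		if (a & 0x80)
			a = (a << 1) ^ 0x1d;
		else
			a <<= 1;
	}
	return p;
}

int main(void)
{
	/* one-byte "blocks"; index 1 is a hole (NULL, i.e. treated as zero) */
	unsigned char d0 = 0x5a, d2 = 0xc3;
	unsigned char *blocks[3] = { &d0, NULL, &d2 };
	unsigned char gfexp[3]   = { 0x01, 0x02, 0x04 };	/* g^0, g^1, g^2 */
	unsigned char q_zero = 0, q_skip = 0;
	int i;

	/* Q over all three positions, the hole contributing zero */
	for (i = 0; i < 3; i++)
		q_zero ^= gf_mul(gfexp[i], blocks[i] ? *blocks[i] : 0);

	/* Q skipping the hole, but keeping each block paired with its own g^i */
	for (i = 0; i < 3; i++)
		if (blocks[i])
			q_skip ^= gf_mul(gfexp[i], *blocks[i]);

	assert(q_zero == q_skip);
	printf("Q = 0x%02x either way\n", q_zero);
	return 0;
}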
Diffstat (limited to 'crypto/async_tx')
-rw-r--r--  crypto/async_tx/async_pq.c   31
-rw-r--r--  crypto/async_tx/async_xor.c  18
2 files changed, 35 insertions(+), 14 deletions(-)
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 9ab1ce4af3cc..43b1436bd968 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -260,8 +260,10 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 						      len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx;
+	unsigned char coefs[disks-2];
 	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 	dma_addr_t *dma_src = NULL;
+	int src_cnt = 0;
 
 	BUG_ON(disks < 4);
 
@@ -280,20 +282,35 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 			 __func__, disks, len);
 		if (!P(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_P;
+		else
+			pq[0] = dma_map_page(dev, P(blocks,disks),
+					     offset, len,
+					     DMA_TO_DEVICE);
 		if (!Q(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+		else
+			pq[1] = dma_map_page(dev, Q(blocks,disks),
+					     offset, len,
+					     DMA_TO_DEVICE);
+
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
-		for (i = 0; i < disks; i++)
-			if (likely(blocks[i]))
-				dma_src[i] = dma_map_page(dev, blocks[i],
-							  offset, len,
-							  DMA_TO_DEVICE);
+		for (i = 0; i < disks-2; i++)
+			if (likely(blocks[i])) {
+				dma_src[src_cnt] = dma_map_page(dev, blocks[i],
+								offset, len,
+								DMA_TO_DEVICE);
+				coefs[src_cnt] = raid6_gfexp[i];
+				src_cnt++;
+			}
+		pq[1] = dma_map_page(dev, Q(blocks,disks),
+				     offset, len,
+				     DMA_TO_DEVICE);
 
 		for (;;) {
 			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
-							    disks - 2,
-							    raid6_gfexp,
+							    src_cnt,
+							    coefs,
 							    len, pqres,
 							    dma_flags);
 			if (likely(tx))
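For reference, a standalone userspace sketch of what the new mapping loop above is doing: sources and their coefficients are compacted together so that dma_src[] and coefs[] stay index-aligned, and only src_cnt entries (rather than disks-2) are handed to the driver. map_page() below is a stand-in for dma_map_page(), and raid6_gfexp[] here is a local toy table, not the kernel's.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* stand-in for dma_map_page(): just return the buffer address */
static uintptr_t map_page(void *page)
{
	return (uintptr_t)page;
}

int main(void)
{
	char d0[16], d2[16], d3[16];
	/* data blocks only (P and Q excluded), with a DDF-style hole at index 1 */
	void *blocks[4] = { d0, NULL, d2, d3 };
	uintptr_t dma_src[4];
	unsigned char coefs[4];
	unsigned char raid6_gfexp[4] = { 0x01, 0x02, 0x04, 0x08 };
	int i, src_cnt = 0;

	for (i = 0; i < 4; i++) {
		if (!blocks[i])
			continue;			/* skip the hole */
		dma_src[src_cnt] = map_page(blocks[i]);	/* dense source list */
		coefs[src_cnt] = raid6_gfexp[i];	/* keep g^i paired with its block */
		src_cnt++;
	}

	/* the driver is asked to validate src_cnt sources, not disks-2 */
	printf("src_cnt = %d, coefs = { 0x%02x, 0x%02x, 0x%02x }\n",
	       src_cnt, coefs[0], coefs[1], coefs[2]);
	return 0;
}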
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index b459a9034aac..79182dcb91b7 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -44,20 +44,23 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	void *cb_param_orig = submit->cb_param;
 	enum async_tx_flags flags_orig = submit->flags;
 	enum dma_ctrl_flags dma_flags;
-	int xor_src_cnt;
+	int xor_src_cnt = 0;
 	dma_addr_t dma_dest;
 
 	/* map the dest bidrectional in case it is re-used as a source */
 	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
 	for (i = 0; i < src_cnt; i++) {
 		/* only map the dest once */
+		if (!src_list[i])
+			continue;
 		if (unlikely(src_list[i] == dest)) {
-			dma_src[i] = dma_dest;
+			dma_src[xor_src_cnt++] = dma_dest;
 			continue;
 		}
-		dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
-					  len, DMA_TO_DEVICE);
+		dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
+						      len, DMA_TO_DEVICE);
 	}
+	src_cnt = xor_src_cnt;
 
 	while (src_cnt) {
 		submit->flags = flags_orig;
@@ -123,7 +126,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	    int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
 	int i;
-	int xor_src_cnt;
+	int xor_src_cnt = 0;
 	int src_off = 0;
 	void *dest_buf;
 	void **srcs;
@@ -135,8 +138,9 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 
 	/* convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
-		srcs[i] = page_address(src_list[i]) + offset;
-
+		if (src_list[i])
+			srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
+	src_cnt = xor_src_cnt;
 	/* set destination address */
 	dest_buf = page_address(dest) + offset;
 
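The same pattern applies in the synchronous XOR path: build a dense array of live buffer pointers, update the count, and XOR only those. A minimal userspace sketch follows; xor_bufs() is an illustrative stand-in for the kernel's xor_blocks(), not the real helper.

#include <stdio.h>
#include <stddef.h>

/* XOR src_cnt source buffers into dest, byte by byte */
static void xor_bufs(size_t len, void *dest, int src_cnt, void **srcs)
{
	unsigned char *d = dest;
	int n;
	size_t i;

	for (n = 0; n < src_cnt; n++) {
		const unsigned char *s = srcs[n];
		for (i = 0; i < len; i++)
			d[i] ^= s[i];
	}
}

int main(void)
{
	unsigned char b0[4] = { 0x11, 0x22, 0x33, 0x44 };
	unsigned char b2[4] = { 0xa0, 0xb0, 0xc0, 0xd0 };
	unsigned char dest[4] = { 0 };
	void *src_list[3] = { b0, NULL, b2 };	/* hole at index 1 */
	void *srcs[3];
	int i, xor_src_cnt = 0;

	/* convert to dense buffer pointers, as the patched do_sync_xor loop does */
	for (i = 0; i < 3; i++)
		if (src_list[i])
			srcs[xor_src_cnt++] = src_list[i];

	xor_bufs(sizeof(dest), dest, xor_src_cnt, srcs);
	printf("dest[0] = 0x%02x\n", dest[0]);	/* 0x11 ^ 0xa0 = 0xb1 */
	return 0;
}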