author		NeilBrown <neilb@suse.de>	2009-10-16 01:40:34 -0400
committer	NeilBrown <neilb@suse.de>	2009-10-16 01:40:34 -0400
commit		b2141e6951ad56c8f65e70547baeabd5698e390a (patch)
tree		cc0a000cb6bc1eee887a6e3de5d8073f471bf9b5 /crypto/async_tx/async_pq.c
parent		5dd33c9a4c29015f6d87568d33521c98931a387e (diff)
raid6/async_tx: handle holes in block list in async_syndrome_val
async_syndrome_val checks the P and Q blocks used for RAID6
calculations.
With DDF raid6, some of the data blocks might be NULL, so
this needs to be handled in the same way that async_gen_syndrome
handles it.
As async_syndrome_val calls async_xor, also enhance async_xor
to detect and skip NULL blocks in the list.
Signed-off-by: NeilBrown <neilb@suse.de>
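For illustration, a minimal stand-alone C sketch of the compaction pattern the patch applies in async_syndrome_val: non-NULL data blocks are packed into a source list while a matching coefficient list records each block's original slot, and only the packed count is handed to the validate operation. The helper name compact_sources and its plain-pointer types are hypothetical; the real code fills dma_src[]/coefs[] from raid6_gfexp[] and passes them to device_prep_dma_pq_val().

/*
 * Illustrative sketch, not the kernel code: pack the non-NULL data blocks of
 * a RAID6 block list into 'src' and record the matching coefficient for each
 * packed entry. 'blocks' has 'disks' entries; the last two are P and Q and
 * are handled separately.
 */
static int compact_sources(void **blocks, int disks,
			   const unsigned char *gf_coef_tbl,
			   void **src, unsigned char *coef)
{
	int i, src_cnt = 0;

	for (i = 0; i < disks - 2; i++) {
		if (!blocks[i])
			continue;		/* hole in the block list */
		src[src_cnt]  = blocks[i];
		coef[src_cnt] = gf_coef_tbl[i];	/* coefficient of the original slot */
		src_cnt++;
	}
	return src_cnt;	/* pass this count, not disks-2, to the validate op */
}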
Diffstat (limited to 'crypto/async_tx/async_pq.c')
-rw-r--r--	crypto/async_tx/async_pq.c	31
1 file changed, 24 insertions, 7 deletions
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 9ab1ce4af3cc..43b1436bd968 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -260,8 +260,10 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 						      len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx;
+	unsigned char coefs[disks-2];
 	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 	dma_addr_t *dma_src = NULL;
+	int src_cnt = 0;
 
 	BUG_ON(disks < 4);
 
@@ -280,20 +282,35 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 			 __func__, disks, len);
 		if (!P(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_P;
+		else
+			pq[0] = dma_map_page(dev, P(blocks,disks),
+					     offset, len,
+					     DMA_TO_DEVICE);
 		if (!Q(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+		else
+			pq[1] = dma_map_page(dev, Q(blocks,disks),
+					     offset, len,
+					     DMA_TO_DEVICE);
+
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
-		for (i = 0; i < disks; i++)
-			if (likely(blocks[i]))
-				dma_src[i] = dma_map_page(dev, blocks[i],
-							  offset, len,
-							  DMA_TO_DEVICE);
+		for (i = 0; i < disks-2; i++)
+			if (likely(blocks[i])) {
+				dma_src[src_cnt] = dma_map_page(dev, blocks[i],
+								offset, len,
+								DMA_TO_DEVICE);
+				coefs[src_cnt] = raid6_gfexp[i];
+				src_cnt++;
+			}
+		pq[1] = dma_map_page(dev, Q(blocks,disks),
+				     offset, len,
+				     DMA_TO_DEVICE);
 
 		for (;;) {
 			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
-							    disks - 2,
-							    raid6_gfexp,
+							    src_cnt,
+							    coefs,
 							    len, pqres,
 							    dma_flags);
 			if (likely(tx))
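The async_xor side of the change is not in this file's diff (the diffstat above is limited to crypto/async_tx/async_pq.c), but the behaviour the commit message describes there, detecting and skipping NULL entries in the source list, amounts to the sketch below. This is an illustrative stand-alone routine, not the kernel's async_xor; the name xor_skip_null and the byte-wise loop are assumptions made for clarity.

#include <stddef.h>

/*
 * Illustrative sketch, not the kernel implementation: XOR only the non-NULL
 * buffers in 'srcs' into 'dest', i.e. treat NULL entries as absent blocks
 * rather than dereferencing them.
 */
static void xor_skip_null(unsigned char *dest, void **srcs, int src_cnt, size_t len)
{
	size_t off;
	int i;

	for (i = 0; i < src_cnt; i++) {
		const unsigned char *s = srcs[i];

		if (!s)
			continue;	/* skip holes in the source list */
		for (off = 0; off < len; off++)
			dest[off] ^= s[off];
	}
}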