 crypto/async_tx/async_pq.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 43b1436bd968..60476560e0b0 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -181,10 +181,14 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
  * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <=
  * PAGE_SIZE as a temporary buffer of this size is used in the
  * synchronous path. 'disks' always accounts for both destination
- * buffers.
+ * buffers. If any source buffers (blocks[i] where i < disks - 2) are
+ * set to NULL those buffers will be replaced with the raid6_zero_page
+ * in the synchronous path and omitted in the hardware-asynchronous
+ * path.
  *
  * 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overridden
+ * 'blocks' may be overwritten to perform address conversions
+ * (dma_map_page() or page_address()).
  */
 struct dma_async_tx_descriptor *
 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -283,13 +287,13 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 		if (!P(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_P;
 		else
-			pq[0] = dma_map_page(dev, P(blocks,disks),
+			pq[0] = dma_map_page(dev, P(blocks, disks),
 					     offset, len,
 					     DMA_TO_DEVICE);
 		if (!Q(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
 		else
-			pq[1] = dma_map_page(dev, Q(blocks,disks),
+			pq[1] = dma_map_page(dev, Q(blocks, disks),
 					     offset, len,
 					     DMA_TO_DEVICE);
 
@@ -303,9 +307,6 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 				coefs[src_cnt] = raid6_gfexp[i];
 				src_cnt++;
 			}
-		pq[1] = dma_map_page(dev, Q(blocks,disks),
-				     offset, len,
-				     DMA_TO_DEVICE);
 
 		for (;;) {
 			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
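Usage note (not part of the patch): the reworded comment above documents two behaviours of async_gen_syndrome(): NULL source pages are replaced with the raid6_zero_page on the synchronous path and omitted on the hardware-asynchronous path, and a NULL submit->scribble means 'blocks' may be overwritten during address conversion. The sketch below is a minimal, hypothetical caller under those rules; the helper name gen_pq_sparse, the six-disk geometry, and the stripe/scribble/depend_tx parameters are illustrative assumptions, not code from this commit.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/async_tx.h>

static struct dma_async_tx_descriptor *
gen_pq_sparse(struct page **stripe, addr_conv_t *scribble,
	      struct dma_async_tx_descriptor *depend_tx)
{
	struct async_submit_ctl submit;
	int disks = 6;	/* 4 data blocks + P (stripe[4]) + Q (stripe[5]) */

	/* Absent data blocks: treated as zero pages on the synchronous
	 * path, skipped entirely on the hardware-asynchronous path. */
	stripe[1] = NULL;
	stripe[3] = NULL;

	/* A non-NULL scribble (at least 'disks' addr_conv_t entries)
	 * gives the core somewhere to store converted addresses, so
	 * 'stripe' itself is left untouched. */
	init_async_submit(&submit, ASYNC_TX_ACK, depend_tx,
			  NULL, NULL, scribble);

	return async_gen_syndrome(stripe, 0, disks, PAGE_SIZE, &submit);
}

Callers that need 'blocks' intact after submission should pass a real scribble buffer, as the updated comment notes; only callers that can tolerate the in-place address conversion may pass NULL.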