author		Dan Williams <dan.j.williams@intel.com>	2009-10-19 21:09:32 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-10-19 21:20:20 -0400
commit		5676470f06f783aebf545c8f17ca772911022068 (patch)
tree		58a33d5fc031fc0a6090299dd8535c24182b7897 /crypto
parent		6629542e79255e0dbef8ec82eaf644e1b2546c3c (diff)
async_pq: kill a stray dma_map() call and other cleanups
- update the kernel doc for async_syndrome to indicate what NULL in the
  source list means
- whitespace fixups
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/async_tx/async_pq.c	15
1 file changed, 8 insertions, 7 deletions
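Note (not part of the commit): the kernel-doc hunk below documents that a NULL entry in the source list stands for an all-zero block; the synchronous path substitutes raid6_zero_page for it and the hardware-asynchronous path simply omits that source. A minimal caller-side sketch of that convention follows; the 6-disk layout, the example_gen_pq() helper name and the caller-provided scribble buffer are illustrative assumptions, not taken from this commit.

#include <linux/async_tx.h>

/*
 * Illustrative only: a 6-disk stripe where blocks[0..3] are data,
 * blocks[4] is P and blocks[5] is Q.  Marking one data block NULL
 * tells async_gen_syndrome() to treat it as all zeroes.
 */
static struct dma_async_tx_descriptor *
example_gen_pq(struct page **stripe_pages, addr_conv_t *scribble)
{
	struct page *blocks[6];
	struct async_submit_ctl submit;
	int i;

	for (i = 0; i < 6; i++)
		blocks[i] = stripe_pages[i];
	blocks[2] = NULL;	/* data block 2 contributes nothing */

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
	return async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);
}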
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 43b1436bd968..60476560e0b0 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -181,10 +181,14 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
  * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
  * PAGE_SIZE as a temporary buffer of this size is used in the
  * synchronous path.  'disks' always accounts for both destination
- * buffers.
+ * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
+ * set to NULL those buffers will be replaced with the raid6_zero_page
+ * in the synchronous path and omitted in the hardware-asynchronous
+ * path.
  *
  * 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overridden
+ * 'blocks' may be overwritten to perform address conversions
+ * (dma_map_page() or page_address()).
  */
 struct dma_async_tx_descriptor *
 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -283,13 +287,13 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	if (!P(blocks, disks))
 		dma_flags |= DMA_PREP_PQ_DISABLE_P;
 	else
-		pq[0] = dma_map_page(dev, P(blocks,disks),
+		pq[0] = dma_map_page(dev, P(blocks, disks),
 				     offset, len,
 				     DMA_TO_DEVICE);
 	if (!Q(blocks, disks))
 		dma_flags |= DMA_PREP_PQ_DISABLE_Q;
 	else
-		pq[1] = dma_map_page(dev, Q(blocks,disks),
+		pq[1] = dma_map_page(dev, Q(blocks, disks),
 				     offset, len,
 				     DMA_TO_DEVICE);
 
@@ -303,9 +307,6 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 		coefs[src_cnt] = raid6_gfexp[i];
 		src_cnt++;
 	}
-	pq[1] = dma_map_page(dev, Q(blocks,disks),
-			     offset, len,
-			     DMA_TO_DEVICE);
 
 	for (;;) {
 		tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
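Note (not part of the commit): the third hunk removes a second dma_map_page() of the Q page in async_syndrome_val(); Q is already mapped into pq[1] a few lines earlier, so the extra call created a mapping that no dma_unmap_page() would ever balance. A generic sketch of the intended one-map/one-unmap pattern; the map_q_once() helper name is an assumption made for illustration.

#include <linux/dma-mapping.h>

/*
 * Map the Q destination exactly once and hand the dma address to the
 * descriptor; the completion path is then expected to perform the single
 * matching dma_unmap_page(dev, q, len, DMA_TO_DEVICE).
 */
static dma_addr_t map_q_once(struct device *dev, struct page *q_page,
			     unsigned int offset, size_t len)
{
	dma_addr_t q = dma_map_page(dev, q_page, offset, len, DMA_TO_DEVICE);

	/*
	 * Do not call dma_map_page() on q_page again for this operation;
	 * a duplicate mapping is never unmapped and leaks IOMMU or
	 * bounce-buffer resources.
	 */
	return q;
}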