author		Linus Torvalds <torvalds@linux-foundation.org>	2009-10-31 15:12:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-10-31 15:12:19 -0400
commit		bf699c9bac124f0a095d8ef06f2d6b219300a822 (patch)
tree		de1c4f852b0c68a87eeed25119443eeca8379c66 /crypto
parent		aefba418bfecd1985a08f50a95bd854a119f0153 (diff)
parent		da17bf4306fd3a52e938b121df82a7baa10eb282 (diff)
Merge branch 'for-linus' of git://neil.brown.name/md
* 'for-linus' of git://neil.brown.name/md:
  async_tx: fix asynchronous raid6 recovery for ddf layouts
  async_pq: rename scribble page
  async_pq: kill a stray dma_map() call and other cleanups
  md/raid6: kill a gcc-4.0.1 'uninitialized variable' warning
  raid6/async_tx: handle holes in block list in async_syndrome_val
  md/async: don't pass a memory pointer as a page pointer.
  md: Fix handling of raid5 array which is being reshaped to fewer devices.
  md: fix problems with RAID6 calculations for DDF.
  md/raid456: downlevel multicore operations to raid_run_ops
  md: drivers/md/unroll.pl replaced with awk analog
  md: remove clumsy usage of do_sync_mapping_range from bitmap code
  md: raid1/raid10: handle allocation errors during array setup.
  md/raid5: initialize conf->device_lock earlier
  md/raid1/raid10: add a cond_resched
  Revert "md: do not progress the resync process if the stripe was blocked"
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/async_tx/async_pq.c		 60
-rw-r--r--	crypto/async_tx/async_raid6_recov.c	100
-rw-r--r--	crypto/async_tx/async_xor.c		 18
3 files changed, 113 insertions(+), 65 deletions(-)
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index b88db6d1dc65..6b5cc4fba59f 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -26,14 +26,10 @@
 #include <linux/async_tx.h>
 
 /**
- * scribble - space to hold throwaway P buffer for synchronous gen_syndrome
+ * pq_scribble_page - space to hold throwaway P or Q buffer for
+ * synchronous gen_syndrome
  */
-static struct page *scribble;
-
-static bool is_raid6_zero_block(struct page *p)
-{
-	return p == (void *) raid6_empty_zero_page;
-}
+static struct page *pq_scribble_page;
 
 /* the struct page *blocks[] parameter passed to async_gen_syndrome()
  * and async_syndrome_val() contains the 'P' destination address at
@@ -83,7 +79,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
 	 * sources and update the coefficients accordingly
 	 */
	for (i = 0, idx = 0; i < src_cnt; i++) {
-		if (is_raid6_zero_block(blocks[i]))
+		if (blocks[i] == NULL)
 			continue;
 		dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
 					    DMA_TO_DEVICE);
@@ -160,9 +156,9 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	srcs = (void **) blocks;
 
 	for (i = 0; i < disks; i++) {
-		if (is_raid6_zero_block(blocks[i])) {
+		if (blocks[i] == NULL) {
 			BUG_ON(i > disks - 3); /* P or Q can't be zero */
-			srcs[i] = blocks[i];
+			srcs[i] = (void*)raid6_empty_zero_page;
 		} else
 			srcs[i] = page_address(blocks[i]) + offset;
 	}
@@ -186,10 +182,14 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
  * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <=
  * PAGE_SIZE as a temporary buffer of this size is used in the
  * synchronous path. 'disks' always accounts for both destination
- * buffers.
+ * buffers. If any source buffers (blocks[i] where i < disks - 2) are
+ * set to NULL those buffers will be replaced with the raid6_zero_page
+ * in the synchronous path and omitted in the hardware-asynchronous
+ * path.
  *
  * 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overridden
+ * 'blocks' may be overwritten to perform address conversions
+ * (dma_map_page() or page_address()).
  */
 struct dma_async_tx_descriptor *
 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -227,11 +227,11 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 		async_tx_quiesce(&submit->depend_tx);
 
 		if (!P(blocks, disks)) {
-			P(blocks, disks) = scribble;
+			P(blocks, disks) = pq_scribble_page;
 			BUG_ON(len + offset > PAGE_SIZE);
 		}
 		if (!Q(blocks, disks)) {
-			Q(blocks, disks) = scribble;
+			Q(blocks, disks) = pq_scribble_page;
 			BUG_ON(len + offset > PAGE_SIZE);
 		}
 		do_sync_gen_syndrome(blocks, offset, disks, len, submit);
@@ -265,8 +265,10 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 						      len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx;
+	unsigned char coefs[disks-2];
 	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 	dma_addr_t *dma_src = NULL;
+	int src_cnt = 0;
 
 	BUG_ON(disks < 4);
 
@@ -285,22 +287,32 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 			 __func__, disks, len);
 		if (!P(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_P;
+		else
+			pq[0] = dma_map_page(dev, P(blocks, disks),
+					     offset, len,
+					     DMA_TO_DEVICE);
 		if (!Q(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+		else
+			pq[1] = dma_map_page(dev, Q(blocks, disks),
+					     offset, len,
+					     DMA_TO_DEVICE);
+
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
-		for (i = 0; i < disks; i++)
+		for (i = 0; i < disks-2; i++)
 			if (likely(blocks[i])) {
-				BUG_ON(is_raid6_zero_block(blocks[i]));
-				dma_src[i] = dma_map_page(dev, blocks[i],
-							  offset, len,
-							  DMA_TO_DEVICE);
+				dma_src[src_cnt] = dma_map_page(dev, blocks[i],
+								offset, len,
+								DMA_TO_DEVICE);
+				coefs[src_cnt] = raid6_gfexp[i];
+				src_cnt++;
 			}
 
 		for (;;) {
 			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
-							    disks - 2,
-							    raid6_gfexp,
+							    src_cnt,
+							    coefs,
 							    len, pqres,
 							    dma_flags);
 			if (likely(tx))
@@ -373,9 +385,9 @@ EXPORT_SYMBOL_GPL(async_syndrome_val);
 
 static int __init async_pq_init(void)
 {
-	scribble = alloc_page(GFP_KERNEL);
+	pq_scribble_page = alloc_page(GFP_KERNEL);
 
-	if (scribble)
+	if (pq_scribble_page)
 		return 0;
 
 	pr_err("%s: failed to allocate required spare page\n", __func__);
@@ -385,7 +397,7 @@ static int __init async_pq_init(void)
 
 static void __exit async_pq_exit(void)
 {
-	put_page(scribble);
+	put_page(pq_scribble_page);
 }
 
 module_init(async_pq_init);
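
The kernel-doc change above spells out the calling convention for async_gen_syndrome(): the P and Q destinations sit at blocks[disks-2] and blocks[disks-1], either destination may be NULL, and a NULL source slot now means "compute as if this block were all zeroes" (substituted with raid6_empty_zero_page on the synchronous path, skipped entirely on the DMA path). A minimal caller sketch, purely for illustration and assuming the usual async_tx submit setup with an addr_conv_t scribble array; NR_DATA, stripe_pages[], p_page, q_page and conv[] are hypothetical names, not part of this merge:

	/* hypothetical stripe: NR_DATA data blocks + P + Q, with a hole at index 2 */
	struct page *blocks[NR_DATA + 2];
	struct async_submit_ctl submit;
	addr_conv_t conv[NR_DATA + 2];
	struct dma_async_tx_descriptor *tx;
	int i;

	for (i = 0; i < NR_DATA; i++)
		blocks[i] = stripe_pages[i];
	blocks[2] = NULL;		/* hole: treated as the zero page */
	blocks[NR_DATA] = p_page;	/* P destination */
	blocks[NR_DATA + 1] = q_page;	/* Q destination */

	init_async_submit(&submit, ASYNC_TX_FENCE, NULL, NULL, NULL, conv);
	tx = async_gen_syndrome(blocks, 0, NR_DATA + 2, PAGE_SIZE, &submit);

If p_page or q_page were passed as NULL instead, the synchronous fallback would redirect that destination to pq_scribble_page, which is why len + offset must stay within PAGE_SIZE in that case.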
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 6d73dde4786d..943f2abac9b4 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -131,8 +131,8 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
-		struct async_submit_ctl *submit)
+__2data_recov_4(int disks, size_t bytes, int faila, int failb,
+		struct page **blocks, struct async_submit_ctl *submit)
 {
 	struct dma_async_tx_descriptor *tx = NULL;
 	struct page *p, *q, *a, *b;
@@ -143,8 +143,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
 
-	p = blocks[4-2];
-	q = blocks[4-1];
+	p = blocks[disks-2];
+	q = blocks[disks-1];
 
 	a = blocks[faila];
 	b = blocks[failb];
@@ -170,8 +170,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
-		struct async_submit_ctl *submit)
+__2data_recov_5(int disks, size_t bytes, int faila, int failb,
+		struct page **blocks, struct async_submit_ctl *submit)
 {
 	struct dma_async_tx_descriptor *tx = NULL;
 	struct page *p, *q, *g, *dp, *dq;
@@ -181,21 +181,22 @@ __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
 	dma_async_tx_callback cb_fn = submit->cb_fn;
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
-	int uninitialized_var(good);
-	int i;
+	int good_srcs, good, i;
 
-	for (i = 0; i < 3; i++) {
+	good_srcs = 0;
+	good = -1;
+	for (i = 0; i < disks-2; i++) {
+		if (blocks[i] == NULL)
+			continue;
 		if (i == faila || i == failb)
 			continue;
-		else {
-			good = i;
-			break;
-		}
+		good = i;
+		good_srcs++;
 	}
-	BUG_ON(i >= 3);
+	BUG_ON(good_srcs > 1);
 
-	p = blocks[5-2];
-	q = blocks[5-1];
+	p = blocks[disks-2];
+	q = blocks[disks-1];
 	g = blocks[good];
 
 	/* Compute syndrome with zero for the missing data pages
@@ -263,10 +264,10 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb,
 	 * delta p and delta q
 	 */
 	dp = blocks[faila];
-	blocks[faila] = (void *)raid6_empty_zero_page;
+	blocks[faila] = NULL;
 	blocks[disks-2] = dp;
 	dq = blocks[failb];
-	blocks[failb] = (void *)raid6_empty_zero_page;
+	blocks[failb] = NULL;
 	blocks[disks-1] = dq;
 
 	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
@@ -323,6 +324,8 @@ struct dma_async_tx_descriptor *
 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 			struct page **blocks, struct async_submit_ctl *submit)
 {
+	int non_zero_srcs, i;
+
 	BUG_ON(faila == failb);
 	if (failb < faila)
 		swap(faila, failb);
@@ -334,11 +337,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 	 */
 	if (!submit->scribble) {
 		void **ptrs = (void **) blocks;
-		int i;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
-			ptrs[i] = page_address(blocks[i]);
+			if (blocks[i] == NULL)
+				ptrs[i] = (void *) raid6_empty_zero_page;
+			else
+				ptrs[i] = page_address(blocks[i]);
 
 		raid6_2data_recov(disks, bytes, faila, failb, ptrs);
 
@@ -347,19 +352,30 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 		return NULL;
 	}
 
-	switch (disks) {
-	case 4:
+	non_zero_srcs = 0;
+	for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
+		if (blocks[i])
+			non_zero_srcs++;
+	switch (non_zero_srcs) {
+	case 0:
+	case 1:
+		/* There must be at least 2 sources - the failed devices. */
+		BUG();
+
+	case 2:
 		/* dma devices do not uniformly understand a zero source pq
 		 * operation (in contrast to the synchronous case), so
-		 * explicitly handle the 4 disk special case
+		 * explicitly handle the special case of a 4 disk array with
+		 * both data disks missing.
 		 */
-		return __2data_recov_4(bytes, faila, failb, blocks, submit);
-	case 5:
+		return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
+	case 3:
 		/* dma devices do not uniformly understand a single
 		 * source pq operation (in contrast to the synchronous
-		 * case), so explicitly handle the 5 disk special case
+		 * case), so explicitly handle the special case of a 5 disk
+		 * array with 2 of 3 data disks missing.
 		 */
-		return __2data_recov_5(bytes, faila, failb, blocks, submit);
+		return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
 	default:
 		return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
 	}
@@ -385,6 +401,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 	dma_async_tx_callback cb_fn = submit->cb_fn;
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
+	int good_srcs, good, i;
 	struct page *srcs[2];
 
 	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
@@ -394,11 +411,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 	 */
 	if (!scribble) {
 		void **ptrs = (void **) blocks;
-		int i;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
-			ptrs[i] = page_address(blocks[i]);
+			if (blocks[i] == NULL)
+				ptrs[i] = (void*)raid6_empty_zero_page;
+			else
+				ptrs[i] = page_address(blocks[i]);
 
 		raid6_datap_recov(disks, bytes, faila, ptrs);
 
@@ -407,6 +426,20 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 		return NULL;
 	}
 
+	good_srcs = 0;
+	good = -1;
+	for (i = 0; i < disks-2; i++) {
+		if (i == faila)
+			continue;
+		if (blocks[i]) {
+			good = i;
+			good_srcs++;
+			if (good_srcs > 1)
+				break;
+		}
+	}
+	BUG_ON(good_srcs == 0);
+
 	p = blocks[disks-2];
 	q = blocks[disks-1];
 
@@ -414,14 +447,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 	 * Use the dead data page as temporary storage for delta q
 	 */
 	dq = blocks[faila];
-	blocks[faila] = (void *)raid6_empty_zero_page;
+	blocks[faila] = NULL;
 	blocks[disks-1] = dq;
 
-	/* in the 4 disk case we only need to perform a single source
-	 * multiplication
+	/* in the 4-disk case we only need to perform a single source
+	 * multiplication with the one good data block.
 	 */
-	if (disks == 4) {
-		int good = faila == 0 ? 1 : 0;
+	if (good_srcs == 1) {
 		struct page *g = blocks[good];
 
 		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
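
With these changes the recovery routines dispatch on the number of non-NULL data sources instead of the raw disk count, so DDF-style layouts that pad the syndrome set with zero blocks can pass those slots as NULL. A caller sketch, for illustration only, assuming a 6-disk syndrome set with one hole; d0, fail_a, fail_b, p_page, q_page and conv are hypothetical names, not part of this merge:

	struct page *blocks[6];
	struct async_submit_ctl submit;
	addr_conv_t conv[6];

	blocks[0] = d0;		/* the only good data block */
	blocks[1] = fail_a;	/* faila = 1: recovered data is written here */
	blocks[2] = NULL;	/* hole: counts as a zero source */
	blocks[3] = fail_b;	/* failb = 3: recovered data is written here */
	blocks[4] = p_page;	/* P */
	blocks[5] = q_page;	/* Q */

	init_async_submit(&submit, 0, NULL, NULL, NULL, conv);
	async_raid6_2data_recov(6, PAGE_SIZE, 1, 3, blocks, &submit);

Here non_zero_srcs works out to 3 (indices 0, 1 and 3), so the call is routed to __2data_recov_5() even though the array nominally has 6 disks.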
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index b459a9034aac..79182dcb91b7 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -44,20 +44,23 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	void *cb_param_orig = submit->cb_param;
 	enum async_tx_flags flags_orig = submit->flags;
 	enum dma_ctrl_flags dma_flags;
-	int xor_src_cnt;
+	int xor_src_cnt = 0;
 	dma_addr_t dma_dest;
 
 	/* map the dest bidrectional in case it is re-used as a source */
 	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
 	for (i = 0; i < src_cnt; i++) {
 		/* only map the dest once */
+		if (!src_list[i])
+			continue;
 		if (unlikely(src_list[i] == dest)) {
-			dma_src[i] = dma_dest;
+			dma_src[xor_src_cnt++] = dma_dest;
 			continue;
 		}
-		dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
-					  len, DMA_TO_DEVICE);
+		dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
+						      len, DMA_TO_DEVICE);
 	}
+	src_cnt = xor_src_cnt;
 
 	while (src_cnt) {
 		submit->flags = flags_orig;
@@ -123,7 +126,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	    int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
 	int i;
-	int xor_src_cnt;
+	int xor_src_cnt = 0;
 	int src_off = 0;
 	void *dest_buf;
 	void **srcs;
@@ -135,8 +138,9 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 
 	/* convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
-		srcs[i] = page_address(src_list[i]) + offset;
-
+		if (src_list[i])
+			srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
+	src_cnt = xor_src_cnt;
 	/* set destination address */
 	dest_buf = page_address(dest) + offset;
 
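
The xor changes follow the same pattern: NULL entries in src_list[] are squeezed out and the source count recomputed before the work reaches the xor routine or the DMA engine, so holes never hit the backend. The compaction step shown standalone for illustration; compact_xor_srcs is a hypothetical helper, not part of this merge:

/* Pack the non-NULL pages' buffer addresses into srcs[] and return the
 * new, hole-free source count; this mirrors the xor_src_cnt bookkeeping
 * added to do_async_xor() and do_sync_xor() above.
 */
static int compact_xor_srcs(struct page **src_list, void **srcs,
			    int src_cnt, unsigned int offset)
{
	int i, xor_src_cnt = 0;

	for (i = 0; i < src_cnt; i++)
		if (src_list[i])
			srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;

	return xor_src_cnt;
}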