author     NeilBrown <neilb@suse.de>	2009-10-16 01:40:25 -0400
committer  NeilBrown <neilb@suse.de>	2009-10-16 01:40:25 -0400
commit     5dd33c9a4c29015f6d87568d33521c98931a387e (patch)
tree       a79ac38e1957580743b708410b54694f2b1cbf77
parent     5e5e3e78ed9038b8f7112835d07084eefb9daa47 (diff)
md/async: don't pass a memory pointer as a page pointer.
md/raid6 passes a list of 'struct page *' to the async_tx routines,
which then either DMA map them for offload, or take the page_address
for CPU based calculations.
For RAID6 we sometimes leave 'blanks' in the list of pages.
For CPU based calcs, we want to treat these as a page of zeros.
For offloaded calculations, we simply don't pass a page to the
hardware.
Currently the 'blanks' are encoded as a pointer to
raid6_empty_zero_page. This is a 4096 byte memory region, not a
'struct page'. This is mostly handled correctly but is rather ugly.
So change the code to pass and expect a NULL pointer for the blanks.
When taking page_address of a page, we need to check for a NULL and
in that case use raid6_empty_zero_page.
Signed-off-by: NeilBrown <neilb@suse.de>
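To make the convention concrete, the pattern the patch settles on can be sketched roughly as below. This is an illustrative fragment, not code from the patch: the helper name fill_cpu_ptrs and the choice of headers are assumptions made here, while blocks, ptrs, page_address() and raid6_empty_zero_page are the identifiers that appear in the diff that follows.

#include <linux/mm.h>	/* page_address() */
#include <linux/raid/pq.h>	/* raid6_empty_zero_page */

/*
 * Illustrative sketch only (fill_cpu_ptrs is a hypothetical helper):
 * a NULL entry in the page list stands for a block of zeros.  Offload
 * paths simply skip NULL entries; CPU paths substitute the zero page
 * at the point where a virtual address is actually needed.
 */
static void fill_cpu_ptrs(struct page **blocks, void **ptrs,
			  int disks, unsigned int offset)
{
	int i;

	for (i = 0; i < disks; i++) {
		if (blocks[i] == NULL)
			/* blank slot: treat as a page of zeros */
			ptrs[i] = (void *)raid6_empty_zero_page;
		else
			ptrs[i] = page_address(blocks[i]) + offset;
	}
}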
-rw-r--r--  crypto/async_tx/async_pq.c          | 15
-rw-r--r--  crypto/async_tx/async_raid6_recov.c | 16
-rw-r--r--  drivers/md/raid5.c                  |  4
3 files changed, 17 insertions, 18 deletions
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index b88db6d1dc65..9ab1ce4af3cc 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -30,11 +30,6 @@
  */
 static struct page *scribble;
 
-static bool is_raid6_zero_block(struct page *p)
-{
-	return p == (void *) raid6_empty_zero_page;
-}
-
 /* the struct page *blocks[] parameter passed to async_gen_syndrome()
  * and async_syndrome_val() contains the 'P' destination address at
  * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
@@ -83,7 +78,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
 	 * sources and update the coefficients accordingly
 	 */
 	for (i = 0, idx = 0; i < src_cnt; i++) {
-		if (is_raid6_zero_block(blocks[i]))
+		if (blocks[i] == NULL)
 			continue;
 		dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
 					    DMA_TO_DEVICE);
@@ -160,9 +155,9 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	srcs = (void **) blocks;
 
 	for (i = 0; i < disks; i++) {
-		if (is_raid6_zero_block(blocks[i])) {
+		if (blocks[i] == NULL) {
 			BUG_ON(i > disks - 3); /* P or Q can't be zero */
-			srcs[i] = blocks[i];
+			srcs[i] = (void*)raid6_empty_zero_page;
 		} else
 			srcs[i] = page_address(blocks[i]) + offset;
 	}
@@ -290,12 +285,10 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
 		for (i = 0; i < disks; i++)
-			if (likely(blocks[i])) {
-				BUG_ON(is_raid6_zero_block(blocks[i]));
+			if (likely(blocks[i]))
 				dma_src[i] = dma_map_page(dev, blocks[i],
 							  offset, len,
 							  DMA_TO_DEVICE);
-			}
 
 		for (;;) {
 			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 6d73dde4786d..8e30b6ed0789 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -263,10 +263,10 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb,
 	 * delta p and delta q
 	 */
 	dp = blocks[faila];
-	blocks[faila] = (void *)raid6_empty_zero_page;
+	blocks[faila] = NULL;
 	blocks[disks-2] = dp;
 	dq = blocks[failb];
-	blocks[failb] = (void *)raid6_empty_zero_page;
+	blocks[failb] = NULL;
 	blocks[disks-1] = dq;
 
 	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
@@ -338,7 +338,10 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
-			ptrs[i] = page_address(blocks[i]);
+			if (blocks[i] == NULL)
+				ptrs[i] = (void*)raid6_empty_zero_page;
+			else
+				ptrs[i] = page_address(blocks[i]);
 
 		raid6_2data_recov(disks, bytes, faila, failb, ptrs);
 
@@ -398,7 +401,10 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
-			ptrs[i] = page_address(blocks[i]);
+			if (blocks[i] == NULL)
+				ptrs[i] = (void*)raid6_empty_zero_page;
+			else
+				ptrs[i] = page_address(blocks[i]);
 
 		raid6_datap_recov(disks, bytes, faila, ptrs);
 
@@ -414,7 +420,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 	 * Use the dead data page as temporary storage for delta q
 	 */
 	dq = blocks[faila];
-	blocks[faila] = (void *)raid6_empty_zero_page;
+	blocks[faila] = NULL;
 	blocks[disks-1] = dq;
 
 	/* in the 4 disk case we only need to perform a single source
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c4366c9373c5..dcd9e659ed9d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -720,7 +720,7 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
 	int i;
 
 	for (i = 0; i < disks; i++)
-		srcs[i] = (void *)raid6_empty_zero_page;
+		srcs[i] = NULL;
 
 	count = 0;
 	i = d0_idx;
@@ -816,7 +816,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
 	 * slot number conversion for 'faila' and 'failb'
 	 */
 	for (i = 0; i < disks ; i++)
-		blocks[i] = (void *)raid6_empty_zero_page;
+		blocks[i] = NULL;
 	count = 0;
 	i = d0_idx;
 	do {