| author | Dan Williams <dan.j.williams@intel.com> | 2010-05-04 23:41:56 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-05-05 10:52:56 -0400 |
| commit | 5157b4aa5b7de8787b6318e61bcc285031bb9088 (patch) | |
| tree | cf47d7739d646895a151bf3fd71e56e64dc9d260 | |
| parent | 7ebd467551ed6ae200d7835a84bbda0dcadaa511 (diff) | |
raid6: fix recovery performance regression
The raid6 recovery code should immediately drop back to the optimized
synchronous path when a p+q dma resource is not available. Otherwise we
run the non-optimized/multi-pass async code in sync mode.
Verified with raid6test (NDISKS=255)
Applies to kernels >= 2.6.32.
Cc: <stable@kernel.org>
Acked-by: NeilBrown <neilb@suse.de>
Reported-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| -rw-r--r-- | crypto/async_tx/async_raid6_recov.c | 21 |
1 file changed, 13 insertions(+), 8 deletions(-)
```diff
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 943f2abac9b4..ce038d861eb9 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -324,6 +324,7 @@ struct dma_async_tx_descriptor *
 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 			struct page **blocks, struct async_submit_ctl *submit)
 {
+	void *scribble = submit->scribble;
 	int non_zero_srcs, i;
 
 	BUG_ON(faila == failb);
@@ -332,11 +333,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 
 	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
 
-	/* we need to preserve the contents of 'blocks' for the async
-	 * case, so punt to synchronous if a scribble buffer is not available
+	/* if a dma resource is not available or a scribble buffer is not
+	 * available punt to the synchronous path.  In the 'dma not
+	 * available' case be sure to use the scribble buffer to
+	 * preserve the content of 'blocks' as the caller intended.
 	 */
-	if (!submit->scribble) {
-		void **ptrs = (void **) blocks;
+	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
+		void **ptrs = scribble ? scribble : (void **) blocks;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
@@ -406,11 +409,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 
 	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
 
-	/* we need to preserve the contents of 'blocks' for the async
-	 * case, so punt to synchronous if a scribble buffer is not available
+	/* if a dma resource is not available or a scribble buffer is not
+	 * available punt to the synchronous path.  In the 'dma not
+	 * available' case be sure to use the scribble buffer to
+	 * preserve the content of 'blocks' as the caller intended.
 	 */
-	if (!scribble) {
-		void **ptrs = (void **) blocks;
+	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
+		void **ptrs = scribble ? scribble : (void **) blocks;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
```
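Below is a minimal userspace C sketch of the fallback decision the patch makes, intended only as a reading aid. The helpers `have_pq_dma_channel()`, `recover_sync()`, and `recover_async()` are hypothetical stand-ins for `async_dma_find_channel(DMA_PQ)` and the kernel's optimized synchronous and multi-pass DMA-offloaded recovery paths; only the branch structure mirrors the patched `async_raid6_2data_recov()`.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for async_dma_find_channel(DMA_PQ): report whether a
 * DMA engine capable of p+q (RAID6 syndrome) operations is available. */
static bool have_pq_dma_channel(void)
{
	return false;	/* pretend no offload engine is present */
}

/* Hypothetical stand-in for the optimized synchronous recovery path. */
static void recover_sync(void **ptrs, int disks)
{
	(void)ptrs;
	printf("optimized synchronous recovery over %d disks\n", disks);
}

/* Hypothetical stand-in for the multi-pass, DMA-offloaded recovery path. */
static void recover_async(void **blocks, int disks)
{
	(void)blocks;
	printf("multi-pass async recovery over %d disks\n", disks);
}

/* Mirrors the patched branch: punt to the synchronous path when either the
 * p+q DMA resource or the scribble buffer is missing, and prefer the
 * scribble buffer so the caller's 'blocks' array is left untouched. */
static void raid6_2data_recover(void **blocks, void **scribble, int disks)
{
	if (!have_pq_dma_channel() || !scribble) {
		void **ptrs = scribble ? scribble : blocks;

		recover_sync(ptrs, disks);
		return;
	}
	recover_async(blocks, disks);
}

int main(void)
{
	void *blocks[255] = { NULL };	/* mirrors the raid6test NDISKS=255 run */
	void *scribble[255] = { NULL };

	raid6_2data_recover(blocks, scribble, 255);
	return 0;
}
```

As in the patch, when no p+q capable channel exists the sketch drops straight to the synchronous path and prefers the scribble buffer so the caller's `blocks` array is not rearranged.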
