| author | Dan Williams <dan.j.williams@intel.com> | 2009-04-09 19:16:18 -0400 |
| --- | --- | --- |
| committer | Dan Williams <dan.j.williams@intel.com> | 2009-06-03 17:07:34 -0400 |
| commit | 88ba2aa586c874681c072101287e15d40de7e6e2 (patch) | |
| tree | 69a124fcd0a8a75ef9aae0cc4081bad83c770374 /drivers/md/raid5.c | |
| parent | 099f53cb50e45ef617a9f1d63ceec799e489418b (diff) | |
async_tx: kill ASYNC_TX_DEP_ACK flag
In support of inter-channel chaining, async_tx uses an ack flag to
gate whether a dependent operation can be chained to another. While the
flag is not set, the chain is open for appending. Setting the ack flag
closes the chain and marks the descriptor for garbage collection. The
ASYNC_TX_DEP_ACK flag essentially means "close the chain after adding
this dependency". Since each operation can only have one child, the API
now sets the ack flag implicitly at dependency submission time. This
removes an unnecessary management burden from clients of the API.
[ Impact: clean up and enforce one dependency per operation ]
Reviewed-by: Andre Noll <maan@systemlinux.org>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
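For illustration only (not part of the patch), here is a minimal sketch of how a client chains dependent operations with the async_tx calling convention used at this point in the tree; `chain_two_copies`, `dest`, `src_a`, and `src_b` are hypothetical names:

```c
#include <linux/async_tx.h>
#include <linux/mm.h>

/*
 * Hypothetical helper, for illustration only: chain two copies so the
 * second depends on the first.  With this patch the dependency passed in
 * as 'tx' is acked implicitly at submission time, so the caller no longer
 * passes (or has) ASYNC_TX_DEP_ACK; a flags value of 0 leaves the new
 * descriptor itself open for further chaining.
 */
static struct dma_async_tx_descriptor *
chain_two_copies(struct page *dest, struct page *src_a, struct page *src_b)
{
	struct dma_async_tx_descriptor *tx;

	/* first copy: no dependency */
	tx = async_memcpy(dest, src_a, 0, 0, PAGE_SIZE / 2,
			  0, NULL, NULL, NULL);

	/* second copy: depends on the first; before this patch the caller
	 * would have passed ASYNC_TX_DEP_ACK here to close the chain
	 */
	tx = async_memcpy(dest, src_b, PAGE_SIZE / 2, 0, PAGE_SIZE / 2,
			  0, tx, NULL, NULL);

	return tx;
}
```

The visible effect for callers is that a descriptor can no longer be reused as the dependency of a second child; as the impact note says, the API now enforces one dependency per operation.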
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r-- | drivers/md/raid5.c | 25
1 file changed, 11 insertions(+), 14 deletions(-)
```diff
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f8d2d35ed298..0ef5362c8d02 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -525,14 +525,12 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
 			bio_page = bio_iovec_idx(bio, i)->bv_page;
 			if (frombio)
 				tx = async_memcpy(page, bio_page, page_offset,
-					b_offset, clen,
-					ASYNC_TX_DEP_ACK,
-					tx, NULL, NULL);
+					b_offset, clen, 0,
+					tx, NULL, NULL);
 			else
 				tx = async_memcpy(bio_page, page, b_offset,
-					page_offset, clen,
-					ASYNC_TX_DEP_ACK,
-					tx, NULL, NULL);
+					page_offset, clen, 0,
+					tx, NULL, NULL);
 		}
 		if (clen < len) /* hit end of page */
 			break;
@@ -615,8 +613,7 @@ static void ops_run_biofill(struct stripe_head *sh)
 	}
 
 	atomic_inc(&sh->count);
-	async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
-		ops_complete_biofill, sh);
+	async_trigger_callback(ASYNC_TX_ACK, tx, ops_complete_biofill, sh);
 }
 
 static void ops_complete_compute5(void *stripe_head_ref)
@@ -701,8 +698,8 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 	}
 
 	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
-		       ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx,
+		       ASYNC_TX_XOR_DROP_DST, tx,
 		       ops_complete_prexor, sh);
 
 	return tx;
 }
@@ -809,7 +806,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
 	 * for the synchronous xor case
 	 */
-	flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK |
+	flags = ASYNC_TX_ACK |
 		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
 
 	atomic_inc(&sh->count);
@@ -858,7 +855,7 @@ static void ops_run_check(struct stripe_head *sh)
 		&sh->ops.zero_sum_result, 0, NULL, NULL, NULL);
 
 	atomic_inc(&sh->count);
-	tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
+	tx = async_trigger_callback(ASYNC_TX_ACK, tx,
 		ops_complete_check, sh);
 }
 
@@ -2687,8 +2684,8 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
 
 		/* place all the copies on one channel */
 		tx = async_memcpy(sh2->dev[dd_idx].page,
 			sh->dev[i].page, 0, 0, STRIPE_SIZE,
-			ASYNC_TX_DEP_ACK, tx, NULL, NULL);
+			0, tx, NULL, NULL);
 
 		set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
 		set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
```